From: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
To: dev@dpdk.org
Cc: Farah Smith <farah.smith@broadcom.com>,
	Randy Schacher <stuart.schacher@broadcom.com>,
	Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Subject: [dpdk-dev] [PATCH 08/58] net/bnxt: add action SRAM Translation
Date: Sun, 30 May 2021 14:28:39 +0530
Message-ID: <20210530085929.29695-9-venkatkumar.duvvuru@broadcom.com>
In-Reply-To: <20210530085929.29695-1-venkatkumar.duvvuru@broadcom.com>

From: Farah Smith <farah.smith@broadcom.com>

- Translate Truflow action types for Thor to the HCAPI RM
  resource-defined SRAM banks (the parent/child bank layout is
  sketched below).
- Move the module type enum definitions to the tf_core API.
- Switch RM to the subtype concept.
- Alloc/free now work for the Thor SRAM table type for full
  action records (AR).
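
For illustration only (not part of the change itself): a minimal sketch of
how a parent/child pair shares a Thor SRAM bank under the new RM element
configuration. The values mirror the tf_tbl_p58[] entries added in
tf_device_p58.h below; the snippet and its header choices are assumptions,
not code from this patch.

	#include "tf_core.h"              /* TF_TBL_TYPE_* module subtypes */
	#include "tf_rm.h"                /* struct tf_rm_element_cfg */
	#include "cfa_resource_types.h"   /* CFA_RESOURCE_TYPE_P58_SRAM_BANK_1 */

	/*
	 * Full action records own the bank-1 pool (parent); compact action
	 * records carve entries out of the same pool (child) by naming the
	 * parent via parent_subtype. slices/divider values match the
	 * tf_tbl_p58[] entries in this series.
	 */
	static struct tf_rm_element_cfg bank1_example[] = {
		[TF_TBL_TYPE_FULL_ACT_RECORD] = {
			.cfg_type       = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
			.hcapi_type     = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
			.slices         = 4,
			.divider        = 8,
		},
		[TF_TBL_TYPE_COMPACT_ACT_RECORD] = {
			.cfg_type       = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
			.parent_subtype = TF_TBL_TYPE_FULL_ACT_RECORD,
			.hcapi_type     = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
			.slices         = 8,
			.divider        = 8,
		},
	};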

Signed-off-by: Farah Smith <farah.smith@broadcom.com>
Signed-off-by: Randy Schacher <stuart.schacher@broadcom.com>
Signed-off-by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Reviewed-by: Peter Spreadborough <peter.spreadborough@broadcom.com>
Reviewed-by: Randy Schacher <stuart.schacher@broadcom.com>
---
 drivers/net/bnxt/bnxt_util.h                |   3 +
 drivers/net/bnxt/hcapi/cfa/hcapi_cfa.h      | 339 +++------
 drivers/net/bnxt/hcapi/cfa/hcapi_cfa_defs.h | 387 +---------
 drivers/net/bnxt/hcapi/cfa/hcapi_cfa_p58.h  | 411 ++++++++++
 drivers/net/bnxt/tf_core/meson.build        |   1 -
 drivers/net/bnxt/tf_core/tf_core.h          |  24 +
 drivers/net/bnxt/tf_core/tf_device.c        |  43 +-
 drivers/net/bnxt/tf_core/tf_device.h        |  23 -
 drivers/net/bnxt/tf_core/tf_device_p4.c     |  21 +-
 drivers/net/bnxt/tf_core/tf_device_p58.c    |  53 +-
 drivers/net/bnxt/tf_core/tf_device_p58.h    | 110 ++-
 drivers/net/bnxt/tf_core/tf_em_common.c     |   4 +-
 drivers/net/bnxt/tf_core/tf_em_host.c       |   6 +-
 drivers/net/bnxt/tf_core/tf_em_internal.c   |   4 +-
 drivers/net/bnxt/tf_core/tf_identifier.c    |  10 +-
 drivers/net/bnxt/tf_core/tf_if_tbl.c        |   2 +-
 drivers/net/bnxt/tf_core/tf_rm.c            | 508 ++++++++-----
 drivers/net/bnxt/tf_core/tf_rm.h            | 109 ++-
 drivers/net/bnxt/tf_core/tf_shadow_tbl.c    | 783 --------------------
 drivers/net/bnxt/tf_core/tf_shadow_tbl.h    | 256 -------
 drivers/net/bnxt/tf_core/tf_tbl.c           | 238 +-----
 drivers/net/bnxt/tf_core/tf_tcam.c          |  20 +-
 drivers/net/bnxt/tf_core/tf_util.c          |  36 +-
 drivers/net/bnxt/tf_core/tf_util.h          |  26 +-
 24 files changed, 1130 insertions(+), 2287 deletions(-)
 create mode 100644 drivers/net/bnxt/hcapi/cfa/hcapi_cfa_p58.h
 delete mode 100644 drivers/net/bnxt/tf_core/tf_shadow_tbl.c
 delete mode 100644 drivers/net/bnxt/tf_core/tf_shadow_tbl.h

diff --git a/drivers/net/bnxt/bnxt_util.h b/drivers/net/bnxt/bnxt_util.h
index 64e97eed15..b243c21ec2 100644
--- a/drivers/net/bnxt/bnxt_util.h
+++ b/drivers/net/bnxt/bnxt_util.h
@@ -9,6 +9,9 @@
 #ifndef BIT
 #define BIT(n)	(1UL << (n))
 #endif /* BIT */
+#ifndef BIT_MASK
+#define BIT_MASK(len) (BIT(len) - 1)
+#endif /* BIT_MASK */
 
 #define PCI_SUBSYSTEM_ID_OFFSET	0x2e
 
diff --git a/drivers/net/bnxt/hcapi/cfa/hcapi_cfa.h b/drivers/net/bnxt/hcapi/cfa/hcapi_cfa.h
index b8c85a0fca..c67aa29ad0 100644
--- a/drivers/net/bnxt/hcapi/cfa/hcapi_cfa.h
+++ b/drivers/net/bnxt/hcapi/cfa/hcapi_cfa.h
@@ -1,281 +1,126 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2021 Broadcom
- * All rights reserved.
+/*
+ *   Copyright(c) Broadcom Limited.
+ *   All rights reserved.
  */
 
+/*!
+ *   \file
+ *   \brief Exported functions for CFA HW programming
+ */
 #ifndef _HCAPI_CFA_H_
 #define _HCAPI_CFA_H_
 
 #include <stdio.h>
+#include <stddef.h>
 #include <string.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stddef.h>
+#include <errno.h>
 
 #include "hcapi_cfa_defs.h"
 
-/**
- * Index used for the sram_entries field
- */
-enum hcapi_cfa_resc_type_sram {
-	HCAPI_CFA_RESC_TYPE_SRAM_FULL_ACTION,
-	HCAPI_CFA_RESC_TYPE_SRAM_MCG,
-	HCAPI_CFA_RESC_TYPE_SRAM_ENCAP_8B,
-	HCAPI_CFA_RESC_TYPE_SRAM_ENCAP_16B,
-	HCAPI_CFA_RESC_TYPE_SRAM_ENCAP_64B,
-	HCAPI_CFA_RESC_TYPE_SRAM_SP_SMAC,
-	HCAPI_CFA_RESC_TYPE_SRAM_SP_SMAC_IPV4,
-	HCAPI_CFA_RESC_TYPE_SRAM_SP_SMAC_IPV6,
-	HCAPI_CFA_RESC_TYPE_SRAM_COUNTER_64B,
-	HCAPI_CFA_RESC_TYPE_SRAM_NAT_SPORT,
-	HCAPI_CFA_RESC_TYPE_SRAM_NAT_DPORT,
-	HCAPI_CFA_RESC_TYPE_SRAM_NAT_S_IPV4,
-	HCAPI_CFA_RESC_TYPE_SRAM_NAT_D_IPV4,
-	HCAPI_CFA_RESC_TYPE_SRAM_MAX
-};
-
-/**
- * Index used for the hw_entries field in struct cfa_rm_db
- */
-enum hcapi_cfa_resc_type_hw {
-	/* common HW resources for all chip variants */
-	HCAPI_CFA_RESC_TYPE_HW_L2_CTXT_TCAM,
-	HCAPI_CFA_RESC_TYPE_HW_PROF_FUNC,
-	HCAPI_CFA_RESC_TYPE_HW_PROF_TCAM,
-	HCAPI_CFA_RESC_TYPE_HW_EM_PROF_ID,
-	HCAPI_CFA_RESC_TYPE_HW_EM_REC,
-	HCAPI_CFA_RESC_TYPE_HW_WC_TCAM_PROF_ID,
-	HCAPI_CFA_RESC_TYPE_HW_WC_TCAM,
-	HCAPI_CFA_RESC_TYPE_HW_METER_PROF,
-	HCAPI_CFA_RESC_TYPE_HW_METER_INST,
-	HCAPI_CFA_RESC_TYPE_HW_MIRROR,
-	HCAPI_CFA_RESC_TYPE_HW_UPAR,
-	/* Wh+/SR specific HW resources */
-	HCAPI_CFA_RESC_TYPE_HW_SP_TCAM,
-	/* Thor, SR2 common HW resources */
-	HCAPI_CFA_RESC_TYPE_HW_FKB,
-	/* SR specific HW resources */
-	HCAPI_CFA_RESC_TYPE_HW_TBL_SCOPE,
-	HCAPI_CFA_RESC_TYPE_HW_L2_FUNC,
-	HCAPI_CFA_RESC_TYPE_HW_EPOCH0,
-	HCAPI_CFA_RESC_TYPE_HW_EPOCH1,
-	HCAPI_CFA_RESC_TYPE_HW_METADATA,
-	HCAPI_CFA_RESC_TYPE_HW_CT_STATE,
-	HCAPI_CFA_RESC_TYPE_HW_RANGE_PROF,
-	HCAPI_CFA_RESC_TYPE_HW_RANGE_ENTRY,
-	HCAPI_CFA_RESC_TYPE_HW_LAG_ENTRY,
-	HCAPI_CFA_RESC_TYPE_HW_MAX
-};
-
-struct hcapi_cfa_key_result {
-	uint64_t bucket_mem_ptr;
-	uint8_t bucket_idx;
-};
-
-/* common CFA register access macros */
-#define CFA_REG(x)		OFFSETOF(cfa_reg_t, cfa_##x)
-
-#ifndef TF_REG_WR
-#define TF_REG_WR(_p, x, y)  (*((uint32_t volatile *)(x)) = (y))
-#endif
-#ifndef TF_REG_RD
-#define TF_REG_RD(_p, x)  (*((uint32_t volatile *)(x)))
-#endif
-#ifndef TF_CFA_REG_RD
-#define TF_CFA_REG_RD(_p, x)	\
-	TF_REG_RD(0, (uint32_t)(_p)->base_addr + CFA_REG(x))
-#endif
-#ifndef TF_CFA_REG_WR
-#define TF_CFA_REG_WR(_p, x, y)	\
-	TF_REG_WR(0, (uint32_t)(_p)->base_addr + CFA_REG(x), y)
-#endif
+#define INVALID_U64 (0xFFFFFFFFFFFFFFFFULL)
+#define INVALID_U32 (0xFFFFFFFFUL)
+#define INVALID_U16 (0xFFFFUL)
+#define INVALID_U8 (0xFFUL)
 
-/* Constants used by Resource Manager Registration*/
-#define RM_CLIENT_NAME_MAX_LEN          32
+struct hcapi_cfa_devops;
 
 /**
- *  Resource Manager Data Structures used for resource requests
+ * CFA device information
  */
-struct hcapi_cfa_resc_req_entry {
-	uint16_t min;
-	uint16_t max;
-};
-
-struct hcapi_cfa_resc_req {
-	/* Wh+/SR specific onchip Action SRAM resources */
-	/* Validity of each sram type is indicated by the
-	 * corresponding sram type bit in the sram_resc_flags. When
-	 * set to 1, the CFA sram resource type is valid and amount of
-	 * resources for this type is reserved. Each sram resource
-	 * pool is identified by the starting index and number of
-	 * resources in the pool.
-	 */
-	uint32_t sram_resc_flags;
-	struct hcapi_cfa_resc_req_entry sram_resc[HCAPI_CFA_RESC_TYPE_SRAM_MAX];
-
-	/* Validity of each resource type is indicated by the
-	 * corresponding resource type bit in the hw_resc_flags. When
-	 * set to 1, the CFA resource type is valid and amount of
-	 * resource of this type is reserved. Each resource pool is
-	 * identified by the starting index and the number of
-	 * resources in the pool.
-	 */
-	uint32_t hw_resc_flags;
-	struct hcapi_cfa_resc_req_entry hw_resc[HCAPI_CFA_RESC_TYPE_HW_MAX];
-};
-
-struct hcapi_cfa_resc_req_db {
-	struct hcapi_cfa_resc_req rx;
-	struct hcapi_cfa_resc_req tx;
-};
-
-struct hcapi_cfa_resc_entry {
-	uint16_t start;
-	uint16_t stride;
-	uint16_t tag;
-};
-
-struct hcapi_cfa_resc {
-	/* Wh+/SR specific onchip Action SRAM resources */
-	/* Validity of each sram type is indicated by the
-	 * corresponding sram type bit in the sram_resc_flags. When
-	 * set to 1, the CFA sram resource type is valid and amount of
-	 * resources for this type is reserved. Each sram resource
-	 * pool is identified by the starting index and number of
-	 * resources in the pool.
-	 */
-	uint32_t sram_resc_flags;
-	struct hcapi_cfa_resc_entry sram_resc[HCAPI_CFA_RESC_TYPE_SRAM_MAX];
-
-	/* Validity of each resource type is indicated by the
-	 * corresponding resource type bit in the hw_resc_flags. When
-	 * set to 1, the CFA resource type is valid and amount of
-	 * resource of this type is reserved. Each resource pool is
-	 * identified by the starting index and the number of resources
-	 * in the pool.
-	 */
-	uint32_t hw_resc_flags;
-	struct hcapi_cfa_resc_entry hw_resc[HCAPI_CFA_RESC_TYPE_HW_MAX];
-};
-
-struct hcapi_cfa_resc_db {
-	struct hcapi_cfa_resc rx;
-	struct hcapi_cfa_resc tx;
+struct hcapi_cfa_devinfo {
+	/** [out] CFA device ops function pointer table */
+	const struct hcapi_cfa_devops *devops;
 };
 
 /**
- * This is the main data structure used by the CFA Resource
- * Manager.  This data structure holds all the state and table
- * management information.
+ *  \defgroup CFA_HCAPI_DEVICE_API
+ *  HCAPI used for writing to the hardware
+ *  @{
  */
-typedef struct hcapi_cfa_rm_data {
-	uint32_t dummy_data;
-} hcapi_cfa_rm_data_t;
-
-/* End RM support */
-
-struct hcapi_cfa_devops;
-
-struct hcapi_cfa_devinfo {
-	uint8_t global_cfg_data[CFA_GLOBAL_CFG_DATA_SZ];
-	struct hcapi_cfa_layout_tbl layouts;
-	struct hcapi_cfa_devops *devops;
-};
-
-int hcapi_cfa_dev_bind(enum hcapi_cfa_ver hw_ver,
-		       struct hcapi_cfa_devinfo *dev_info);
-
-int hcapi_cfa_key_compile_layout(struct hcapi_cfa_key_template *key_template,
-				 struct hcapi_cfa_key_layout *key_layout);
-uint64_t hcapi_cfa_key_hash(uint64_t *key_data, uint16_t bitlen);
-int
-hcapi_cfa_action_compile_layout(struct hcapi_cfa_action_template *act_template,
-				struct hcapi_cfa_action_layout *act_layout);
-int hcapi_cfa_action_init_obj(uint64_t *act_obj,
-			      struct hcapi_cfa_action_layout *act_layout);
-int hcapi_cfa_action_compute_ptr(uint64_t *act_obj,
-				 struct hcapi_cfa_action_layout *act_layout,
-				 uint32_t base_ptr);
-
-int hcapi_cfa_action_hw_op(struct hcapi_cfa_hwop *op,
-			   uint8_t *act_tbl,
-			   struct hcapi_cfa_data *act_obj);
-int hcapi_cfa_dev_hw_op(struct hcapi_cfa_hwop *op, uint16_t tbl_id,
-			struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_rm_register_client(hcapi_cfa_rm_data_t *data,
-				 const char *client_name,
-				 int *client_id);
-int hcapi_cfa_rm_unregister_client(hcapi_cfa_rm_data_t *data,
-				   int client_id);
-int hcapi_cfa_rm_query_resources(hcapi_cfa_rm_data_t *data,
-				 int client_id,
-				 uint16_t chnl_id,
-				 struct hcapi_cfa_resc_req_db *req_db);
-int hcapi_cfa_rm_query_resources_one(hcapi_cfa_rm_data_t *data,
-				     int clien_id,
-				     struct hcapi_cfa_resc_db *resc_db);
-int hcapi_cfa_rm_reserve_resources(hcapi_cfa_rm_data_t *data,
-				   int client_id,
-				   struct hcapi_cfa_resc_req_db *resc_req,
-				   struct hcapi_cfa_resc_db *resc_db);
-int hcapi_cfa_rm_release_resources(hcapi_cfa_rm_data_t *data,
-				   int client_id,
-				   struct hcapi_cfa_resc_req_db *resc_req,
-				   struct hcapi_cfa_resc_db *resc_db);
-int hcapi_cfa_rm_initialize(hcapi_cfa_rm_data_t *data);
 
-#if SUPPORT_CFA_HW_P4
-
-int hcapi_cfa_p4_dev_hw_op(struct hcapi_cfa_hwop *op, uint16_t tbl_id,
-			    struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_l2ctxt_hwop(struct hcapi_cfa_hwop *op,
-				   struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_l2ctxtrmp_hwop(struct hcapi_cfa_hwop *op,
-				      struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_tcam_hwop(struct hcapi_cfa_hwop *op,
-				 struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_prof_tcamrmp_hwop(struct hcapi_cfa_hwop *op,
-				    struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_wc_tcam_hwop(struct hcapi_cfa_hwop *op,
-			       struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_wc_tcam_rec_hwop(struct hcapi_cfa_hwop *op,
-				   struct hcapi_cfa_data *obj_data);
-int hcapi_cfa_p4_mirror_hwop(struct hcapi_cfa_hwop *op,
-			     struct hcapi_cfa_data *mirror);
-int hcapi_cfa_p4_global_cfg_hwop(struct hcapi_cfa_hwop *op,
-				 uint32_t type,
-				 struct hcapi_cfa_data *config);
-/* SUPPORT_CFA_HW_P4 */
-#elif SUPPORT_CFA_HW_P45
-int hcapi_cfa_p45_mirror_hwop(struct hcapi_cfa_hwop *op,
-			      struct hcapi_cfa_data *mirror);
-int hcapi_cfa_p45_global_cfg_hwop(struct hcapi_cfa_hwop *op,
-				  uint32_t type,
-				  struct hcapi_cfa_data *config);
-/* SUPPORT_CFA_HW_P45 */
-#endif
-/**
- *  HCAPI CFA device HW operation function callback definition
- *  This is standardized function callback hook to install different
- *  CFA HW table programming function callback.
+/** CFA device specific function hooks structure
+ *
+ * The following device hooks can be defined; unless noted otherwise, they are
+ * optional and can be filled with a null pointer. The purpose of these hooks
+ * is to support CFA device operations for different device variants.
  */
+struct hcapi_cfa_devops {
+	/** calculate a key hash for the provided key_data
+	 *
+	 * This API computes hash for a key.
+	 *
+	 * @param[in] key_data
+	 *   A pointer of the key data buffer
+	 *
+	 * @param[in] bitlen
+	 *   Number of bits of the key data
+	 *
+	 * @return
+	 *   64-bit hash value of the key data
+	 */
+	uint64_t (*hcapi_cfa_key_hash)(uint64_t *key_data, uint16_t bitlen);
 
-struct hcapi_cfa_tbl_cb {
-	/**
-	 * This function callback provides the functionality to read/write
-	 * HW table entry from a HW table.
+	/** hardware operation on the CFA EM key
+	 *
+	 * This API provides the functionality to program the exact match and
+	 * key data to exact match record memory.
 	 *
 	 * @param[in] op
 	 *   A pointer to the Hardware operation parameter
 	 *
-	 * @param[in] obj_data
-	 *   A pointer to the HW data object for the hardware operation
+	 * @param[in] key_tbl
+	 *   A pointer to the off-chip EM key table (applicable to EEM and
+	 *   SR2 EM only), set to NULL for on-chip EM key table or WC
+	 *   TCAM table.
 	 *
+	 * @param[in/out] key_obj
+	 *   A pointer to the key data object for the hardware operation which
+	 *   has the following contents:
+	 *     1. key record memory offset (index to WC TCAM or EM key hash
+	 *        value)
+	 *     2. key data
+	 *   When using the HWOP PUT, the key_obj holds the LREC and key to
+	 *   be written.
+	 *   When using the HWOP GET, the key_obj will be populated with the LREC
+	 *   and key which was specified by the key location object.
+	 *
+	 * @param[in/out] key_loc
+	 *   When using the HWOP PUT, this is a pointer to the key location
+	 *   data structure which holds the information of where the EM key
+	 *   is stored.  It holds the bucket index and the data pointer of
+	 *   a dynamic bucket that is chained to the static bucket.
+	 *   When using the HWOP GET, this is a pointer to the key location
+	 *   which should be retrieved.
+	 *
+	 *   (valid for SR2 only).
 	 * @return
 	 *   0 for SUCCESS, negative value for FAILURE
 	 */
-	int (*hwop_cb)(struct hcapi_cfa_hwop *op,
-		       struct hcapi_cfa_data *obj_data);
+	int (*hcapi_cfa_key_hw_op)(struct hcapi_cfa_hwop *op,
+				   struct hcapi_cfa_key_tbl *key_tbl,
+				   struct hcapi_cfa_key_data *key_data,
+				   struct hcapi_cfa_key_loc *key_loc);
 };
 
-#endif  /* HCAPI_CFA_H_ */
+/*@}*/
+
+extern const size_t CFA_RM_HANDLE_DATA_SIZE;
+
+#if SUPPORT_CFA_HW_ALL
+extern const struct hcapi_cfa_devops cfa_p4_devops;
+extern const struct hcapi_cfa_devops cfa_p58_devops;
+
+#elif defined(SUPPORT_CFA_HW_P4) && SUPPORT_CFA_HW_P4
+extern const struct hcapi_cfa_devops cfa_p4_devops;
+uint64_t hcapi_cfa_p4_key_hash(uint64_t *key_data, uint16_t bitlen);
+/* SUPPORT_CFA_HW_P4 */
+#elif defined(SUPPORT_CFA_HW_P58) && SUPPORT_CFA_HW_P58
+extern const struct hcapi_cfa_devops cfa_p58_devops;
+uint64_t hcapi_cfa_p58_key_hash(uint64_t *key_data, uint16_t bitlen);
+/* SUPPORT_CFA_HW_P58 */
+#endif
+
+#endif /* HCAPI_CFA_H_ */
diff --git a/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_defs.h b/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_defs.h
index 08f098ec86..8e5095a6ef 100644
--- a/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_defs.h
+++ b/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_defs.h
@@ -30,12 +30,10 @@
 
 #define CFA_GLOBAL_CFG_DATA_SZ (100)
 
+#if SUPPORT_CFA_HW_ALL
 #include "hcapi_cfa_p4.h"
-#define CFA_PROF_L2CTXT_TCAM_MAX_FIELD_CNT CFA_P40_PROF_L2_CTXT_TCAM_MAX_FLD
-#define CFA_PROF_L2CTXT_REMAP_MAX_FIELD_CNT CFA_P40_PROF_L2_CTXT_RMP_DR_MAX_FLD
-#define CFA_PROF_MAX_KEY_CFG_SZ sizeof(struct cfa_p4_prof_key_cfg)
-#define CFA_KEY_MAX_FIELD_CNT 41
-#define CFA_ACT_MAX_TEMPLATE_SZ sizeof(struct cfa_p4_action_template)
+#include "hcapi_cfa_p58.h"
+#endif /* SUPPORT_CFA_HW_ALL */
 
 /**
  * CFA HW version definition
@@ -87,43 +85,6 @@ enum hcapi_cfa_key_ctrlops {
 	HCAPI_CFA_KEY_CTRLOPS_MAX
 };
 
-/**
- * CFA HW field structure definition
- */
-struct hcapi_cfa_field {
-	/** [in] Starting bit position pf the HW field within a HW table
-	 *  entry.
-	 */
-	uint16_t bitpos;
-	/** [in] Number of bits for the HW field. */
-	uint8_t bitlen;
-};
-
-/**
- * CFA HW table entry layout structure definition
- */
-struct hcapi_cfa_layout {
-	/** [out] Bit order of layout */
-	bool is_msb_order;
-	/** [out] Size in bits of entry */
-	uint32_t total_sz_in_bits;
-	/** [out] data pointer of the HW layout fields array */
-	const struct hcapi_cfa_field *field_array;
-	/** [out] number of HW field entries in the HW layout field array */
-	uint32_t array_sz;
-	/** [out] layout_id - layout id associated with the layout */
-	uint16_t layout_id;
-};
-
-/**
- * CFA HW data object definition
- */
-struct hcapi_cfa_data_obj {
-	/** [in] HW field identifier. Used as an index to a HW table layout */
-	uint16_t field_id;
-	/** [in] Value of the HW field */
-	uint64_t val;
-};
 
 /**
  * CFA HW definition
@@ -280,348 +241,6 @@ struct hcapi_cfa_key_loc {
 	uint8_t bucket_idx;
 };
 
-/**
- * CFA HW layout table definition
- */
-struct hcapi_cfa_layout_tbl {
-	/** [out] data pointer to an array of fix formatted layouts supported.
-	 *  The index to the array is the CFA HW table ID
-	 */
-	const struct hcapi_cfa_layout *tbl;
-	/** [out] number of fix formatted layouts in the layout array */
-	uint16_t num_layouts;
-};
-
-/**
- * Key template consists of key fields that can be enabled/disabled
- * individually.
- */
-struct hcapi_cfa_key_template {
-	/** [in] key field enable field array, set 1 to the correspeonding
-	 *  field enable to make a field valid
-	 */
-	uint8_t field_en[CFA_KEY_MAX_FIELD_CNT];
-	/** [in] Identified if the key template is for TCAM. If false, the
-	 *  the key template is for EM. This field is mandantory for device that
-	 *  only support fix key formats.
-	 */
-	bool is_wc_tcam_key;
-};
-
-/**
- * key layout consist of field array, key bitlen, key ID, and other meta data
- * pertain to a key
- */
-struct hcapi_cfa_key_layout {
-	/** [out] key layout data */
-	struct hcapi_cfa_layout *layout;
-	/** [out] actual key size in number of bits */
-	uint16_t bitlen;
-	/** [out] key identifier and this field is only valid for device
-	 *  that supports fix key formats
-	 */
-	uint16_t id;
-	/** [out] Identified the key layout is WC TCAM key */
-	bool is_wc_tcam_key;
-	/** [out] total slices size, valid for WC TCAM key only. It can be
-	 *  used by the user to determine the total size of WC TCAM key slices
-	 *  in bytes.
-	 */
-	uint16_t slices_size;
-};
-
-/**
- * key layout memory contents
- */
-struct hcapi_cfa_key_layout_contents {
-	/** key layouts */
-	struct hcapi_cfa_key_layout key_layout;
-
-	/** layout */
-	struct hcapi_cfa_layout layout;
-
-	/** fields */
-	struct hcapi_cfa_field field_array[CFA_KEY_MAX_FIELD_CNT];
-};
-
-/**
- * Action template consists of action fields that can be enabled/disabled
- * individually.
- */
-struct hcapi_cfa_action_template {
-	/** [in] CFA version for the action template */
-	enum hcapi_cfa_ver hw_ver;
-	/** [in] action field enable field array, set 1 to the correspeonding
-	 *  field enable to make a field valid
-	 */
-	uint8_t data[CFA_ACT_MAX_TEMPLATE_SZ];
-};
-
-/**
- * action layout consist of field array, action wordlen and action format ID
- */
-struct hcapi_cfa_action_layout {
-	/** [in] action identifier */
-	uint16_t id;
-	/** [out] action layout data */
-	struct hcapi_cfa_layout *layout;
-	/** [out] actual action record size in number of bits */
-	uint16_t wordlen;
-};
-
-/**
- *  \defgroup CFA_HCAPI_PUT_API
- *  HCAPI used for writing to the hardware
- *  @{
- */
-
-/**
- * This API provides the functionality to program a specified value to a
- * HW field based on the provided programming layout.
- *
- * @param[in,out] obj_data
- *   A data pointer to a CFA HW key/mask data
- *
- * @param[in] layout
- *   A pointer to CFA HW programming layout
- *
- * @param[in] field_id
- *   ID of the HW field to be programmed
- *
- * @param[in] val
- *   Value of the HW field to be programmed
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_put_field(uint64_t *data_buf,
-			const struct hcapi_cfa_layout *layout,
-			uint16_t field_id, uint64_t val);
-
-/**
- * This API provides the functionality to program an array of field values
- * with corresponding field IDs to a number of profiler sub-block fields
- * based on the fixed profiler sub-block hardware programming layout.
- *
- * @param[in, out] obj_data
- *   A pointer to a CFA profiler key/mask object data
- *
- * @param[in] layout
- *   A pointer to CFA HW programming layout
- *
- * @param[in] field_tbl
- *   A pointer to an array that consists of the object field
- *   ID/value pairs
- *
- * @param[in] field_tbl_sz
- *   Number of entries in the table
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_put_fields(uint64_t *obj_data,
-			 const struct hcapi_cfa_layout *layout,
-			 struct hcapi_cfa_data_obj *field_tbl,
-			 uint16_t field_tbl_sz);
-
-/**
- * This API provides the functionality to write a value to a
- * field within the bit position and bit length of a HW data
- * object based on a provided programming layout.
- *
- * @param[in, out] act_obj
- *   A pointer of the action object to be initialized
- *
- * @param[in] layout
- *   A pointer of the programming layout
- *
- * @param field_id
- *   [in] Identifier of the HW field
- *
- * @param[in] bitpos_adj
- *   Bit position adjustment value
- *
- * @param[in] bitlen_adj
- *   Bit length adjustment value
- *
- * @param[in] val
- *   HW field value to be programmed
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_put_field_rel(uint64_t *obj_data,
-			    const struct hcapi_cfa_layout *layout,
-			    uint16_t field_id, int16_t bitpos_adj,
-			    int16_t bitlen_adj, uint64_t val);
-
-/*@}*/
-
-/**
- *  \defgroup CFA_HCAPI_GET_API
- *  HCAPI used for writing to the hardware
- *  @{
- */
-
-/**
- * This API provides the functionality to get the word length of
- * a layout object.
- *
- * @param[in] layout
- *   A pointer of the HW layout
- *
- * @return
- *   Word length of the layout object
- */
-uint16_t hcapi_cfa_get_wordlen(const struct hcapi_cfa_layout *layout);
-
-/**
- * The API provides the functionality to get bit offset and bit
- * length information of a field from a programming layout.
- *
- * @param[in] layout
- *   A pointer of the action layout
- *
- * @param[out] slice
- *   A pointer to the action offset info data structure
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_get_slice(const struct hcapi_cfa_layout *layout,
-			uint16_t field_id, struct hcapi_cfa_field *slice);
-
-/**
- * This API provides the functionality to read the value of a
- * CFA HW field from CFA HW data object based on the hardware
- * programming layout.
- *
- * @param[in] obj_data
- *   A pointer to a CFA HW key/mask object data
- *
- * @param[in] layout
- *   A pointer to CFA HW programming layout
- *
- * @param[in] field_id
- *   ID of the HW field to be programmed
- *
- * @param[out] val
- *   Value of the HW field
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_get_field(uint64_t *obj_data,
-			const struct hcapi_cfa_layout *layout,
-			uint16_t field_id, uint64_t *val);
-
-/**
- * This API provides the functionality to read a number of
- * HW fields from a CFA HW data object based on the hardware
- * programming layout.
- *
- * @param[in] obj_data
- *   A pointer to a CFA profiler key/mask object data
- *
- * @param[in] layout
- *   A pointer to CFA HW programming layout
- *
- * @param[in, out] field_tbl
- *   A pointer to an array that consists of the object field
- *   ID/value pairs
- *
- * @param[in] field_tbl_sz
- *   Number of entries in the table
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_get_fields(uint64_t *obj_data,
-			 const struct hcapi_cfa_layout *layout,
-			 struct hcapi_cfa_data_obj *field_tbl,
-			 uint16_t field_tbl_sz);
-
-/**
- * Get a value to a specific location relative to a HW field
- *
- * This API provides the functionality to read HW field from
- * a section of a HW data object identified by the bit position
- * and bit length from a given programming layout in order to avoid
- * reading the entire HW data object.
- *
- * @param[in] obj_data
- *   A pointer of the data object to read from
- *
- * @param[in] layout
- *   A pointer of the programming layout
- *
- * @param[in] field_id
- *   Identifier of the HW field
- *
- * @param[in] bitpos_adj
- *   Bit position adjustment value
- *
- * @param[in] bitlen_adj
- *   Bit length adjustment value
- *
- * @param[out] val
- *   Value of the HW field
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_get_field_rel(uint64_t *obj_data,
-			    const struct hcapi_cfa_layout *layout,
-			    uint16_t field_id, int16_t bitpos_adj,
-			    int16_t bitlen_adj, uint64_t *val);
-
-/**
- * This function is used to initialize a layout_contents structure
- *
- * The struct hcapi_cfa_key_layout is complex as there are three
- * layers of abstraction.  Each of those layer need to be properly
- * initialized.
- *
- * @param[in] layout_contents
- *  A pointer of the layout contents to initialize
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int
-hcapi_cfa_init_key_layout_contents(struct hcapi_cfa_key_layout_contents *cont);
-
-/**
- * This function is used to validate a key template
- *
- * The struct hcapi_cfa_key_template is complex as there are three
- * layers of abstraction.  Each of those layer need to be properly
- * validated.
- *
- * @param[in] key_template
- *  A pointer of the key template contents to validate
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int
-hcapi_cfa_is_valid_key_template(struct hcapi_cfa_key_template *key_template);
-
-/**
- * This function is used to validate a key layout
- *
- * The struct hcapi_cfa_key_layout is complex as there are three
- * layers of abstraction.  Each of those layer need to be properly
- * validated.
- *
- * @param[in] key_layout
- *  A pointer of the key layout contents to validate
- *
- * @return
- *   0 for SUCCESS, negative value for FAILURE
- */
-int hcapi_cfa_is_valid_key_layout(struct hcapi_cfa_key_layout *key_layout);
-
 /**
  * This function is used to hash E/EM keys
  *
diff --git a/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_p58.h b/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_p58.h
new file mode 100644
index 0000000000..b2535098d2
--- /dev/null
+++ b/drivers/net/bnxt/hcapi/cfa/hcapi_cfa_p58.h
@@ -0,0 +1,411 @@
+/*
+ *   Copyright(c) Broadcom Limited.
+ *   All rights reserved.
+ */
+
+#ifndef _HCAPI_CFA_P58_H_
+#define _HCAPI_CFA_P58_H_
+
+/** CFA phase 5.8 fix formatted table(layout) ID definition
+ *
+ */
+enum cfa_p58_tbl_id {
+	CFA_P58_TBL_ILT = 0,
+	CFA_P58_TBL_L2CTXT_TCAM,
+	CFA_P58_TBL_L2CTXT_REMAP,
+	CFA_P58_TBL_PROF_TCAM,
+	CFA_P58_TBL_PROF_TCAM_REMAP,
+	CFA_P58_TBL_WC_TCAM,
+	CFA_P58_TBL_WC_TCAM_REC,
+	CFA_P58_TBL_VEB_TCAM,
+	CFA_P58_TBL_SP_TCAM,
+	/** Default Profile TCAM/Lookup Action Record Pointer Table */
+	CFA_P58_TBL_PROF_PARIF_DFLT_ACT_REC_PTR,
+	/** Error Profile TCAM Miss Action Record Pointer Table */
+	CFA_P58_TBL_PROF_PARIF_ERR_ACT_REC_PTR,
+	/** SR2 VNIC/SVIF Properties Table */
+	CFA_P58_TBL_VSPT,
+	CFA_P58_TBL_MAX
+};
+
+#define CFA_P58_PROF_MAX_KEYS 4
+enum cfa_p58_mac_sel_mode {
+	CFA_P58_MAC_SEL_MODE_FIRST = 0,
+	CFA_P58_MAC_SEL_MODE_LOWEST = 1,
+};
+
+struct cfa_p58_prof_key_cfg {
+	uint8_t mac_sel[CFA_P58_PROF_MAX_KEYS];
+#define CFA_P58_PROF_MAC_SEL_DMAC0 (1 << 0)
+#define CFA_P58_PROF_MAC_SEL_T_MAC0 (1 << 1)
+#define CFA_P58_PROF_MAC_SEL_OUTERMOST_MAC0 (1 << 2)
+#define CFA_P58_PROF_MAC_SEL_DMAC1 (1 << 3)
+#define CFA_P58_PROF_MAC_SEL_T_MAC1 (1 << 4)
+#define CFA_P58_PROF_MAC_OUTERMOST_MAC1 (1 << 5)
+	uint8_t vlan_sel[CFA_P58_PROF_MAX_KEYS];
+#define CFA_P58_PROFILER_VLAN_SEL_INNER_HDR 0
+#define CFA_P58_PROFILER_VLAN_SEL_TUNNEL_HDR 1
+#define CFA_P58_PROFILER_VLAN_SEL_OUTERMOST_HDR 2
+	uint8_t pass_cnt;
+	enum cfa_p58_mac_sel_mode mode;
+};
+
+/**
+ * CFA action layout definition
+ */
+
+#define CFA_P58_ACTION_MAX_LAYOUT_SIZE 184
+
+/**
+ * Action object template structure
+ *
+ * Template structure presents data fields that are necessary to know
+ * at the beginning of Action Builder (AB) processing, i.e. before the
+ * AB compilation. One such example could be a template that is
+ * flexible in size (Encap Record) and the presence of these fields
+ * allows for determining the template size as well as where the
+ * fields are located in the record.
+ *
+ * The template may also present fields that are not made visible to
+ * the caller by way of the action fields.
+ *
+ * Template fields also allow for additional checking on user visible
+ * fields. One such example could be the encap pointer behavior on a
+ * CFA_P58_ACT_OBJ_TYPE_ACT or CFA_P58_ACT_OBJ_TYPE_ACT_SRAM.
+ */
+struct cfa_p58_action_template {
+	/** Action Object type
+	 *
+	 * Controls the type of the Action Template
+	 */
+	enum {
+		/** Select this type to build an Action Record Object
+		 */
+		CFA_P58_ACT_OBJ_TYPE_ACT,
+		/** Select this type to build an Action Statistics
+		 * Object
+		 */
+		CFA_P58_ACT_OBJ_TYPE_STAT,
+		/** Select this type to build a SRAM Action Record
+		 * Object.
+		 */
+		CFA_P58_ACT_OBJ_TYPE_ACT_SRAM,
+		/** Select this type to build a SRAM Action
+		 * Encapsulation Object.
+		 */
+		CFA_P58_ACT_OBJ_TYPE_ENCAP_SRAM,
+		/** Select this type to build a SRAM Action Modify
+		 * Object, with IPv4 capability.
+		 */
+		/* In case of Stingray the term Modify is used for the 'NAT
+		 * action'. Action builder is leveraged to fill in the NAT
+		 * object which then can be referenced by the action
+		 * record.
+		 */
+		CFA_P58_ACT_OBJ_TYPE_MODIFY_IPV4_SRAM,
+		/** Select this type to build a SRAM Action Source
+		 * Property Object.
+		 */
+		/* In case of Stingray this is not a 'pure' action record.
+		 * Action builder is leveraged to fill in the Source Property
+		 * object which can then be referenced by the action
+		 * record.
+		 */
+		CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM,
+		/** Select this type to build a SRAM Action Statistics
+		 * Object
+		 */
+		CFA_P58_ACT_OBJ_TYPE_STAT_SRAM,
+	} obj_type;
+
+	/** Action Control
+	 *
+	 * Controls the internals of the Action Template
+	 *
+	 * act is valid when:
+	 * (obj_type == CFA_P58_ACT_OBJ_TYPE_ACT)
+	 */
+	/*
+	 * Stat and encap are always inline for EEM as table scope
+	 * allocation does not allow for separate Stats allocation,
+	 * but has the xx_inline flags as to be forward compatible
+	 * with Stingray 2, always treated as TRUE.
+	 */
+	struct {
+		/** Set to CFA_HCAPI_TRUE to enable statistics
+		 */
+		uint8_t stat_enable;
+		/** Set to CFA_HCAPI_TRUE to enable statistics to be inlined
+		 */
+		uint8_t stat_inline;
+
+		/** Set to CFA_HCAPI_TRUE to enable encapsulation
+		 */
+		uint8_t encap_enable;
+		/** Set to CFA_HCAPI_TRUE to enable encapsulation to be inlined
+		 */
+		uint8_t encap_inline;
+	} act;
+
+	/** Modify Setting
+	 *
+	 * Controls the type of the Modify Action the template is
+	 * describing
+	 *
+	 * modify is valid when:
+	 * (obj_type == CFA_P58_ACT_OBJ_TYPE_MODIFY_SRAM)
+	 */
+	enum {
+		/** Set to enable Modify of Source IPv4 Address
+		 */
+		CFA_P58_MR_REPLACE_SOURCE_IPV4 = 0,
+		/** Set to enable Modify of Destination IPv4 Address
+		 */
+		CFA_P58_MR_REPLACE_DEST_IPV4
+	} modify;
+
+	/** Encap Control
+	 * Controls the type of encapsulation the template is
+	 * describing
+	 *
+	 * encap is valid when:
+	 * ((obj_type == CFA_P58_ACT_OBJ_TYPE_ACT) &&
+	 *   act.encap_enable) ||
+	 * ((obj_type == CFA_P58_ACT_OBJ_TYPE_SRC_PROP_SRAM)
+	 */
+	struct {
+		/* Direction is required as Stingray Encap on RX is
+		 * limited to l2 and VTAG only.
+		 */
+		/** Receive or Transmit direction
+		 */
+		uint8_t direction;
+		/** Set to CFA_HCAPI_TRUE to enable L2 capability in the
+		 *  template
+		 */
+		uint8_t l2_enable;
+		/** vtag controls the Encap Vector - VTAG Encoding, 4 bits
+		 *
+		 * <ul>
+		 * <li> CFA_P58_ACT_ENCAP_VTAGS_PUSH_0, default, no VLAN
+		 *      Tags applied
+		 * <li> CFA_P58_ACT_ENCAP_VTAGS_PUSH_1, adds capability to
+		 *      set 1 VLAN Tag. Action Template compile adds
+		 *      the following field to the action object
+		 *      ::TF_ER_VLAN1
+		 * <li> CFA_P58_ACT_ENCAP_VTAGS_PUSH_2, adds capability to
+		 *      set 2 VLAN Tags. Action Template compile adds
+		 *      the following fields to the action object
+		 *      ::TF_ER_VLAN1 and ::TF_ER_VLAN2
+		 * </ul>
+		 */
+		enum { CFA_P58_ACT_ENCAP_VTAGS_PUSH_0 = 0,
+		       CFA_P58_ACT_ENCAP_VTAGS_PUSH_1,
+		       CFA_P58_ACT_ENCAP_VTAGS_PUSH_2 } vtag;
+
+		/*
+		 * The remaining fields are NOT supported when
+		 * direction is RX and ((obj_type ==
+		 * CFA_P58_ACT_OBJ_TYPE_ACT) && act.encap_enable).
+		 * ab_compile_layout will perform the checking and
+		 * skip remaining fields.
+		 */
+		/** L3 Encap controls the Encap Vector - L3 Encoding,
+		 *  3 bits. Defines the type of L3 Encapsulation the
+		 *  template is describing.
+		 * <ul>
+		 * <li> CFA_P58_ACT_ENCAP_L3_NONE, default, no L3
+		 *      Encapsulation processing.
+		 * <li> CFA_P58_ACT_ENCAP_L3_IPV4, enables L3 IPv4
+		 *      Encapsulation.
+		 * <li> CFA_P58_ACT_ENCAP_L3_IPV6, enables L3 IPv6
+		 *      Encapsulation.
+		 * <li> CFA_P58_ACT_ENCAP_L3_MPLS_8847, enables L3 MPLS
+		 *      8847 Encapsulation.
+		 * <li> CFA_P58_ACT_ENCAP_L3_MPLS_8848, enables L3 MPLS
+		 *      8848 Encapsulation.
+		 * </ul>
+		 */
+		enum {
+			/** Set to disable any L3 encapsulation
+			 * processing, default
+			 */
+			CFA_P58_ACT_ENCAP_L3_NONE = 0,
+			/** Set to enable L3 IPv4 encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_L3_IPV4 = 4,
+			/** Set to enable L3 IPv6 encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_L3_IPV6 = 5,
+			/** Set to enable L3 MPLS 8847 encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_L3_MPLS_8847 = 6,
+			/** Set to enable L3 MPLS 8848 encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_L3_MPLS_8848 = 7
+		} l3;
+
+#define CFA_P58_ACT_ENCAP_MAX_MPLS_LABELS 8
+		/** 1-8 labels, valid when
+		 * (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8847) ||
+		 * (l3 == CFA_P58_ACT_ENCAP_L3_MPLS_8848)
+		 *
+		 * MAX number of MPLS Labels 8.
+		 */
+		uint8_t l3_num_mpls_labels;
+
+		/** Set to CFA_HCAPI_TRUE to enable L4 capability in the
+		 * template.
+		 *
+		 * CFA_HCAPI_TRUE adds ::TF_EN_UDP_SRC_PORT and
+		 * ::TF_EN_UDP_DST_PORT to the template.
+		 */
+		uint8_t l4_enable;
+
+		/** Tunnel Encap controls the Encap Vector - Tunnel
+		 *  Encap, 3 bits. Defines the type of Tunnel
+		 *  encapsulation the template is describing
+		 * <ul>
+		 * <li> CFA_P58_ACT_ENCAP_TNL_NONE, default, no Tunnel
+		 *      Encapsulation processing.
+		 * <li> CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL
+		 * <li> CFA_P58_ACT_ENCAP_TNL_VXLAN. NOTE: Expects
+		 *      l4_enable set to CFA_P58_TRUE;
+		 * <li> CFA_P58_ACT_ENCAP_TNL_NGE. NOTE: Expects l4_enable
+		 *      set to CFA_P58_TRUE;
+		 * <li> CFA_P58_ACT_ENCAP_TNL_NVGRE. NOTE: only valid if
+		 *      l4_enable set to CFA_HCAPI_FALSE.
+		 * <li> CFA_P58_ACT_ENCAP_TNL_GRE.NOTE: only valid if
+		 *      l4_enable set to CFA_HCAPI_FALSE.
+		 * <li> CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4
+		 * <li> CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
+		 * </ul>
+		 */
+		enum {
+			/** Set to disable Tunnel header encapsulation
+			 * processing, default
+			 */
+			CFA_P58_ACT_ENCAP_TNL_NONE = 0,
+			/** Set to enable Tunnel Generic Full header
+			 * encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL,
+			/** Set to enable VXLAN header encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_VXLAN,
+			/** Set to enable NGE (VXLAN2) header encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_NGE,
+			/** Set to enable NVGRE header encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_NVGRE,
+			/** Set to enable GRE header encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_GRE,
+			/** Set to enable Generic header after Tunnel
+			 * L4 encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4,
+			/** Set to enable Generic header after Tunnel
+			 * encapsulation
+			 */
+			CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL
+		} tnl;
+
+		/** Number of bytes of generic tunnel header,
+		 * valid when
+		 * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_FULL) ||
+		 * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TL4) ||
+		 * (tnl == CFA_P58_ACT_ENCAP_TNL_GENERIC_AFTER_TNL)
+		 */
+		uint8_t tnl_generic_size;
+		/** Number of 32b words of nge options,
+		 * valid when
+		 * (tnl == CFA_P58_ACT_ENCAP_TNL_NGE)
+		 */
+		uint8_t tnl_nge_op_len;
+		/* Currently not planned */
+		/* Custom Header */
+		/*	uint8_t custom_enable; */
+	} encap;
+};
+
+/**
+ * Enumeration of SRAM entry types, used for allocation of
+ * fixed SRAM entities. The memory model for CFA HCAPI
+ * determines if an SRAM entry type is supported.
+ */
+enum cfa_p58_action_sram_entry_type {
+	/* NOTE: Any additions to this enum must be reflected on FW
+	 * side as well.
+	 */
+
+	/** SRAM Action Record */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_ACT,
+	/** SRAM Action Encap 8 Bytes */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_8B,
+	/** SRAM Action Encap 16 Bytes */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_16B,
+	/** SRAM Action Encap 64 Bytes */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_ENCAP_64B,
+	/** SRAM Action Modify IPv4 Source */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_SRC,
+	/** SRAM Action Modify IPv4 Destination */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_MODIFY_IPV4_DEST,
+	/** SRAM Action Source Properties SMAC */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC,
+	/** SRAM Action Source Properties SMAC IPv4 */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV4,
+	/** SRAM Action Source Properties SMAC IPv6 */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_SP_SMAC_IPV6,
+	/** SRAM Action Statistics 64 Bits */
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_STATS_64,
+	CFA_P58_ACTION_SRAM_ENTRY_TYPE_MAX
+};
+
+/**
+ * SRAM Action Record structure holding either an action index or an
+ * action ptr.
+ */
+union cfa_p58_action_sram_act_record {
+	/** SRAM Action idx specifies the offset of the SRAM
+	 * element within its SRAM Entry Type block. This
+	 * index can be written into, e.g., an L2 Context. Use
+	 * this type for all SRAM Action Record types except
+	 * SRAM Full Action records, which use act_ptr instead.
+	 */
+	uint16_t act_idx;
+	/** SRAM Full Action is special in that it needs an
+	 * action record pointer. This pointer can be written
+	 * into, e.g., a Wildcard TCAM entry.
+	 */
+	uint32_t act_ptr;
+};
+
+/**
+ * cfa_p58_action_param parameter definition
+ */
+struct cfa_p58_action_param {
+	/**
+	 * [in] receive or transmit direction
+	 */
+	uint8_t dir;
+	/**
+	 * [in] type of the SRAM allocation
+	 */
+	enum cfa_p58_action_sram_entry_type type;
+	/**
+	 * [in] action record to set. The 'type' specified lists the
+	 *	record definition to use in the passed in record.
+	 */
+	union cfa_p58_action_sram_act_record record;
+	/**
+	 * [in] number of elements in act_data
+	 */
+	uint32_t act_size;
+	/**
+	 * [in] ptr to array of action data
+	 */
+	uint64_t *act_data;
+};
+#endif /* _HCAPI_CFA_P58_H_ */
diff --git a/drivers/net/bnxt/tf_core/meson.build b/drivers/net/bnxt/tf_core/meson.build
index 373ee0413b..2c02214d83 100644
--- a/drivers/net/bnxt/tf_core/meson.build
+++ b/drivers/net/bnxt/tf_core/meson.build
@@ -22,7 +22,6 @@ sources += files(
         'tf_device_p4.c',
         'tf_device_p58.c',
         'tf_identifier.c',
-        'tf_shadow_tbl.c',
         'tf_shadow_tcam.c',
         'tf_tcam.c',
         'tf_util.c',
diff --git a/drivers/net/bnxt/tf_core/tf_core.h b/drivers/net/bnxt/tf_core/tf_core.h
index 4fe0590569..0cc3719a1b 100644
--- a/drivers/net/bnxt/tf_core/tf_core.h
+++ b/drivers/net/bnxt/tf_core/tf_core.h
@@ -153,6 +153,30 @@ enum tf_device_type {
 	TF_DEVICE_TYPE_MAX     /**< Maximum   */
 };
 
+/**
+ * Module types
+ */
+enum tf_module_type {
+	/**
+	 * Identifier module
+	 */
+	TF_MODULE_TYPE_IDENTIFIER,
+	/**
+	 * Table type module
+	 */
+	TF_MODULE_TYPE_TABLE,
+	/**
+	 * TCAM module
+	 */
+	TF_MODULE_TYPE_TCAM,
+	/**
+	 * EM module
+	 */
+	TF_MODULE_TYPE_EM,
+	TF_MODULE_TYPE_MAX
+};
+
+
 /**
  * Identifier resource types
  */
diff --git a/drivers/net/bnxt/tf_core/tf_device.c b/drivers/net/bnxt/tf_core/tf_device.c
index d072b9877c..61b3746d8b 100644
--- a/drivers/net/bnxt/tf_core/tf_device.c
+++ b/drivers/net/bnxt/tf_core/tf_device.c
@@ -8,6 +8,7 @@
 #include "tf_device_p58.h"
 #include "tfp.h"
 #include "tf_em.h"
+#include "tf_rm.h"
 
 struct tf;
 
@@ -18,8 +19,8 @@ static int tf_dev_unbind_p58(struct tf *tfp);
 /**
  * Resource Reservation Check function
  *
- * [in] tfp
- *   Pointer to TF handle
+ * [in] count
+ *   Number of module subtypes
  *
  * [in] cfg
  *   Pointer to rm element config
@@ -28,11 +29,10 @@ static int tf_dev_unbind_p58(struct tf *tfp);
  *   Pointer to resource reservation array
  *
  * Returns
- *   - (n) number of tables that have non-zero reservation count.
+ *   - (n) number of tables in module that have non-zero reservation count.
  */
 static int
-tf_dev_reservation_check(struct tf *tfp __rte_unused,
-			 uint16_t count,
+tf_dev_reservation_check(uint16_t count,
 			 struct tf_rm_element_cfg *cfg,
 			 uint16_t *reservations)
 {
@@ -94,8 +94,7 @@ tf_dev_bind_p4(struct tf *tfp,
 
 	/* Initialize the modules */
 
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_IDENT_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_IDENT_TYPE_MAX,
 					   tf_ident_p4,
 					   (uint16_t *)resources->ident_cnt);
 	if (rsv_cnt) {
@@ -113,8 +112,7 @@ tf_dev_bind_p4(struct tf *tfp,
 		no_rsv_flag = false;
 	}
 
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_TBL_TYPE_MAX,
 					   tf_tbl_p4,
 					   (uint16_t *)resources->tbl_cnt);
 	if (rsv_cnt) {
@@ -132,8 +130,7 @@ tf_dev_bind_p4(struct tf *tfp,
 		no_rsv_flag = false;
 	}
 
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_TCAM_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX,
 					   tf_tcam_p4,
 					   (uint16_t *)resources->tcam_cnt);
 	if (rsv_cnt) {
@@ -155,8 +152,7 @@ tf_dev_bind_p4(struct tf *tfp,
 	 */
 
 	em_cfg.cfg = tf_em_ext_p4;
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_EM_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_EM_TBL_TYPE_MAX,
 					   em_cfg.cfg,
 					   (uint16_t *)resources->em_cnt);
 	if (rsv_cnt) {
@@ -175,8 +171,7 @@ tf_dev_bind_p4(struct tf *tfp,
 	/*
 	 * EM
 	 */
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_EM_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_EM_TBL_TYPE_MAX,
 					   tf_em_int_p4,
 					   (uint16_t *)resources->em_cnt);
 	if (rsv_cnt) {
@@ -360,10 +355,7 @@ tf_dev_bind_p58(struct tf *tfp,
 	/* Initial function initialization */
 	dev_handle->ops = &tf_dev_ops_p58_init;
 
-	/* Initialize the modules */
-
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_IDENT_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_IDENT_TYPE_MAX,
 					   tf_ident_p58,
 					   (uint16_t *)resources->ident_cnt);
 	if (rsv_cnt) {
@@ -380,8 +372,7 @@ tf_dev_bind_p58(struct tf *tfp,
 		no_rsv_flag = false;
 	}
 
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_TBL_TYPE_MAX,
 					   tf_tbl_p58,
 					   (uint16_t *)resources->tbl_cnt);
 	if (rsv_cnt) {
@@ -398,8 +389,7 @@ tf_dev_bind_p58(struct tf *tfp,
 		no_rsv_flag = false;
 	}
 
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_TCAM_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_TCAM_TBL_TYPE_MAX,
 					   tf_tcam_p58,
 					   (uint16_t *)resources->tcam_cnt);
 	if (rsv_cnt) {
@@ -419,8 +409,7 @@ tf_dev_bind_p58(struct tf *tfp,
 	/*
 	 * EM
 	 */
-	rsv_cnt = tf_dev_reservation_check(tfp,
-					   TF_EM_TBL_TYPE_MAX,
+	rsv_cnt = tf_dev_reservation_check(TF_EM_TBL_TYPE_MAX,
 					   tf_em_int_p58,
 					   (uint16_t *)resources->em_cnt);
 	if (rsv_cnt) {
@@ -593,10 +582,10 @@ tf_dev_bind_ops(enum tf_device_type type,
 	switch (type) {
 	case TF_DEVICE_TYPE_WH:
 	case TF_DEVICE_TYPE_SR:
-		dev_handle->ops = &tf_dev_ops_p4;
+		dev_handle->ops = &tf_dev_ops_p4_init;
 		break;
 	case TF_DEVICE_TYPE_THOR:
-		dev_handle->ops = &tf_dev_ops_p58;
+		dev_handle->ops = &tf_dev_ops_p58_init;
 		break;
 	default:
 		TFP_DRV_LOG(ERR,
diff --git a/drivers/net/bnxt/tf_core/tf_device.h b/drivers/net/bnxt/tf_core/tf_device.h
index 4f4120c603..2cbb42fe2a 100644
--- a/drivers/net/bnxt/tf_core/tf_device.h
+++ b/drivers/net/bnxt/tf_core/tf_device.h
@@ -16,29 +16,6 @@
 struct tf;
 struct tf_session;
 
-/**
- * Device module types
- */
-enum tf_device_module_type {
-	/**
-	 * Identifier module
-	 */
-	TF_DEVICE_MODULE_TYPE_IDENTIFIER,
-	/**
-	 * Table type module
-	 */
-	TF_DEVICE_MODULE_TYPE_TABLE,
-	/**
-	 * TCAM module
-	 */
-	TF_DEVICE_MODULE_TYPE_TCAM,
-	/**
-	 * EM module
-	 */
-	TF_DEVICE_MODULE_TYPE_EM,
-	TF_DEVICE_MODULE_TYPE_MAX
-};
-
 /**
  * The Device module provides a general device template. A supported
  * device type should implement one or more of the listed function
diff --git a/drivers/net/bnxt/tf_core/tf_device_p4.c b/drivers/net/bnxt/tf_core/tf_device_p4.c
index fbe92b7733..d0bede89e3 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p4.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p4.c
@@ -74,29 +74,10 @@ static int
 tf_dev_p4_get_max_types(struct tf *tfp,
 			uint16_t *max_types)
 {
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	int rc;
-
 	if (max_types == NULL || tfp == NULL)
 		return -EINVAL;
 
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc)
-		return rc;
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc)
-		return rc;
-
-	if (dev->type == TF_DEVICE_TYPE_WH)
-		*max_types = CFA_RESOURCE_TYPE_P4_LAST + 1;
-	else if (dev->type == TF_DEVICE_TYPE_SR)
-		*max_types = CFA_RESOURCE_TYPE_P45_LAST + 1;
-	else
-		return -ENODEV;
+	*max_types = CFA_RESOURCE_TYPE_P4_LAST + 1;
 
 	return 0;
 }
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.c b/drivers/net/bnxt/tf_core/tf_device_p58.c
index 688d987cb7..50a8d82074 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.c
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.c
@@ -58,25 +58,11 @@ const char *tf_resource_str_p58[CFA_RESOURCE_TYPE_P58_LAST + 1] = {
  */
 static int
 tf_dev_p58_get_max_types(struct tf *tfp,
-			uint16_t *max_types)
+			 uint16_t *max_types)
 {
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	int rc;
-
 	if (max_types == NULL || tfp == NULL)
 		return -EINVAL;
 
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc)
-		return rc;
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc)
-		return rc;
-
 	*max_types = CFA_RESOURCE_TYPE_P58_LAST + 1;
 
 	return 0;
@@ -153,41 +139,6 @@ tf_dev_p58_get_tcam_slice_info(struct tf *tfp __rte_unused,
 	return 0;
 }
 
-static int
-tf_dev_p58_map_parif(struct tf *tfp __rte_unused,
-		    uint16_t parif_bitmask,
-		    uint16_t pf,
-		    uint8_t *data,
-		    uint8_t *mask,
-		    uint16_t sz_in_bytes)
-{
-	uint32_t parif_pf[2] = { 0 };
-	uint32_t parif_pf_mask[2] = { 0 };
-	uint32_t parif;
-	uint32_t shift;
-
-	if (sz_in_bytes != sizeof(uint64_t))
-		return -ENOTSUP;
-
-	for (parif = 0; parif < TF_DEV_P58_PARIF_MAX; parif++) {
-		if (parif_bitmask & (1UL << parif)) {
-			if (parif < 8) {
-				shift = 4 * parif;
-				parif_pf_mask[0] |= TF_DEV_P58_PF_MASK << shift;
-				parif_pf[0] |= pf << shift;
-			} else {
-				shift = 4 * (parif - 8);
-				parif_pf_mask[1] |= TF_DEV_P58_PF_MASK << shift;
-				parif_pf[1] |= pf << shift;
-			}
-		}
-	}
-	tfp_memcpy(data, parif_pf, sz_in_bytes);
-	tfp_memcpy(mask, parif_pf_mask, sz_in_bytes);
-
-	return 0;
-}
-
 static int tf_dev_p58_get_mailbox(void)
 {
 	return TF_CHIMP_MB;
@@ -268,7 +219,7 @@ const struct tf_dev_ops tf_dev_ops_p58 = {
 	.tf_dev_delete_ext_em_entry = NULL,
 	.tf_dev_alloc_tbl_scope = NULL,
 	.tf_dev_map_tbl_scope = NULL,
-	.tf_dev_map_parif = tf_dev_p58_map_parif,
+	.tf_dev_map_parif = NULL,
 	.tf_dev_free_tbl_scope = NULL,
 	.tf_dev_set_if_tbl = tf_if_tbl_set,
 	.tf_dev_get_if_tbl = tf_if_tbl_get,
diff --git a/drivers/net/bnxt/tf_core/tf_device_p58.h b/drivers/net/bnxt/tf_core/tf_device_p58.h
index de7bb1cd76..abd916985e 100644
--- a/drivers/net/bnxt/tf_core/tf_device_p58.h
+++ b/drivers/net/bnxt/tf_core/tf_device_p58.h
@@ -64,6 +64,105 @@ struct tf_rm_element_cfg tf_tbl_p58[TF_TBL_TYPE_MAX] = {
 	[TF_TBL_TYPE_MIRROR_CONFIG] = {
 		TF_RM_ELEM_CFG_HCAPI_BA, CFA_RESOURCE_TYPE_P58_MIRROR
 	},
+	/* Policy - ARs in bank 1 */
+	[TF_TBL_TYPE_FULL_ACT_RECORD] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
+		.slices          = 4,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_COMPACT_ACT_RECORD] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_FULL_ACT_RECORD,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_1,
+		.slices          = 8,
+		.divider         = 8,
+	},
+	/* Policy - Encaps in bank 2 */
+	[TF_TBL_TYPE_ACT_ENCAP_8B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 8,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_ENCAP_16B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 4,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_ENCAP_32B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 2,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_ENCAP_64B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 1,
+		.divider         = 8,
+	},
+	/* Policy - Modify in bank 2 with Encaps */
+	[TF_TBL_TYPE_ACT_MODIFY_8B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 8,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_MODIFY_16B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 4,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_MODIFY_32B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 2,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_MODIFY_64B] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_ENCAP_8B,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_2,
+		.slices          = 1,
+		.divider         = 8,
+	},
+	/* Policy - SP in bank 0 */
+	[TF_TBL_TYPE_ACT_SP_SMAC] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
+		.slices          = 8,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_SP_SMAC_IPV4] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_SP_SMAC,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
+		.slices          = 4,
+		.divider         = 8,
+	},
+	[TF_TBL_TYPE_ACT_SP_SMAC_IPV6] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+		.parent_subtype  = TF_TBL_TYPE_ACT_SP_SMAC,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_0,
+		.slices          = 2,
+		.divider         = 8,
+	},
+	/* Policy - Stats in bank 3 */
+	[TF_TBL_TYPE_ACT_STATS_64] = {
+		.cfg_type        = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
+		.hcapi_type      = CFA_RESOURCE_TYPE_P58_SRAM_BANK_3,
+		.slices          = 8,
+		.divider         = 8,
+	},
 };
 
 struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = {
@@ -72,7 +171,16 @@ struct tf_rm_element_cfg tf_em_int_p58[TF_EM_TBL_TYPE_MAX] = {
 	},
 };
 
-struct tf_if_tbl_cfg tf_if_tbl_p58[TF_IF_TBL_TYPE_MAX];
+struct tf_if_tbl_cfg tf_if_tbl_p58[TF_IF_TBL_TYPE_MAX] = {
+	[TF_IF_TBL_TYPE_PROF_PARIF_DFLT_ACT_REC_PTR] = {
+		TF_IF_TBL_CFG, CFA_P58_TBL_PROF_PARIF_DFLT_ACT_REC_PTR},
+	[TF_IF_TBL_TYPE_PROF_PARIF_ERR_ACT_REC_PTR] = {
+		TF_IF_TBL_CFG, CFA_P58_TBL_PROF_PARIF_ERR_ACT_REC_PTR},
+	[TF_IF_TBL_TYPE_ILT] = {
+		TF_IF_TBL_CFG, CFA_P58_TBL_ILT},
+	[TF_IF_TBL_TYPE_VSPT] = {
+		TF_IF_TBL_CFG, CFA_P58_TBL_VSPT},
+};
 
 struct tf_global_cfg_cfg tf_global_cfg_p58[TF_GLOBAL_CFG_TYPE_MAX] = {
 	[TF_TUNNEL_ENCAP] = {
diff --git a/drivers/net/bnxt/tf_core/tf_em_common.c b/drivers/net/bnxt/tf_core/tf_em_common.c
index 6cd6086685..589df60041 100644
--- a/drivers/net/bnxt/tf_core/tf_em_common.c
+++ b/drivers/net/bnxt/tf_core/tf_em_common.c
@@ -54,7 +54,7 @@ tbl_scope_cb_find(uint32_t tbl_scope_id)
 
 	/* Check that id is valid */
 	parms.rm_db = eem_db[TF_DIR_RX];
-	parms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
+	parms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
 	parms.index = tbl_scope_id;
 	parms.allocated = &allocated;
 
@@ -895,7 +895,7 @@ tf_em_ext_common_bind(struct tf *tfp,
 		return -EINVAL;
 	}
 
-	db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
+	db_cfg.module = TF_MODULE_TYPE_EM;
 	db_cfg.num_elements = parms->num_elements;
 	db_cfg.cfg = parms->cfg;
 
diff --git a/drivers/net/bnxt/tf_core/tf_em_host.c b/drivers/net/bnxt/tf_core/tf_em_host.c
index 69f7e5bddd..166f397935 100644
--- a/drivers/net/bnxt/tf_core/tf_em_host.c
+++ b/drivers/net/bnxt/tf_core/tf_em_host.c
@@ -379,7 +379,7 @@ tf_em_ext_alloc(struct tf *tfp, struct tf_alloc_tbl_scope_parms *parms)
 
 	/* Get Table Scope control block from the session pool */
 	aparms.rm_db = eem_db[TF_DIR_RX];
-	aparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
+	aparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
 	aparms.index = (uint32_t *)&parms->tbl_scope_id;
 	rc = tf_rm_allocate(&aparms);
 	if (rc) {
@@ -488,7 +488,7 @@ tf_em_ext_alloc(struct tf *tfp, struct tf_alloc_tbl_scope_parms *parms)
 cleanup:
 	/* Free Table control block */
 	fparms.rm_db = eem_db[TF_DIR_RX];
-	fparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
+	fparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
 	fparms.index = parms->tbl_scope_id;
 	tf_rm_free(&fparms);
 	return -EINVAL;
@@ -512,7 +512,7 @@ tf_em_ext_free(struct tf *tfp,
 
 	/* Free Table control block */
 	aparms.rm_db = eem_db[TF_DIR_RX];
-	aparms.db_index = TF_EM_TBL_TYPE_TBL_SCOPE;
+	aparms.subtype = TF_EM_TBL_TYPE_TBL_SCOPE;
 	aparms.index = parms->tbl_scope_id;
 	rc = tf_rm_free(&aparms);
 	if (rc) {
diff --git a/drivers/net/bnxt/tf_core/tf_em_internal.c b/drivers/net/bnxt/tf_core/tf_em_internal.c
index 0864218469..043f9be4da 100644
--- a/drivers/net/bnxt/tf_core/tf_em_internal.c
+++ b/drivers/net/bnxt/tf_core/tf_em_internal.c
@@ -251,7 +251,7 @@ tf_em_int_bind(struct tf *tfp,
 		return -EINVAL;
 	}
 
-	db_cfg.type = TF_DEVICE_MODULE_TYPE_EM;
+	db_cfg.module = TF_MODULE_TYPE_EM;
 	db_cfg.num_elements = parms->num_elements;
 	db_cfg.cfg = parms->cfg;
 
@@ -294,7 +294,7 @@ tf_em_int_bind(struct tf *tfp,
 
 	for (i = 0; i < TF_DIR_MAX; i++) {
 		iparms.rm_db = em_db[i];
-		iparms.db_index = TF_EM_DB_EM_REC;
+		iparms.subtype = TF_EM_DB_EM_REC;
 		iparms.info = &info;
 
 		rc = tf_rm_get_info(&iparms);
diff --git a/drivers/net/bnxt/tf_core/tf_identifier.c b/drivers/net/bnxt/tf_core/tf_identifier.c
index 41ab13c132..9d0a578085 100644
--- a/drivers/net/bnxt/tf_core/tf_identifier.c
+++ b/drivers/net/bnxt/tf_core/tf_identifier.c
@@ -52,7 +52,7 @@ tf_ident_bind(struct tf *tfp,
 		return -EINVAL;
 	}
 
-	db_cfg.type = TF_DEVICE_MODULE_TYPE_IDENTIFIER;
+	db_cfg.module = TF_MODULE_TYPE_IDENTIFIER;
 	db_cfg.num_elements = parms->num_elements;
 	db_cfg.cfg = parms->cfg;
 
@@ -161,7 +161,7 @@ tf_ident_alloc(struct tf *tfp __rte_unused,
 
 	/* Allocate requested element */
 	aparms.rm_db = ident_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = &id;
 	aparms.base_index = &base_id;
 	rc = tf_rm_allocate(&aparms);
@@ -215,7 +215,7 @@ tf_ident_free(struct tf *tfp __rte_unused,
 
 	/* Check if element is in use */
 	aparms.rm_db = ident_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->id;
 	aparms.base_index = &base_id;
 	aparms.allocated = &allocated;
@@ -255,7 +255,7 @@ tf_ident_free(struct tf *tfp __rte_unused,
 
 	/* Free requested element */
 	fparms.rm_db = ident_db[parms->dir];
-	fparms.db_index = parms->type;
+	fparms.subtype = parms->type;
 	fparms.index = parms->id;
 	rc = tf_rm_free(&fparms);
 	if (rc) {
@@ -298,7 +298,7 @@ tf_ident_search(struct tf *tfp __rte_unused,
 
 	/* Check if element is in use */
 	aparms.rm_db = ident_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->search_id;
 	aparms.base_index = &base_id;
 	aparms.allocated = &allocated;
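
The identifier module above is representative of how every caller changes: the
former db_index is now the module subtype, passed through unchanged. A minimal
sketch of the new calling convention, with a hypothetical helper name and a
placeholder ident_db handle (not driver code):

	#include <stdint.h>
	#include "tf_rm.h"

	/* Allocate one identifier of the given TF identifier subtype from the
	 * per-direction RM DB.  The subtype is used directly as the DB index.
	 */
	static int example_ident_alloc(void *ident_db, uint16_t ident_type,
				       uint32_t *id)
	{
		struct tf_rm_allocate_parms aparms = { 0 };
		uint32_t base_id;

		aparms.rm_db = ident_db;     /* e.g. ident_db[TF_DIR_RX] */
		aparms.subtype = ident_type; /* was aparms.db_index */
		aparms.index = id;
		aparms.base_index = &base_id;
		return tf_rm_allocate(&aparms);
	}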
diff --git a/drivers/net/bnxt/tf_core/tf_if_tbl.c b/drivers/net/bnxt/tf_core/tf_if_tbl.c
index 16afa95e38..f58fa79b63 100644
--- a/drivers/net/bnxt/tf_core/tf_if_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_if_tbl.c
@@ -144,7 +144,7 @@ int
 tf_if_tbl_get(struct tf *tfp,
 	      struct tf_if_tbl_get_parms *parms)
 {
-	int rc;
+	int rc = 0;
 	struct tf_if_tbl_get_hcapi_parms hparms;
 
 	TF_CHECK_PARMS3(tfp, parms, parms->data);
diff --git a/drivers/net/bnxt/tf_core/tf_rm.c b/drivers/net/bnxt/tf_core/tf_rm.c
index 19de6e4c63..9fd660543c 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.c
+++ b/drivers/net/bnxt/tf_core/tf_rm.c
@@ -42,10 +42,18 @@ struct tf_rm_element {
 	 */
 	struct tf_rm_alloc_info alloc;
 
+	/**
+	 * If cfg_type == HCAPI_BA_CHILD, this field indicates
+	 * the parent module subtype for look up into the parent pool.
+	 * An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
+	 * module subtype of TF_MODULE_TYPE_TABLE.
+	 */
+	uint16_t parent_subtype;
+
 	/**
 	 * Bit allocator pool for the element. Pool size is controlled
 	 * by the struct tf_session_resources at time of session creation.
-	 * Null indicates that the element is not used for the device.
+	 * Null indicates that the pool is not used for the element.
 	 */
 	struct bitalloc *pool;
 };
@@ -67,7 +75,7 @@ struct tf_rm_new_db {
 	/**
 	 * Module type, used for logging purposes.
 	 */
-	enum tf_device_module_type type;
+	enum tf_module_type module;
 
 	/**
 	 * The DB consists of an array of elements
@@ -100,7 +108,7 @@ struct tf_rm_new_db {
  */
 static void
 tf_rm_count_hcapi_reservations(enum tf_dir dir,
-			       enum tf_device_module_type type,
+			       enum tf_module_type module,
 			       struct tf_rm_element_cfg *cfg,
 			       uint16_t *reservations,
 			       uint16_t count,
@@ -110,8 +118,7 @@ tf_rm_count_hcapi_reservations(enum tf_dir dir,
 	uint16_t cnt = 0;
 
 	for (i = 0; i < count; i++) {
-		if ((cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
-		     cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) &&
+		if (cfg[i].cfg_type != TF_RM_ELEM_CFG_NULL &&
 		    reservations[i] > 0)
 			cnt++;
 
@@ -120,14 +127,14 @@ tf_rm_count_hcapi_reservations(enum tf_dir dir,
 		 * split configuration array thus it would fail for
 		 * this type of check.
 		 */
-		if (type != TF_DEVICE_MODULE_TYPE_EM &&
+		if (module != TF_MODULE_TYPE_EM &&
 		    cfg[i].cfg_type == TF_RM_ELEM_CFG_NULL &&
 		    reservations[i] > 0) {
 			TFP_DRV_LOG(ERR,
 				"%s, %s, %s allocation of %d not supported\n",
-				tf_device_module_type_2_str(type),
+				tf_module_2_str(module),
 				tf_dir_2_str(dir),
-				tf_device_module_type_subtype_2_str(type, i),
+				tf_module_subtype_2_str(module, i),
 				reservations[i]);
 		}
 	}
@@ -156,8 +163,10 @@ enum tf_rm_adjust_type {
  * [in] action
  *   Adjust action
  *
- * [in] db_index
- *   DB index for the element type
+ * [in] subtype
+ *   TF module subtype used as an index into the database.
+ *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
+ *   module subtype of TF_MODULE_TYPE_TABLE.
  *
  * [in] index
  *   Index to convert
@@ -172,14 +181,14 @@ enum tf_rm_adjust_type {
 static int
 tf_rm_adjust_index(struct tf_rm_element *db,
 		   enum tf_rm_adjust_type action,
-		   uint32_t db_index,
+		   uint32_t subtype,
 		   uint32_t index,
 		   uint32_t *adj_index)
 {
 	int rc = 0;
 	uint32_t base_index;
 
-	base_index = db[db_index].alloc.entry.start;
+	base_index = db[subtype].alloc.entry.start;
 
 	switch (action) {
 	case TF_RM_ADJUST_RM_BASE:
@@ -201,7 +210,7 @@ tf_rm_adjust_index(struct tf_rm_element *db,
  * [in] dir
  *   Receive or transmit direction
  *
- * [in] type
+ * [in] module
  *   Type of Device Module
  *
  * [in] count
@@ -214,7 +223,7 @@ tf_rm_adjust_index(struct tf_rm_element *db,
  */
 static void
 tf_rm_log_residuals(enum tf_dir dir,
-		    enum tf_device_module_type type,
+		    enum tf_module_type module,
 		    uint16_t count,
 		    uint16_t *residuals)
 {
@@ -228,7 +237,7 @@ tf_rm_log_residuals(enum tf_dir dir,
 			TFP_DRV_LOG(ERR,
 				"%s, %s was not cleaned up, %d outstanding\n",
 				tf_dir_2_str(dir),
-				tf_device_module_type_subtype_2_str(type, i),
+				tf_module_subtype_2_str(module, i),
 				residuals[i]);
 	}
 }
@@ -295,7 +304,7 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
 	iparms.rm_db = rm_db;
 	iparms.count = &count;
 	for (i = 0, found = 0; i < rm_db->num_entries; i++) {
-		iparms.db_index = i;
+		iparms.subtype = i;
 		rc = tf_rm_get_inuse_count(&iparms);
 		/* Not a device supported entry, just skip */
 		if (rc == -ENOTSUP)
@@ -329,13 +338,13 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
 		for (i = 0, f = 0; i < rm_db->num_entries; i++) {
 			if (residuals[i] == 0)
 				continue;
-			aparms.db_index = i;
+			aparms.subtype = i;
 			aparms.info = &info;
 			rc = tf_rm_get_info(&aparms);
 			if (rc)
 				goto cleanup_all;
 
-			hparms.db_index = i;
+			hparms.subtype = i;
 			rc = tf_rm_get_hcapi_type(&hparms);
 			if (rc)
 				goto cleanup_all;
@@ -349,7 +358,7 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
 	}
 
 	tf_rm_log_residuals(rm_db->dir,
-			    rm_db->type,
+			    rm_db->module,
 			    rm_db->num_entries,
 			    residuals);
 
@@ -367,16 +376,93 @@ tf_rm_check_residuals(struct tf_rm_new_db *rm_db,
 	return rc;
 }
 
+/**
+ * Some resources do not have a 1:1 mapping between the Truflow type and the CFA
+ * resource type (HCAPI RM).  These resources have multiple Truflow types which
+ * map to a single HCAPI RM type.  In order to support this, one Truflow type
+ * sharing the HCAPI resources is designated the parent.  All other Truflow
+ * types associated with that HCAPI RM type are designated the children.
+ *
+ * This function updates the resource counts of any HCAPI_BA_PARENT with the
+ * counts of the HCAPI_BA_CHILDREN.  These are read from the alloc_cnt and
+ * written back to the req_cnt.
+ *
+ * [in] cfg
+ *   Pointer to an array of module specific Truflow type indexed RM cfg items
+ *
+ * [in] alloc_cnt
+ *   Pointer to the tf_open_session() configured array of module specific
+ *   Truflow type indexed requested counts.
+ *
+ * [in/out] req_cnt
+ *   Pointer to the location to put the updated resource counts.
+ *
+ * Returns:
+ *     0          - Success
+ *     negative   - Failure
+ */
+static int
+tf_rm_update_parent_reservations(struct tf_rm_element_cfg *cfg,
+				 uint16_t *alloc_cnt,
+				 uint16_t num_elements,
+				 uint16_t *req_cnt)
+{
+	int parent, child;
+
+	/* Search through all the elements */
+	for (parent = 0; parent < num_elements; parent++) {
+		uint16_t combined_cnt = 0;
+
+		/* If I am a parent */
+		if (cfg[parent].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
+			/* start with my own count */
+			RTE_ASSERT(cfg[parent].slices);
+			combined_cnt =
+				alloc_cnt[parent] / cfg[parent].slices;
+
+			if (alloc_cnt[parent] % cfg[parent].slices)
+				combined_cnt++;
+
+			/* Search again through all the elements */
+			for (child = 0; child < num_elements; child++) {
+				/* If this is one of my children */
+				if (cfg[child].cfg_type ==
+				    TF_RM_ELEM_CFG_HCAPI_BA_CHILD &&
+				    cfg[child].parent_subtype == parent) {
+					uint16_t cnt = 0;
+					RTE_ASSERT(cfg[child].slices);
+
+					/* Increment the parent's combined count
+					 * with each child's count, adjusted for
+					 * the number of slices per allocated RM element.
+					 */
+					cnt =
+					 alloc_cnt[child] / cfg[child].slices;
+
+					if (alloc_cnt[child] % cfg[child].slices)
+						cnt++;
+
+					combined_cnt += cnt;
+					/* Clear the requested child count */
+					req_cnt[child] = 0;
+				}
+			}
+			/* Save the parent count to be requested */
+			req_cnt[parent] = combined_cnt;
+		}
+	}
+	return 0;
+}
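
To make the roll-up arithmetic concrete, here is a worked example with made-up
counts (not taken from any real Thor profile): the parent requests 512 entries
at 1 slice per RM element, and one child requests 100 entries at 4 slices per
RM element.

	/* Parent: 512 / 1 = 512 RM elements (no remainder). */
	uint16_t parent_cnt = 512 / 1;

	/* Child: 100 / 4 = 25 RM elements (no remainder), so the child
	 * consumes 25 elements of the parent's pool.
	 */
	uint16_t child_cnt = 100 / 4;

	/* req_cnt[parent] = 512 + 25 = 537 and req_cnt[child] = 0; only the
	 * parent count is placed in the firmware reservation request.
	 */
	uint16_t req_parent = parent_cnt + child_cnt;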
+
 int
 tf_rm_create_db(struct tf *tfp,
 		struct tf_rm_create_db_parms *parms)
 {
 	int rc;
-	int i;
-	int j;
 	struct tf_session *tfs;
 	struct tf_dev_info *dev;
-	uint16_t max_types;
+	int i, j;
+	uint16_t max_types, hcapi_items, *req_cnt;
 	struct tfp_calloc_parms cparms;
 	struct tf_rm_resc_req_entry *query;
 	enum tf_rm_resc_resv_strategy resv_strategy;
@@ -385,7 +471,6 @@ tf_rm_create_db(struct tf *tfp,
 	struct tf_rm_new_db *rm_db;
 	struct tf_rm_element *db;
 	uint32_t pool_size;
-	uint16_t hcapi_items;
 
 	TF_CHECK_PARMS2(tfp, parms);
 
@@ -401,9 +486,9 @@ tf_rm_create_db(struct tf *tfp,
 
 	/* Need device max number of elements for the RM QCAPS */
 	rc = dev->ops->tf_dev_get_max_types(tfp, &max_types);
 	if (rc)
 		return rc;
 
 	cparms.nitems = max_types;
 	cparms.size = sizeof(struct tf_rm_resc_req_entry);
 	cparms.alignment = 0;
@@ -423,6 +508,28 @@ tf_rm_create_db(struct tf *tfp,
 	if (rc)
 		return rc;
 
+	/* Copy requested counts (alloc_cnt) from tf_open_session() to local
+	 * copy (req_cnt) so that it can be updated if required.
+	 */
+
+	cparms.nitems = parms->num_elements;
+	cparms.size = sizeof(uint16_t);
+	rc = tfp_calloc(&cparms);
+	if (rc)
+		return rc;
+
+	req_cnt = (uint16_t *)cparms.mem_va;
+
+	tfp_memcpy(req_cnt, parms->alloc_cnt,
+		   parms->num_elements * sizeof(uint16_t));
+
+	/* Update the req_cnt based upon the element configuration
+	 */
+	tf_rm_update_parent_reservations(parms->cfg,
+					 parms->alloc_cnt,
+					 parms->num_elements,
+					 req_cnt);
+
 	/* Process capabilities against DB requirements. However, as a
 	 * DB can hold elements that are not HCAPI we can reduce the
 	 * req msg content by removing those out of the request yet
@@ -430,21 +537,17 @@ tf_rm_create_db(struct tf *tfp,
 	 * remove entries where there are no request for elements.
 	 */
 	tf_rm_count_hcapi_reservations(parms->dir,
-				       parms->type,
+				       parms->module,
 				       parms->cfg,
-				       parms->alloc_cnt,
+				       req_cnt,
 				       parms->num_elements,
 				       &hcapi_items);
 
-	/* Handle the case where a DB create request really ends up
-	 * being empty. Unsupported (if not rare) case but possible
-	 * that no resources are necessary for a 'direction'.
-	 */
 	if (hcapi_items == 0) {
 		TFP_DRV_LOG(ERR,
-			"%s: DB create request for Zero elements, DB Type:%s\n",
-			tf_dir_2_str(parms->dir),
-			tf_device_module_type_2_str(parms->type));
+			    "%s: module:%s Empty RM DB create request\n",
+			    tf_dir_2_str(parms->dir),
+			    tf_module_2_str(parms->module));
 
 		parms->rm_db = NULL;
 		return -ENOMEM;
@@ -467,44 +570,45 @@ tf_rm_create_db(struct tf *tfp,
 
 	/* Build the request */
 	for (i = 0, j = 0; i < parms->num_elements; i++) {
-		/* Skip any non HCAPI cfg elements */
-		if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI ||
-		    parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
-			/* Only perform reservation for entries that
-			 * has been requested
-			 */
-			if (parms->alloc_cnt[i] == 0)
-				continue;
+		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
+		uint16_t hcapi_type = cfg->hcapi_type;
+
+		/* Only perform reservation for requested entries
+		 */
+		if (req_cnt[i] == 0)
+			continue;
+
+		/* Skip any children in the request */
+		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI ||
+		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
+		    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
 
-			/* Verify that we can get the full amount
-			 * allocated per the qcaps availability.
+			/* Verify that we can get the full amount per qcaps.
 			 */
-			if (parms->alloc_cnt[i] <=
-			    query[parms->cfg[i].hcapi_type].max) {
-				req[j].type = parms->cfg[i].hcapi_type;
-				req[j].min = parms->alloc_cnt[i];
-				req[j].max = parms->alloc_cnt[i];
+			if (req_cnt[i] <= query[hcapi_type].max) {
+				req[j].type = hcapi_type;
+				req[j].min = req_cnt[i];
+				req[j].max = req_cnt[i];
 				j++;
 			} else {
 				const char *type_str;
-				uint16_t hcapi_type = parms->cfg[i].hcapi_type;
 
 				dev->ops->tf_dev_get_resource_str(tfp,
-								  hcapi_type,
-								  &type_str);
+							      hcapi_type,
+							      &type_str);
 				TFP_DRV_LOG(ERR,
-					"%s: Resource failure, type:%d:%s\n",
-					tf_dir_2_str(parms->dir),
-					hcapi_type, type_str);
-				TFP_DRV_LOG(ERR,
-					"req:%d, avail:%d\n",
-					parms->alloc_cnt[i],
-					query[hcapi_type].max);
+					    "Failure, %s:%d:%s req:%d avail:%d\n",
+					    tf_dir_2_str(parms->dir),
+					    hcapi_type, type_str,
+					    req_cnt[i],
+					    query[hcapi_type].max);
 				return -EINVAL;
 			}
 		}
 	}
 
+	/* Allocate all resources for the module type
+	 */
 	rc = tf_msg_session_resc_alloc(tfp,
 				       dev,
 				       parms->dir,
@@ -532,32 +636,56 @@ tf_rm_create_db(struct tf *tfp,
 
 	db = rm_db->db;
 	for (i = 0, j = 0; i < parms->num_elements; i++) {
-		db[i].cfg_type = parms->cfg[i].cfg_type;
-		db[i].hcapi_type = parms->cfg[i].hcapi_type;
+		struct tf_rm_element_cfg *cfg = &parms->cfg[i];
+		const char *type_str;
+
+		dev->ops->tf_dev_get_resource_str(tfp,
+						  cfg->hcapi_type,
+						  &type_str);
 
-		/* Skip any non HCAPI types as we didn't include them
-		 * in the reservation request.
+		db[i].cfg_type = cfg->cfg_type;
+		db[i].hcapi_type = cfg->hcapi_type;
+
+		/* Save the parent subtype for later use to find the pool
 		 */
-		if (parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI &&
-		    parms->cfg[i].cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
-			continue;
+		if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
+			db[i].parent_subtype = cfg->parent_subtype;
 
 		/* If the element didn't request an allocation no need
 		 * to create a pool nor verify if we got a reservation.
 		 */
-		if (parms->alloc_cnt[i] == 0)
+		if (req_cnt[i] == 0)
+			continue;
+
+		/* Skip any children or invalid
+		 */
+		if (cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI &&
+		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
+		    cfg->cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT)
 			continue;
 
 		/* If the element had requested an allocation and that
 		 * allocation was a success (full amount) then
 		 * allocate the pool.
 		 */
-		if (parms->alloc_cnt[i] == resv[j].stride) {
+		if (req_cnt[i] == resv[j].stride) {
 			db[i].alloc.entry.start = resv[j].start;
 			db[i].alloc.entry.stride = resv[j].stride;
 
-			/* Only allocate BA pool if so requested */
-			if (parms->cfg[i].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA) {
+			/* Only allocate BA pool for BA types, not children */
+			if (cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA ||
+			    cfg->cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT) {
+				if (cfg->divider) {
+					resv[j].stride =
+						resv[j].stride / cfg->divider;
+					if (resv[j].stride <= 0) {
+						TFP_DRV_LOG(ERR,
+						     "%s:Divide fails:%d:%s\n",
+						     tf_dir_2_str(parms->dir),
+						     cfg->hcapi_type, type_str);
+						goto fail;
+					}
+				}
 				/* Create pool */
 				pool_size = (BITALLOC_SIZEOF(resv[j].stride) /
 					     sizeof(struct bitalloc));
@@ -567,9 +695,9 @@ tf_rm_create_db(struct tf *tfp,
 				rc = tfp_calloc(&cparms);
 				if (rc) {
 					TFP_DRV_LOG(ERR,
-					     "%s: Pool alloc failed, type:%d\n",
-					     tf_dir_2_str(parms->dir),
-					     db[i].cfg_type);
+					 "%s: Pool alloc failed, type:%d:%s\n",
+					 tf_dir_2_str(parms->dir),
+					 cfg->hcapi_type, type_str);
 					goto fail;
 				}
 				db[i].pool = (struct bitalloc *)cparms.mem_va;
@@ -577,9 +705,9 @@ tf_rm_create_db(struct tf *tfp,
 				rc = ba_init(db[i].pool, resv[j].stride);
 				if (rc) {
 					TFP_DRV_LOG(ERR,
-					     "%s: Pool init failed, type:%d\n",
-					     tf_dir_2_str(parms->dir),
-					     db[i].cfg_type);
+					  "%s: Pool init failed, type:%d:%s\n",
+					  tf_dir_2_str(parms->dir),
+					  cfg->hcapi_type, type_str);
 					goto fail;
 				}
 			}
@@ -589,25 +717,21 @@ tf_rm_create_db(struct tf *tfp,
 			 * all elements, not any less.
 			 */
 			TFP_DRV_LOG(ERR,
-				    "%s: Alloc failed, type:%d\n",
-				    tf_dir_2_str(parms->dir),
-				    db[i].cfg_type);
-			TFP_DRV_LOG(ERR,
-				    "req:%d, alloc:%d\n",
-				    parms->alloc_cnt[i],
-				    resv[j].stride);
+				    "%s: Alloc failed %d:%s req:%d, alloc:%d\n",
+				    tf_dir_2_str(parms->dir), cfg->hcapi_type,
+				    type_str, req_cnt[i], resv[j].stride);
 			goto fail;
 		}
 	}
 
 	rm_db->num_entries = parms->num_elements;
 	rm_db->dir = parms->dir;
-	rm_db->type = parms->type;
+	rm_db->module = parms->module;
 	*parms->rm_db = (void *)rm_db;
 
 	tfp_free((void *)req);
 	tfp_free((void *)resv);
-
+	tfp_free((void *)req_cnt);
 	return 0;
 
  fail:
@@ -616,6 +740,7 @@ tf_rm_create_db(struct tf *tfp,
 	tfp_free((void *)db->pool);
 	tfp_free((void *)db);
 	tfp_free((void *)rm_db);
+	tfp_free((void *)req_cnt);
 	parms->rm_db = NULL;
 
 	return -EINVAL;
@@ -682,7 +807,7 @@ tf_rm_free_db(struct tf *tfp,
 			TFP_DRV_LOG(ERR,
 				    "%s: Internal Flush error, module:%s\n",
 				    tf_dir_2_str(parms->dir),
-				    tf_device_module_type_2_str(rm_db->type));
+				    tf_module_2_str(rm_db->module));
 	}
 
 	/* No need to check for configuration type, even if we do not
@@ -695,6 +820,54 @@ tf_rm_free_db(struct tf *tfp,
 
 	return rc;
 }
+/**
+ * Get the bit allocator pool associated with the subtype and the db
+ *
+ * [in] rm_db
+ *   Pointer to the DB
+ *
+ * [in] subtype
+ *   Module subtype used to index into the module specific database.
+ *   An example subtype is TF_TBL_TYPE_FULL_ACT_RECORD which is a
+ *   module subtype of TF_MODULE_TYPE_TABLE.
+ *
+ * [in/out] pool
+ *   Pointer to the bit allocator pool used
+ *
+ * [in/out] new_subtype
+ *   Pointer to the subtype of the actual pool used
+ * Returns:
+ *     0          - Success
+ *     -ENOTSUP   - Operation not supported
+ */
+static int
+tf_rm_get_pool(struct tf_rm_new_db *rm_db,
+	       uint16_t subtype,
+	       struct bitalloc **pool,
+	       uint16_t *new_subtype)
+{
+	int rc = 0;
+	uint16_t tmp_subtype = subtype;
+
+	/* If we are a child, get the parent table index */
+	if (rm_db->db[subtype].cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
+		tmp_subtype = rm_db->db[subtype].parent_subtype;
+
+	*pool = rm_db->db[tmp_subtype].pool;
+
+	/* Bail out if the pool is not valid, should never happen */
+	if (rm_db->db[tmp_subtype].pool == NULL) {
+		rc = -ENOTSUP;
+		TFP_DRV_LOG(ERR,
+			    "%s: Invalid pool for this type:%d, rc:%s\n",
+			    tf_dir_2_str(rm_db->dir),
+			    tmp_subtype,
+			    strerror(-rc));
+		return rc;
+	}
+	*new_subtype = tmp_subtype;
+	return rc;
+}
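
A small sketch of the child-to-parent redirection performed by this internal
helper; the subtype numbers and the pool pointer are made up for illustration:

	/* DB fragment: subtype 1 owns the pool, subtype 4 is its child. */
	rm_db->db[1].cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_PARENT;
	rm_db->db[1].pool = parent_pool;             /* valid bitalloc pool */
	rm_db->db[4].cfg_type = TF_RM_ELEM_CFG_HCAPI_BA_CHILD;
	rm_db->db[4].parent_subtype = 1;
	rm_db->db[4].pool = NULL;                    /* children own no pool */

	/* tf_rm_get_pool(rm_db, 4, &pool, &subtype) then returns the parent's
	 * pool and subtype == 1, so the index adjustment in the callers below
	 * uses the parent's start/stride.
	 */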
 
 int
 tf_rm_allocate(struct tf_rm_allocate_parms *parms)
@@ -704,37 +877,33 @@ tf_rm_allocate(struct tf_rm_allocate_parms *parms)
 	uint32_t index;
 	struct tf_rm_new_db *rm_db;
 	enum tf_rm_elem_cfg_type cfg_type;
+	struct bitalloc *pool;
+	uint16_t subtype;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
 
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
+
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
 
 	/* Bail out if not controlled by RM */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
 		return -ENOTSUP;
 
-	/* Bail out if the pool is not valid, should never happen */
-	if (rm_db->db[parms->db_index].pool == NULL) {
-		rc = -ENOTSUP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Invalid pool for this type:%d, rc:%s\n",
-			    tf_dir_2_str(rm_db->dir),
-			    parms->db_index,
-			    strerror(-rc));
+	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
+	if (rc)
 		return rc;
-	}
-
 	/*
 	 * priority  0: allocate from top of the tcam i.e. high
 	 * priority !0: allocate index from bottom i.e lowest
 	 */
 	if (parms->priority)
-		id = ba_alloc_reverse(rm_db->db[parms->db_index].pool);
+		id = ba_alloc_reverse(pool);
 	else
-		id = ba_alloc(rm_db->db[parms->db_index].pool);
+		id = ba_alloc(pool);
 	if (id == BA_FAIL) {
 		rc = -ENOMEM;
 		TFP_DRV_LOG(ERR,
@@ -747,7 +916,7 @@ tf_rm_allocate(struct tf_rm_allocate_parms *parms)
 	/* Adjust for any non zero start value */
 	rc = tf_rm_adjust_index(rm_db->db,
 				TF_RM_ADJUST_ADD_BASE,
-				parms->db_index,
+				subtype,
 				id,
 				&index);
 	if (rc) {
@@ -772,39 +941,35 @@ tf_rm_free(struct tf_rm_free_parms *parms)
 	uint32_t adj_index;
 	struct tf_rm_new_db *rm_db;
 	enum tf_rm_elem_cfg_type cfg_type;
+	struct bitalloc *pool;
+	uint16_t subtype;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
-
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
+
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
 
 	/* Bail out if not controlled by RM */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
 		return -ENOTSUP;
 
-	/* Bail out if the pool is not valid, should never happen */
-	if (rm_db->db[parms->db_index].pool == NULL) {
-		rc = -ENOTSUP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Invalid pool for this type:%d, rc:%s\n",
-			    tf_dir_2_str(rm_db->dir),
-			    parms->db_index,
-			    strerror(-rc));
+	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
+	if (rc)
 		return rc;
-	}
 
 	/* Adjust for any non zero start value */
 	rc = tf_rm_adjust_index(rm_db->db,
 				TF_RM_ADJUST_RM_BASE,
-				parms->db_index,
+				subtype,
 				parms->index,
 				&adj_index);
 	if (rc)
 		return rc;
 
-	rc = ba_free(rm_db->db[parms->db_index].pool, adj_index);
+	rc = ba_free(pool, adj_index);
 	/* No logging direction matters and that is not available here */
 	if (rc)
 		return rc;
@@ -819,33 +984,30 @@ tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
 	uint32_t adj_index;
 	struct tf_rm_new_db *rm_db;
 	enum tf_rm_elem_cfg_type cfg_type;
+	struct bitalloc *pool;
+	uint16_t subtype;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
-
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
+
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
+
 
 	/* Bail out if not controlled by RM */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
 		return -ENOTSUP;
 
-	/* Bail out if the pool is not valid, should never happen */
-	if (rm_db->db[parms->db_index].pool == NULL) {
-		rc = -ENOTSUP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Invalid pool for this type:%d, rc:%s\n",
-			    tf_dir_2_str(rm_db->dir),
-			    parms->db_index,
-			    strerror(-rc));
+	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
+	if (rc)
 		return rc;
-	}
 
 	/* Adjust for any non zero start value */
 	rc = tf_rm_adjust_index(rm_db->db,
 				TF_RM_ADJUST_RM_BASE,
-				parms->db_index,
+				subtype,
 				parms->index,
 				&adj_index);
 	if (rc)
@@ -853,8 +1015,7 @@ tf_rm_is_allocated(struct tf_rm_is_allocated_parms *parms)
 
 	if (parms->base_index)
 		*parms->base_index = adj_index;
-	*parms->allocated = ba_inuse(rm_db->db[parms->db_index].pool,
-				     adj_index);
+	*parms->allocated = ba_inuse(pool, adj_index);
 
 	return rc;
 }
@@ -866,19 +1027,17 @@ tf_rm_get_info(struct tf_rm_get_alloc_info_parms *parms)
 	enum tf_rm_elem_cfg_type cfg_type;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
-
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
+
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
 
 	/* Bail out if not controlled by HCAPI */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
-	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	if (cfg_type == TF_RM_ELEM_CFG_NULL)
 		return -ENOTSUP;
 
 	memcpy(parms->info,
-	       &rm_db->db[parms->db_index].alloc,
+	       &rm_db->db[parms->subtype].alloc,
 	       sizeof(struct tf_rm_alloc_info));
 
 	return 0;
@@ -891,18 +1050,16 @@ tf_rm_get_hcapi_type(struct tf_rm_get_hcapi_parms *parms)
 	enum tf_rm_elem_cfg_type cfg_type;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
-
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
+
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
 
 	/* Bail out if not controlled by HCAPI */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI &&
-	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	if (cfg_type == TF_RM_ELEM_CFG_NULL)
 		return -ENOTSUP;
 
-	*parms->hcapi_type = rm_db->db[parms->db_index].hcapi_type;
+	*parms->hcapi_type = rm_db->db[parms->subtype].hcapi_type;
 
 	return 0;
 }
@@ -915,30 +1072,31 @@ tf_rm_get_inuse_count(struct tf_rm_get_inuse_count_parms *parms)
 	enum tf_rm_elem_cfg_type cfg_type;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
-
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
 
-	/* Bail out if not controlled by RM */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
+
+	/* Bail out if not a BA pool */
+	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
 		return -ENOTSUP;
 
 	/* Bail silently (no logging), if the pool is not valid there
 	 * was no elements allocated for it.
 	 */
-	if (rm_db->db[parms->db_index].pool == NULL) {
+	if (rm_db->db[parms->subtype].pool == NULL) {
 		*parms->count = 0;
 		return 0;
 	}
 
-	*parms->count = ba_inuse_count(rm_db->db[parms->db_index].pool);
+	*parms->count = ba_inuse_count(rm_db->db[parms->subtype].pool);
 
 	return rc;
-
 }
-
+/* Only used for table bulk get at this time
+ */
 int
 tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
 {
@@ -947,31 +1105,27 @@ tf_rm_check_indexes_in_range(struct tf_rm_check_indexes_in_range_parms *parms)
 	uint32_t base_index;
 	uint32_t stride;
 	int rc = 0;
+	struct bitalloc *pool;
+	uint16_t subtype;
 
 	TF_CHECK_PARMS2(parms, parms->rm_db);
-
 	rm_db = (struct tf_rm_new_db *)parms->rm_db;
-	if (!rm_db->db)
-		return -EINVAL;
-	cfg_type = rm_db->db[parms->db_index].cfg_type;
+	TF_CHECK_PARMS1(rm_db->db);
 
-	/* Bail out if not controlled by RM */
-	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA)
+	cfg_type = rm_db->db[parms->subtype].cfg_type;
+
+	/* Bail out if not a BA pool */
+	if (cfg_type != TF_RM_ELEM_CFG_HCAPI_BA &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_PARENT &&
+	    cfg_type != TF_RM_ELEM_CFG_HCAPI_BA_CHILD)
 		return -ENOTSUP;
 
-	/* Bail out if the pool is not valid, should never happen */
-	if (rm_db->db[parms->db_index].pool == NULL) {
-		rc = -ENOTSUP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Invalid pool for this type:%d, rc:%s\n",
-			    tf_dir_2_str(rm_db->dir),
-			    parms->db_index,
-			    strerror(-rc));
+	rc = tf_rm_get_pool(rm_db, parms->subtype, &pool, &subtype);
+	if (rc)
 		return rc;
-	}
 
-	base_index = rm_db->db[parms->db_index].alloc.entry.start;
-	stride = rm_db->db[parms->db_index].alloc.entry.stride;
+	base_index = rm_db->db[subtype].alloc.entry.start;
+	stride = rm_db->db[subtype].alloc.entry.stride;
 
 	if (parms->starting_index < base_index ||
 	    parms->starting_index + parms->num_entries > base_index + stride)
diff --git a/drivers/net/bnxt/tf_core/tf_rm.h b/drivers/net/bnxt/tf_core/tf_rm.h
index 291086c7c7..407c7d5bf9 100644
--- a/drivers/net/bnxt/tf_core/tf_rm.h
+++ b/drivers/net/bnxt/tf_core/tf_rm.h
@@ -35,11 +35,11 @@ struct tf;
  * The RM DB will work on its initial allocated sizes so the
  * capability of dynamically growing a particular resource is not
  * possible. If this capability later becomes a requirement then the
- * MAX pool size of the Chip œneeds to be added to the tf_rm_elem_info
+ * MAX pool size of the chip needs to be added to the tf_rm_elem_info
  * structure and several new APIs would need to be added to allow for
  * growth of a single TF resource type.
  *
- * The access functions does not check for NULL pointers as it's a
+ * The access functions do not check for NULL pointers; this is a
  * support module, not called directly.
  */
 
@@ -65,19 +65,28 @@ enum tf_rm_elem_cfg_type {
 	 * No configuration
 	 */
 	TF_RM_ELEM_CFG_NULL,
-	/** HCAPI 'controlled', no RM storage thus the Device Module
+	/** HCAPI 'controlled', no RM storage so the module
 	 *  using the RM can chose to handle storage locally.
 	 */
 	TF_RM_ELEM_CFG_HCAPI,
-	/** HCAPI 'controlled', uses a Bit Allocator Pool for internal
+	/** HCAPI 'controlled', uses a bit allocator pool for internal
 	 *  storage in the RM.
 	 */
 	TF_RM_ELEM_CFG_HCAPI_BA,
 	/**
-	 * Shared element thus it belongs to a shared FW Session and
-	 * is not controlled by the Host.
+	 * HCAPI 'controlled', uses a bit allocator pool for internal
+	 * storage in the RM but multiple TF types map to a single
+	 * HCAPI type.  Parent manages the table.
 	 */
-	TF_RM_ELEM_CFG_SHARED,
+	TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
+	/**
+	 * HCAPI 'controlled', uses a bit allocator pool for internal
+	 * storage in the RM but multiple TF types map to a single
+	 * HCAPI type.  Child accesses the parent db.
+	 */
+	TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
+
+	/** Number of configuration types, keep last */
 	TF_RM_TYPE_MAX
 };
 
@@ -114,6 +123,30 @@ struct tf_rm_element_cfg {
 	 * conversion.
 	 */
 	uint16_t hcapi_type;
+
+	/**
+	 * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_CHILD
+	 *
+	 * Parent Truflow module subtype associated with this resource type.
+	 */
+	uint16_t parent_subtype;
+
+	/**
+	 * if cfg_type == TF_RM_ELEM_CFG_HCAPI_BA_PARENT or _CHILD
+	 *
+	 * Resource slices.  How many slices will fit in the
+	 * resource pool chunk size.
+	 */
+	uint8_t slices;
+	/**
+	 * Pool element divider count
+	 * If 0 or 1, there is 1:1 correspondence between the RM
+	 * BA pool resource element and the HCAPI RM firmware
+	 * resource.  If > 1, the RM BA pool element has a 1:n
+	 * correspondence to the HCAPI RM firmware resource.
+	 */
+	uint8_t divider;
+
 };
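
A hypothetical configuration fragment showing how the new fields fit together;
the HCAPI type value, the slice counts, and the choice of table types are
placeholders for illustration, not the real Thor SRAM bank mapping:

	#define EXAMPLE_SRAM_BANK_TYPE 0x1   /* placeholder HCAPI RM type */

	static struct tf_rm_element_cfg example_tbl_cfg[TF_TBL_TYPE_MAX] = {
		/* Parent: owns the BA pool for the shared SRAM bank. */
		[TF_TBL_TYPE_FULL_ACT_RECORD] = {
			.cfg_type   = TF_RM_ELEM_CFG_HCAPI_BA_PARENT,
			.hcapi_type = EXAMPLE_SRAM_BANK_TYPE,
			.slices     = 1,
		},
		/* Child: shares the parent's pool, four entries per RM
		 * element, so its request is folded into the parent's.
		 */
		[TF_TBL_TYPE_COMPACT_ACT_RECORD] = {
			.cfg_type       = TF_RM_ELEM_CFG_HCAPI_BA_CHILD,
			.hcapi_type     = EXAMPLE_SRAM_BANK_TYPE,
			.parent_subtype = TF_TBL_TYPE_FULL_ACT_RECORD,
			.slices         = 4,
		},
	};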
 
 /**
@@ -135,9 +168,9 @@ struct tf_rm_alloc_info {
  */
 struct tf_rm_create_db_parms {
 	/**
-	 * [in] Device module type. Used for logging purposes.
+	 * [in] Module type. Used for logging purposes.
 	 */
-	enum tf_device_module_type type;
+	enum tf_module_type module;
 	/**
 	 * [in] Receive or transmit direction.
 	 */
@@ -153,8 +186,7 @@ struct tf_rm_create_db_parms {
 	/**
 	 * Resource allocation count array. This array content
 	 * originates from the tf_session_resources that is passed in
-	 * on session open.
-	 * Array size is num_elements.
+	 * on session open. Array size is num_elements.
 	 */
 	uint16_t *alloc_cnt;
 	/**
@@ -186,10 +218,11 @@ struct tf_rm_allocate_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] Module subtype indicates which DB entry to perform the
+	 * action on.  (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
 	 * [in] Pointer to the allocated index in normalized
 	 * form. Normalized means the index has been adjusted,
@@ -219,10 +252,11 @@ struct tf_rm_free_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
 	 * [in] Index to free
 	 */
@@ -238,10 +272,11 @@ struct tf_rm_is_allocated_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
 	 * [in] Index to free
 	 */
@@ -265,13 +300,14 @@ struct tf_rm_get_alloc_info_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
 	 * [out] Pointer to the requested allocation information for
-	 * the specified db_index
+	 * the specified subtype
 	 */
 	struct tf_rm_alloc_info *info;
 };
@@ -285,12 +321,13 @@ struct tf_rm_get_hcapi_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
-	 * [out] Pointer to the hcapi type for the specified db_index
+	 * [out] Pointer to the hcapi type for the specified subtype
 	 */
 	uint16_t *hcapi_type;
 };
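
For illustration, a module translates its subtype to the device HCAPI RM type
roughly as follows before building a firmware message (the tbl_db handle and
the chosen subtype are placeholders):

	struct tf_rm_get_hcapi_parms hparms = { 0 };
	uint16_t hcapi_type;
	int rc;

	hparms.rm_db = tbl_db;                        /* module RM DB */
	hparms.subtype = TF_TBL_TYPE_FULL_ACT_RECORD; /* module subtype */
	hparms.hcapi_type = &hcapi_type;
	rc = tf_rm_get_hcapi_type(&hparms);
	/* On success hcapi_type holds the device resource id used in the
	 * firmware request.
	 */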
@@ -304,12 +341,13 @@ struct tf_rm_get_inuse_count_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
-	 * [out] Pointer to the inuse count for the specified db_index
+	 * [out] Pointer to the inuse count for the specified subtype
 	 */
 	uint16_t *count;
 };
@@ -323,10 +361,11 @@ struct tf_rm_check_indexes_in_range_parms {
 	 */
 	void *rm_db;
 	/**
-	 * [in] DB Index, indicates which DB entry to perform the
-	 * action on.
+	 * [in] TF subtype indicates which DB entry to perform the
+	 * action on. (e.g. TF_TCAM_TBL_TYPE_L2_CTXT subtype of module
+	 * TF_MODULE_TYPE_TCAM)
 	 */
-	uint16_t db_index;
+	uint16_t subtype;
 	/**
 	 * [in] Starting index
 	 */
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c b/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
deleted file mode 100644
index 396ebdb0a9..0000000000
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.c
+++ /dev/null
@@ -1,783 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2021 Broadcom
- * All rights reserved.
- */
-
-#include "tf_common.h"
-#include "tf_util.h"
-#include "tfp.h"
-#include "tf_core.h"
-#include "tf_shadow_tbl.h"
-#include "tf_hash.h"
-
-/**
- * The implementation includes 3 tables per table table type.
- * - hash table
- *   - sized so that a minimum of 4 slots per shadow entry are available to
- *   minimize the likelihood of collisions.
- * - shadow key table
- *   - sized to the number of entries requested and is directly indexed
- *   - the index is zero based and is the table index - the base address
- *   - the data associated with the entry is stored in the key table.
- *   - The stored key is actually the data associated with the entry.
- * - shadow result table
- *   - the result table is stored separately since it only needs to be accessed
- *   when the key matches.
- *   - the result has a back pointer to the hash table via the hb handle.  The
- *   hb handle is a 32 bit represention of the hash with a valid bit, bucket
- *   element index, and the hash index.  It is necessary to store the hb handle
- *   with the result since subsequent removes only provide the table index.
- *
- * - Max entries is limited in the current implementation since bit 15 is the
- *   valid bit in the hash table.
- * - A 16bit hash is calculated and masked based on the number of entries
- * - 64b wide bucket is used and broken into 4x16bit elements.
- *   This decision is based on quicker bucket scanning to determine if any
- *   elements are in use.
- * - bit 15 of each bucket element is the valid, this is done to prevent having
- *   to read the larger key/result data for determining VALID.  It also aids
- *   in the more efficient scanning of the bucket for slot usage.
- */
-
-/*
- * The maximum number of shadow entries supported.  The value also doubles as
- * the maximum number of hash buckets.  There are only 15 bits of data per
- * bucket to point to the shadow tables.
- */
-#define TF_SHADOW_ENTRIES_MAX (1 << 15)
-
-/* The number of elements(BE) per hash bucket (HB) */
-#define TF_SHADOW_HB_NUM_ELEM (4)
-#define TF_SHADOW_BE_VALID (1 << 15)
-#define TF_SHADOW_BE_IS_VALID(be) (((be) & TF_SHADOW_BE_VALID) != 0)
-
-/**
- * The hash bucket handle is 32b
- * - bit 31, the Valid bit
- * - bit 29-30, the element
- * - bits 0-15, the hash idx (is masked based on the allocated size)
- */
-#define TF_SHADOW_HB_HANDLE_IS_VALID(hndl) (((hndl) & (1 << 31)) != 0)
-#define TF_SHADOW_HB_HANDLE_CREATE(idx, be) ((1 << 31) | \
-					     ((be) << 29) | (idx))
-
-#define TF_SHADOW_HB_HANDLE_BE_GET(hdl) (((hdl) >> 29) & \
-					 (TF_SHADOW_HB_NUM_ELEM - 1))
-
-#define TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hdl)((hdl) & \
-						(ctxt)->hash_ctxt.hid_mask)
-
-/**
- * The idx provided by the caller is within a region, so currently the base is
- * either added or subtracted from the idx to ensure it can be used as a
- * compressed index
- */
-
-/* Convert the table index to a shadow index */
-#define TF_SHADOW_IDX_TO_SHIDX(ctxt, idx) ((idx) - \
-					   (ctxt)->shadow_ctxt.base_addr)
-
-/* Convert the shadow index to a tbl index */
-#define TF_SHADOW_SHIDX_TO_IDX(ctxt, idx) ((idx) + \
-					   (ctxt)->shadow_ctxt.base_addr)
-
-/* Simple helper masks for clearing en element from the bucket */
-#define TF_SHADOW_BE0_MASK_CLEAR(hb) ((hb) & 0xffffffffffff0000ull)
-#define TF_SHADOW_BE1_MASK_CLEAR(hb) ((hb) & 0xffffffff0000ffffull)
-#define TF_SHADOW_BE2_MASK_CLEAR(hb) ((hb) & 0xffff0000ffffffffull)
-#define TF_SHADOW_BE3_MASK_CLEAR(hb) ((hb) & 0x0000ffffffffffffull)
-
-/**
- * This should be coming from external, but for now it is assumed that no key
- * is greater than 512 bits (64B).  This makes allocation of the key table
- * easier without having to allocate on the fly.
- */
-#define TF_SHADOW_MAX_KEY_SZ 64
-
-/*
- * Local only defines for the internal data.
- */
-
-/**
- * tf_shadow_tbl_shadow_key_entry is the key entry of the key table.
- * The key stored in the table is the result data of the index table.
- */
-struct tf_shadow_tbl_shadow_key_entry {
-	uint8_t key[TF_SHADOW_MAX_KEY_SZ];
-};
-
-/**
- * tf_shadow_tbl_shadow_result_entry is the result table entry.
- * The result table writes are broken into two phases:
- * - The search phase, which stores the hb_handle and key size and
- * - The set phase, which writes the refcnt
- */
-struct tf_shadow_tbl_shadow_result_entry {
-	uint16_t key_size;
-	uint32_t refcnt;
-	uint32_t hb_handle;
-};
-
-/**
- * tf_shadow_tbl_shadow_ctxt holds all information for accessing the key and
- * result tables.
- */
-struct tf_shadow_tbl_shadow_ctxt {
-	struct tf_shadow_tbl_shadow_key_entry *sh_key_tbl;
-	struct tf_shadow_tbl_shadow_result_entry *sh_res_tbl;
-	uint32_t base_addr;
-	uint16_t num_entries;
-	uint16_t alloc_idx;
-};
-
-/**
- * tf_shadow_tbl_hash_ctxt holds all information related to accessing the hash
- * table.
- */
-struct tf_shadow_tbl_hash_ctxt {
-	uint64_t *hashtbl;
-	uint16_t hid_mask;
-	uint16_t hash_entries;
-};
-
-/**
- * tf_shadow_tbl_ctxt holds the hash and shadow tables for the current shadow
- * table db.  This structure is per table table type as each table table has
- * it's own shadow and hash table.
- */
-struct tf_shadow_tbl_ctxt {
-	struct tf_shadow_tbl_shadow_ctxt shadow_ctxt;
-	struct tf_shadow_tbl_hash_ctxt hash_ctxt;
-};
-
-/**
- * tf_shadow_tbl_db is the allocated db structure returned as an opaque
- * void * pointer to the caller during create db.  It holds the pointers for
- * each table associated with the db.
- */
-struct tf_shadow_tbl_db {
-	/* Each context holds the shadow and hash table information */
-	struct tf_shadow_tbl_ctxt *ctxt[TF_TBL_TYPE_MAX];
-};
-
-/**
- * Simple routine that decides what table types can be searchable.
- *
- */
-static int tf_shadow_tbl_is_searchable(enum tf_tbl_type type)
-{
-	int rc = 0;
-
-	switch (type) {
-	case TF_TBL_TYPE_ACT_ENCAP_8B:
-	case TF_TBL_TYPE_ACT_ENCAP_16B:
-	case TF_TBL_TYPE_ACT_ENCAP_32B:
-	case TF_TBL_TYPE_ACT_ENCAP_64B:
-	case TF_TBL_TYPE_ACT_SP_SMAC:
-	case TF_TBL_TYPE_ACT_SP_SMAC_IPV4:
-	case TF_TBL_TYPE_ACT_SP_SMAC_IPV6:
-	case TF_TBL_TYPE_ACT_MODIFY_IPV4:
-		rc = 1;
-		break;
-	default:
-		rc = 0;
-		break;
-	};
-
-	return rc;
-}
-
-/**
- * Returns the number of entries in the contexts shadow table.
- */
-static inline uint16_t
-tf_shadow_tbl_sh_num_entries_get(struct tf_shadow_tbl_ctxt *ctxt)
-{
-	return ctxt->shadow_ctxt.num_entries;
-}
-
-/**
- * Compare the give key with the key in the shadow table.
- *
- * Returns 0 if the keys match
- */
-static int
-tf_shadow_tbl_key_cmp(struct tf_shadow_tbl_ctxt *ctxt,
-		      uint8_t *key,
-		      uint16_t sh_idx,
-		      uint16_t size)
-{
-	if (size != ctxt->shadow_ctxt.sh_res_tbl[sh_idx].key_size ||
-	    sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) || !key)
-		return -1;
-
-	return memcmp(key, ctxt->shadow_ctxt.sh_key_tbl[sh_idx].key, size);
-}
-
-/**
- * Free the memory associated with the context.
- */
-static void
-tf_shadow_tbl_ctxt_delete(struct tf_shadow_tbl_ctxt *ctxt)
-{
-	if (!ctxt)
-		return;
-
-	tfp_free(ctxt->hash_ctxt.hashtbl);
-	tfp_free(ctxt->shadow_ctxt.sh_key_tbl);
-	tfp_free(ctxt->shadow_ctxt.sh_res_tbl);
-}
-
-/**
- * The TF Shadow TBL context is per TBL and holds all information relating to
- * managing the shadow and search capability.  This routine allocated data that
- * needs to be deallocated by the tf_shadow_tbl_ctxt_delete prior when deleting
- * the shadow db.
- */
-static int
-tf_shadow_tbl_ctxt_create(struct tf_shadow_tbl_ctxt *ctxt,
-			  uint16_t num_entries,
-			  uint16_t base_addr)
-{
-	struct tfp_calloc_parms cparms;
-	uint16_t hash_size = 1;
-	uint16_t hash_mask;
-	int rc;
-
-	/* Hash table is a power of two that holds the number of entries */
-	if (num_entries > TF_SHADOW_ENTRIES_MAX) {
-		TFP_DRV_LOG(ERR, "Too many entries for shadow %d > %d\n",
-			    num_entries,
-			    TF_SHADOW_ENTRIES_MAX);
-		return -ENOMEM;
-	}
-
-	while (hash_size < num_entries)
-		hash_size = hash_size << 1;
-
-	hash_mask = hash_size - 1;
-
-	/* Allocate the hash table */
-	cparms.nitems = hash_size;
-	cparms.size = sizeof(uint64_t);
-	cparms.alignment = 0;
-	rc = tfp_calloc(&cparms);
-	if (rc)
-		goto error;
-	ctxt->hash_ctxt.hashtbl = cparms.mem_va;
-	ctxt->hash_ctxt.hid_mask = hash_mask;
-	ctxt->hash_ctxt.hash_entries = hash_size;
-
-	/* allocate the shadow tables */
-	/* allocate the shadow key table */
-	cparms.nitems = num_entries;
-	cparms.size = sizeof(struct tf_shadow_tbl_shadow_key_entry);
-	cparms.alignment = 0;
-	rc = tfp_calloc(&cparms);
-	if (rc)
-		goto error;
-	ctxt->shadow_ctxt.sh_key_tbl = cparms.mem_va;
-
-	/* allocate the shadow result table */
-	cparms.nitems = num_entries;
-	cparms.size = sizeof(struct tf_shadow_tbl_shadow_result_entry);
-	cparms.alignment = 0;
-	rc = tfp_calloc(&cparms);
-	if (rc)
-		goto error;
-	ctxt->shadow_ctxt.sh_res_tbl = cparms.mem_va;
-
-	ctxt->shadow_ctxt.num_entries = num_entries;
-	ctxt->shadow_ctxt.base_addr = base_addr;
-
-	return 0;
-error:
-	tf_shadow_tbl_ctxt_delete(ctxt);
-
-	return -ENOMEM;
-}
-
-/**
- * Get a shadow table context given the db and the table type
- */
-static struct tf_shadow_tbl_ctxt *
-tf_shadow_tbl_ctxt_get(struct tf_shadow_tbl_db *shadow_db,
-		       enum tf_tbl_type type)
-{
-	if (type >= TF_TBL_TYPE_MAX ||
-	    !shadow_db ||
-	    !shadow_db->ctxt[type])
-		return NULL;
-
-	return shadow_db->ctxt[type];
-}
-
-/**
- * Sets the hash entry into the table given the table context, hash bucket
- * handle, and shadow index.
- */
-static inline int
-tf_shadow_tbl_set_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
-			     uint32_t hb_handle,
-			     uint16_t sh_idx)
-{
-	uint16_t hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
-	uint16_t be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
-	uint64_t entry = sh_idx | TF_SHADOW_BE_VALID;
-
-	if (hid >= ctxt->hash_ctxt.hash_entries)
-		return -EINVAL;
-
-	ctxt->hash_ctxt.hashtbl[hid] |= entry << (be * 16);
-	return 0;
-}
-
-/**
- * Clears the hash entry given the TBL context and hash bucket handle.
- */
-static inline void
-tf_shadow_tbl_clear_hash_entry(struct tf_shadow_tbl_ctxt *ctxt,
-			       uint32_t hb_handle)
-{
-	uint16_t hid, be;
-	uint64_t *bucket;
-
-	if (!TF_SHADOW_HB_HANDLE_IS_VALID(hb_handle))
-		return;
-
-	hid = TF_SHADOW_HB_HANDLE_HASH_GET(ctxt, hb_handle);
-	be = TF_SHADOW_HB_HANDLE_BE_GET(hb_handle);
-	bucket = &ctxt->hash_ctxt.hashtbl[hid];
-
-	switch (be) {
-	case 0:
-		*bucket = TF_SHADOW_BE0_MASK_CLEAR(*bucket);
-		break;
-	case 1:
-		*bucket = TF_SHADOW_BE1_MASK_CLEAR(*bucket);
-		break;
-	case 2:
-		*bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
-		break;
-	case 3:
-		*bucket = TF_SHADOW_BE2_MASK_CLEAR(*bucket);
-		break;
-	default:
-		/*
-		 * Since the BE_GET masks non-inclusive bits, this will not
-		 * happen.
-		 */
-		break;
-	}
-}
-
-/**
- * Clears the shadow key and result entries given the table context and
- * shadow index.
- */
-static void
-tf_shadow_tbl_clear_sh_entry(struct tf_shadow_tbl_ctxt *ctxt,
-			     uint16_t sh_idx)
-{
-	struct tf_shadow_tbl_shadow_key_entry *sk_entry;
-	struct tf_shadow_tbl_shadow_result_entry *sr_entry;
-
-	if (sh_idx >= tf_shadow_tbl_sh_num_entries_get(ctxt))
-		return;
-
-	sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[sh_idx];
-	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[sh_idx];
-
-	/*
-	 * memset key/result to zero for now, possibly leave the data alone
-	 * in the future and rely on the valid bit in the hash table.
-	 */
-	memset(sk_entry, 0, sizeof(struct tf_shadow_tbl_shadow_key_entry));
-	memset(sr_entry, 0, sizeof(struct tf_shadow_tbl_shadow_result_entry));
-}
-
-/**
- * Binds the allocated tbl index with the hash and shadow tables.
- * The entry will be incomplete until the set has happened with the result
- * data.
- */
-int
-tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms)
-{
-	int rc;
-	uint16_t idx, len;
-	struct tf_shadow_tbl_ctxt *ctxt;
-	struct tf_shadow_tbl_db *shadow_db;
-	struct tf_shadow_tbl_shadow_key_entry *sk_entry;
-	struct tf_shadow_tbl_shadow_result_entry *sr_entry;
-
-	if (!parms || !TF_SHADOW_HB_HANDLE_IS_VALID(parms->hb_handle) ||
-	    !parms->data) {
-		TFP_DRV_LOG(ERR, "Invalid parms\n");
-		return -EINVAL;
-	}
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	ctxt = tf_shadow_tbl_ctxt_get(shadow_db, parms->type);
-	if (!ctxt) {
-		TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
-			    tf_tbl_type_2_str(parms->type));
-		return -EINVAL;
-	}
-
-	idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, parms->idx);
-	len = parms->data_sz_in_bytes;
-	if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt) ||
-	    len > TF_SHADOW_MAX_KEY_SZ) {
-		TFP_DRV_LOG(ERR, "%s:%s Invalid len (%d) > %d || oob idx %d\n",
-			    tf_dir_2_str(parms->dir),
-			    tf_tbl_type_2_str(parms->type),
-			    len,
-			    TF_SHADOW_MAX_KEY_SZ, idx);
-
-		return -EINVAL;
-	}
-
-	rc = tf_shadow_tbl_set_hash_entry(ctxt, parms->hb_handle, idx);
-	if (rc)
-		return -EINVAL;
-
-	sk_entry = &ctxt->shadow_ctxt.sh_key_tbl[idx];
-	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
-
-	/* For tables, the data is the key */
-	memcpy(sk_entry->key, parms->data, len);
-
-	/* Write the result table */
-	sr_entry->key_size = len;
-	sr_entry->hb_handle = parms->hb_handle;
-	sr_entry->refcnt = 1;
-
-	return 0;
-}
-
-/**
- * Deletes hash/shadow information if no more references.
- *
- * Returns 0 - The caller should delete the table entry in hardware.
- * Returns non-zero - The number of references to the entry
- */
-int
-tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms)
-{
-	uint16_t idx;
-	uint32_t hb_handle;
-	struct tf_shadow_tbl_ctxt *ctxt;
-	struct tf_shadow_tbl_db *shadow_db;
-	struct tf_tbl_free_parms *fparms;
-	struct tf_shadow_tbl_shadow_result_entry *sr_entry;
-
-	if (!parms || !parms->fparms) {
-		TFP_DRV_LOG(ERR, "Invalid parms\n");
-		return -EINVAL;
-	}
-
-	fparms = parms->fparms;
-	if (!tf_shadow_tbl_is_searchable(fparms->type))
-		return 0;
-	/*
-	 * Initialize the ref count to zero.  The default would be to remove
-	 * the entry.
-	 */
-	fparms->ref_cnt = 0;
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	ctxt = tf_shadow_tbl_ctxt_get(shadow_db, fparms->type);
-	if (!ctxt) {
-		TFP_DRV_LOG(DEBUG, "%s no ctxt for table\n",
-			    tf_tbl_type_2_str(fparms->type));
-		return 0;
-	}
-
-	idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, fparms->idx);
-	if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
-		TFP_DRV_LOG(DEBUG, "%s %d >= %d\n",
-			    tf_tbl_type_2_str(fparms->type),
-			    fparms->idx,
-			    tf_shadow_tbl_sh_num_entries_get(ctxt));
-		return 0;
-	}
-
-	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
-	if (sr_entry->refcnt <= 1) {
-		hb_handle = sr_entry->hb_handle;
-		tf_shadow_tbl_clear_hash_entry(ctxt, hb_handle);
-		tf_shadow_tbl_clear_sh_entry(ctxt, idx);
-	} else {
-		sr_entry->refcnt--;
-		fparms->ref_cnt = sr_entry->refcnt;
-	}
-
-	return 0;
-}
-
-int
-tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms)
-{
-	uint16_t len;
-	uint64_t bucket;
-	uint32_t i, hid32;
-	struct tf_shadow_tbl_ctxt *ctxt;
-	struct tf_shadow_tbl_db *shadow_db;
-	uint16_t hid16, hb_idx, hid_mask, shtbl_idx, shtbl_key, be_valid;
-	struct tf_tbl_alloc_search_parms *sparms;
-	uint32_t be_avail = TF_SHADOW_HB_NUM_ELEM;
-
-	if (!parms || !parms->sparms) {
-		TFP_DRV_LOG(ERR, "tbl search with invalid parms\n");
-		return -EINVAL;
-	}
-
-	sparms = parms->sparms;
-	/* Check that caller was supposed to call search */
-	if (!tf_shadow_tbl_is_searchable(sparms->type))
-		return -EINVAL;
-
-	/* Initialize return values to invalid */
-	sparms->hit = 0;
-	sparms->search_status = REJECT;
-	parms->hb_handle = 0;
-	sparms->ref_cnt = 0;
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
-	if (!ctxt) {
-		TFP_DRV_LOG(ERR, "%s Unable to get tbl mgr context\n",
-			    tf_tbl_type_2_str(sparms->type));
-		return -EINVAL;
-	}
-
-	len = sparms->result_sz_in_bytes;
-	if (len > TF_SHADOW_MAX_KEY_SZ || !sparms->result || !len) {
-		TFP_DRV_LOG(ERR, "%s:%s Invalid parms %d : %p\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type),
-			    len,
-			    sparms->result);
-		return -EINVAL;
-	}
-
-	/*
-	 * Calculate the crc32
-	 * Fold it to create a 16b value
-	 * Reduce it to fit the table
-	 */
-	hid32 = tf_hash_calc_crc32(sparms->result, len);
-	hid16 = (uint16_t)(((hid32 >> 16) & 0xffff) ^ (hid32 & 0xffff));
-	hid_mask = ctxt->hash_ctxt.hid_mask;
-	hb_idx = hid16 & hid_mask;
-
-	bucket = ctxt->hash_ctxt.hashtbl[hb_idx];
-	if (!bucket) {
-		/* empty bucket means a miss and available entry */
-		sparms->search_status = MISS;
-		parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, 0);
-		sparms->idx = 0;
-		return 0;
-	}
-
-	/* Set the avail to max so we can detect when there is an avail entry */
-	be_avail = TF_SHADOW_HB_NUM_ELEM;
-	for (i = 0; i < TF_SHADOW_HB_NUM_ELEM; i++) {
-		shtbl_idx = (uint16_t)((bucket >> (i * 16)) & 0xffff);
-		be_valid = TF_SHADOW_BE_IS_VALID(shtbl_idx);
-		if (!be_valid) {
-			/* The element is avail, keep going */
-			be_avail = i;
-			continue;
-		}
-		/* There is a valid entry, compare it */
-		shtbl_key = shtbl_idx & ~TF_SHADOW_BE_VALID;
-		if (!tf_shadow_tbl_key_cmp(ctxt,
-					   sparms->result,
-					   shtbl_key,
-					   len)) {
-			/*
-			 * It matches, increment the ref count if the caller
-			 * requested allocation and return the info
-			 */
-			if (sparms->alloc)
-				ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt =
-			ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt + 1;
-
-			sparms->hit = 1;
-			sparms->search_status = HIT;
-			parms->hb_handle =
-				TF_SHADOW_HB_HANDLE_CREATE(hb_idx, i);
-			sparms->idx = TF_SHADOW_SHIDX_TO_IDX(ctxt, shtbl_key);
-			sparms->ref_cnt =
-				ctxt->shadow_ctxt.sh_res_tbl[shtbl_key].refcnt;
-
-			return 0;
-		}
-	}
-
-	/* No hits, return avail entry if exists */
-	if (be_avail < TF_SHADOW_HB_NUM_ELEM) {
-		/*
-		 * There is an available hash entry, so return MISS and the
-		 * hash handle for the subsequent bind.
-		 */
-		parms->hb_handle = TF_SHADOW_HB_HANDLE_CREATE(hb_idx, be_avail);
-		sparms->search_status = MISS;
-		sparms->hit = 0;
-		sparms->idx = 0;
-	} else {
-		/* No room for the entry in the hash table, must REJECT */
-		sparms->search_status = REJECT;
-	}
-
-	return 0;
-}
-
-int
-tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms)
-{
-	uint16_t idx;
-	struct tf_shadow_tbl_ctxt *ctxt;
-	struct tf_tbl_set_parms *sparms;
-	struct tf_shadow_tbl_db *shadow_db;
-	struct tf_shadow_tbl_shadow_result_entry *sr_entry;
-
-	if (!parms || !parms->sparms) {
-		TFP_DRV_LOG(ERR, "Null parms\n");
-		return -EINVAL;
-	}
-
-	sparms = parms->sparms;
-	if (!sparms->data || !sparms->data_sz_in_bytes) {
-		TFP_DRV_LOG(ERR, "%s:%s No result to set.\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type));
-		return -EINVAL;
-	}
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	ctxt = tf_shadow_tbl_ctxt_get(shadow_db, sparms->type);
-	if (!ctxt) {
-		/* We aren't tracking this table, so return success */
-		TFP_DRV_LOG(DEBUG, "%s Unable to get tbl mgr context\n",
-			    tf_tbl_type_2_str(sparms->type));
-		return 0;
-	}
-
-	idx = TF_SHADOW_IDX_TO_SHIDX(ctxt, sparms->idx);
-	if (idx >= tf_shadow_tbl_sh_num_entries_get(ctxt)) {
-		TFP_DRV_LOG(ERR, "%s:%s Invalid idx(0x%x)\n",
-			    tf_dir_2_str(sparms->dir),
-			    tf_tbl_type_2_str(sparms->type),
-			    sparms->idx);
-		return -EINVAL;
-	}
-
-	/* Write the result table, the key/hash has been written already */
-	sr_entry = &ctxt->shadow_ctxt.sh_res_tbl[idx];
-
-	/*
-	 * If the handle is not valid, the bind was never called.  We aren't
-	 * tracking this entry.
-	 */
-	if (!TF_SHADOW_HB_HANDLE_IS_VALID(sr_entry->hb_handle))
-		return 0;
-
-	return 0;
-}
-
-int
-tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms)
-{
-	struct tf_shadow_tbl_db *shadow_db;
-	int i;
-
-	TF_CHECK_PARMS1(parms);
-
-	shadow_db = (struct tf_shadow_tbl_db *)parms->shadow_db;
-	if (!shadow_db) {
-		TFP_DRV_LOG(DEBUG, "Shadow db is NULL cannot be freed\n");
-		return -EINVAL;
-	}
-
-	for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
-		if (shadow_db->ctxt[i]) {
-			tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
-			tfp_free(shadow_db->ctxt[i]);
-		}
-	}
-
-	tfp_free(shadow_db);
-
-	return 0;
-}
-
-/**
- * Allocate the table resources for search and allocate
- *
- */
-int tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms)
-{
-	int rc;
-	int i;
-	uint16_t base;
-	struct tfp_calloc_parms cparms;
-	struct tf_shadow_tbl_db *shadow_db = NULL;
-
-	TF_CHECK_PARMS1(parms);
-
-	/* Build the shadow DB per the request */
-	cparms.nitems = 1;
-	cparms.size = sizeof(struct tf_shadow_tbl_db);
-	cparms.alignment = 0;
-	rc = tfp_calloc(&cparms);
-	if (rc)
-		return rc;
-	shadow_db = (void *)cparms.mem_va;
-
-	for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
-		/* If the element didn't request an allocation no need
-		 * to create a pool nor verify if we got a reservation.
-		 */
-		if (!parms->cfg->alloc_cnt[i] ||
-		    !tf_shadow_tbl_is_searchable(i)) {
-			shadow_db->ctxt[i] = NULL;
-			continue;
-		}
-
-		cparms.nitems = 1;
-		cparms.size = sizeof(struct tf_shadow_tbl_ctxt);
-		cparms.alignment = 0;
-		rc = tfp_calloc(&cparms);
-		if (rc)
-			goto error;
-
-		shadow_db->ctxt[i] = cparms.mem_va;
-		base = parms->cfg->base_addr[i];
-		rc = tf_shadow_tbl_ctxt_create(shadow_db->ctxt[i],
-						parms->cfg->alloc_cnt[i],
-						base);
-		if (rc)
-			goto error;
-	}
-
-	*parms->shadow_db = (void *)shadow_db;
-
-	TFP_DRV_LOG(INFO,
-		    "TF SHADOW TABLE - initialized\n");
-
-	return 0;
-error:
-	for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
-		if (shadow_db->ctxt[i]) {
-			tf_shadow_tbl_ctxt_delete(shadow_db->ctxt[i]);
-			tfp_free(shadow_db->ctxt[i]);
-		}
-	}
-
-	tfp_free(shadow_db);
-
-	return -ENOMEM;
-}
diff --git a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h b/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
deleted file mode 100644
index 354240efce..0000000000
--- a/drivers/net/bnxt/tf_core/tf_shadow_tbl.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2019-2021 Broadcom
- * All rights reserved.
- */
-
-#ifndef _TF_SHADOW_TBL_H_
-#define _TF_SHADOW_TBL_H_
-
-#include "tf_core.h"
-
-/**
- * The Shadow Table module provides shadow DB handling for table based
- * TF types. A shadow DB provides the capability that allows for reuse
- * of TF resources.
- *
- * A Shadow table DB is intended to be used by the Table Type module
- * only.
- */
-
-/**
- * Shadow DB configuration information for a single table type.
- *
- * During Device initialization the HCAPI device specifics are learned
- * and as well as the RM DB creation. From that those initial steps
- * this structure can be populated.
- *
- * NOTE:
- * If used in an array of table types then such array must be ordered
- * by the TF type is represents.
- */
-struct tf_shadow_tbl_cfg_parms {
-	/**
-	 * [in] The number of elements in the alloc_cnt and base_addr
-	 * For now, it should always be equal to TF_TBL_TYPE_MAX
-	 */
-	int num_entries;
-
-	/**
-	 * [in] Resource allocation count array
-	 * This array content originates from the tf_session_resources
-	 * that is passed in on session open
-	 * Array size is TF_TBL_TYPE_MAX
-	 */
-	uint16_t *alloc_cnt;
-	/**
-	 * [in] The base index for each table
-	 */
-	uint16_t base_addr[TF_TBL_TYPE_MAX];
-};
-
-/**
- * Shadow table DB creation parameters
- */
-struct tf_shadow_tbl_create_db_parms {
-	/**
-	 * [in] Receive or transmit direction
-	 */
-	enum tf_dir dir;
-	/**
-	 * [in] Configuration information for the shadow db
-	 */
-	struct tf_shadow_tbl_cfg_parms *cfg;
-	/**
-	 * [out] Shadow table DB handle
-	 */
-	void **shadow_db;
-};
-
-/**
- * Shadow table DB free parameters
- */
-struct tf_shadow_tbl_free_db_parms {
-	/**
-	 * [in] Shadow table DB handle
-	 */
-	void *shadow_db;
-};
-
-/**
- * Shadow table search parameters
- */
-struct tf_shadow_tbl_search_parms {
-	/**
-	 * [in] Shadow table DB handle
-	 */
-	void *shadow_db;
-	/**
-	 * [in,out] The search parms from tf core
-	 */
-	struct tf_tbl_alloc_search_parms *sparms;
-	/**
-	 * [out] Reference count incremented if hit
-	 */
-	uint32_t hb_handle;
-};
-
-/**
- * Shadow Table bind index parameters
- */
-struct tf_shadow_tbl_bind_index_parms {
-	/**
-	 * [in] Shadow tcam DB handle
-	 */
-	void *shadow_db;
-	/**
-	 * [in] receive or transmit direction
-	 */
-	enum tf_dir dir;
-	/**
-	 * [in] TCAM table type
-	 */
-	enum tf_tbl_type type;
-	/**
-	 * [in] index of the entry to program
-	 */
-	uint16_t idx;
-	/**
-	 * [in] struct containing key
-	 */
-	uint8_t *data;
-	/**
-	 * [in] data size in bytes
-	 */
-	uint16_t data_sz_in_bytes;
-	/**
-	 * [in] The hash bucket handled returned from the search
-	 */
-	uint32_t hb_handle;
-};
-
-/**
- * Shadow table insert parameters
- */
-struct tf_shadow_tbl_insert_parms {
-	/**
-	 * [in] Shadow table DB handle
-	 */
-	void *shadow_db;
-	/**
-	 * [in] The insert parms from tf core
-	 */
-	struct tf_tbl_set_parms *sparms;
-};
-
-/**
- * Shadow table remove parameters
- */
-struct tf_shadow_tbl_remove_parms {
-	/**
-	 * [in] Shadow table DB handle
-	 */
-	void *shadow_db;
-	/**
-	 * [in] The free parms from tf core
-	 */
-	struct tf_tbl_free_parms *fparms;
-};
-
-/**
- * @page shadow_tbl Shadow table DB
- *
- * @ref tf_shadow_tbl_create_db
- *
- * @ref tf_shadow_tbl_free_db
- *
- * @reg tf_shadow_tbl_search
- *
- * @reg tf_shadow_tbl_insert
- *
- * @reg tf_shadow_tbl_remove
- */
-
-/**
- * Creates and fills a Shadow table DB. The DB is indexed per the
- * parms structure.
- *
- * [in] parms
- *   Pointer to create db parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_shadow_tbl_create_db(struct tf_shadow_tbl_create_db_parms *parms);
-
-/**
- * Closes the Shadow table DB and frees all allocated
- * resources per the associated database.
- *
- * [in] parms
- *   Pointer to the free DB parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_shadow_tbl_free_db(struct tf_shadow_tbl_free_db_parms *parms);
-
-/**
- * Search Shadow table db for matching result
- *
- * [in] parms
- *   Pointer to the search parameters
- *
- * Returns
- *   - (0) if successful, element was found.
- *   - (-EINVAL) on failure.
- *
- * If there is a miss, but there is room for insertion, the hb_handle returned
- * is used for insertion during the bind index API
- */
-int tf_shadow_tbl_search(struct tf_shadow_tbl_search_parms *parms);
-
-/**
- * Bind Shadow table db hash and result tables with result from search/alloc
- *
- * [in] parms
- *   Pointer to the search parameters
- *
- * Returns
- *   - (0) if successful
- *   - (-EINVAL) on failure.
- *
- * This is only called after a MISS in the search returns a hb_handle
- */
-int tf_shadow_tbl_bind_index(struct tf_shadow_tbl_bind_index_parms *parms);
-
-/**
- * Inserts an element into the Shadow table DB. Will fail if the
- * elements ref_count is different from 0. Ref_count after insert will
- * be incremented.
- *
- * [in] parms
- *   Pointer to insert parameters
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_shadow_tbl_insert(struct tf_shadow_tbl_insert_parms *parms);
-
-/**
- * Removes an element from the Shadow table DB. Will fail if the
- * elements ref_count is 0. Ref_count after removal will be
- * decremented.
- *
- * [in] parms
- *   Pointer to remove parameter
- *
- * Returns
- *   - (0) if successful.
- *   - (-EINVAL) on failure.
- */
-int tf_shadow_tbl_remove(struct tf_shadow_tbl_remove_parms *parms);
-
-#endif /* _TF_SHADOW_TBL_H_ */
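
For context on what the removal above takes away: the shadow table API was
driven as create -> search -> bind -> free.  Below is a minimal sketch of that
sequence (illustrative only, not part of this patch), pieced together from the
doc comments in the deleted header.  The index handed to the bind step stands
in for one obtained from the table allocator, and error handling is mostly
omitted.

static int shadow_tbl_flow_example(struct tf_tbl_alloc_search_parms *parms,
				   uint16_t *alloc_cnt, uint16_t new_idx)
{
	void *shadow_db = NULL;
	struct tf_shadow_tbl_cfg_parms cfg = { 0 };
	struct tf_shadow_tbl_create_db_parms cdb = { 0 };
	struct tf_shadow_tbl_search_parms srch = { 0 };
	struct tf_shadow_tbl_bind_index_parms bind = { 0 };
	struct tf_shadow_tbl_free_db_parms fdb = { 0 };
	int rc;

	/* Create a per-direction shadow DB from the allocation counts */
	cfg.num_entries = TF_TBL_TYPE_MAX;
	cfg.alloc_cnt = alloc_cnt;
	cdb.dir = parms->dir;
	cdb.cfg = &cfg;
	cdb.shadow_db = &shadow_db;
	rc = tf_shadow_tbl_create_db(&cdb);
	if (rc)
		return rc;

	/* Search; a MISS with room returns an hb_handle for the later bind */
	srch.shadow_db = shadow_db;
	srch.sparms = parms;
	rc = tf_shadow_tbl_search(&srch);

	if (!rc && parms->alloc && parms->search_status == MISS) {
		/* Bind the result data to an index from the table allocator */
		bind.shadow_db = shadow_db;
		bind.dir = parms->dir;
		bind.type = parms->type;
		bind.idx = new_idx;
		bind.data = parms->result;
		bind.data_sz_in_bytes = parms->result_sz_in_bytes;
		bind.hb_handle = srch.hb_handle;
		rc = tf_shadow_tbl_bind_index(&bind);
	}

	/* Tear the shadow DB back down */
	fdb.shadow_db = shadow_db;
	tf_shadow_tbl_free_db(&fdb);
	return rc;
}
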
diff --git a/drivers/net/bnxt/tf_core/tf_tbl.c b/drivers/net/bnxt/tf_core/tf_tbl.c
index 67a43311cc..7d15c3c5d4 100644
--- a/drivers/net/bnxt/tf_core/tf_tbl.c
+++ b/drivers/net/bnxt/tf_core/tf_tbl.c
@@ -13,11 +13,9 @@
 #include "tf_util.h"
 #include "tf_msg.h"
 #include "tfp.h"
-#include "tf_shadow_tbl.h"
 #include "tf_session.h"
 #include "tf_device.h"
 
-
 struct tf;
 
 /**
@@ -44,13 +42,7 @@ int
 tf_tbl_bind(struct tf *tfp,
 	    struct tf_tbl_cfg_parms *parms)
 {
-	int rc, d, i;
-	struct tf_rm_alloc_info info;
-	struct tf_rm_free_db_parms fparms;
-	struct tf_shadow_tbl_free_db_parms fshadow;
-	struct tf_rm_get_alloc_info_parms ainfo;
-	struct tf_shadow_tbl_cfg_parms shadow_cfg;
-	struct tf_shadow_tbl_create_db_parms shadow_cdb;
+	int rc, d;
 	struct tf_rm_create_db_parms db_cfg = { 0 };
 
 	TF_CHECK_PARMS2(tfp, parms);
@@ -62,7 +54,7 @@ tf_tbl_bind(struct tf *tfp,
 	}
 
 	db_cfg.num_elements = parms->num_elements;
-	db_cfg.type = TF_DEVICE_MODULE_TYPE_TABLE;
+	db_cfg.module = TF_MODULE_TYPE_TABLE;
 	db_cfg.num_elements = parms->num_elements;
 	db_cfg.cfg = parms->cfg;
 
@@ -80,72 +72,12 @@ tf_tbl_bind(struct tf *tfp,
 		}
 	}
 
-	/* Initialize the Shadow Table. */
-	if (parms->shadow_copy) {
-		for (d = 0; d < TF_DIR_MAX; d++) {
-			memset(&shadow_cfg, 0, sizeof(shadow_cfg));
-			memset(&shadow_cdb, 0, sizeof(shadow_cdb));
-			/* Get the base addresses of the tables */
-			for (i = 0; i < TF_TBL_TYPE_MAX; i++) {
-				memset(&info, 0, sizeof(info));
-
-				if (!parms->resources->tbl_cnt[d].cnt[i])
-					continue;
-				ainfo.rm_db = tbl_db[d];
-				ainfo.db_index = i;
-				ainfo.info = &info;
-				rc = tf_rm_get_info(&ainfo);
-				if (rc)
-					goto error;
-
-				shadow_cfg.base_addr[i] = info.entry.start;
-			}
-
-			/* Create the shadow db */
-			shadow_cfg.alloc_cnt =
-				parms->resources->tbl_cnt[d].cnt;
-			shadow_cfg.num_entries = parms->num_elements;
-
-			shadow_cdb.shadow_db = &shadow_tbl_db[d];
-			shadow_cdb.cfg = &shadow_cfg;
-			rc = tf_shadow_tbl_create_db(&shadow_cdb);
-			if (rc) {
-				TFP_DRV_LOG(ERR,
-					    "Shadow TBL DB creation failed "
-					    "rc=%d\n", rc);
-				goto error;
-			}
-		}
-		shadow_init = 1;
-	}
-
 	init = 1;
 
 	TFP_DRV_LOG(INFO,
 		    "Table Type - initialized\n");
 
 	return 0;
-error:
-	for (d = 0; d < TF_DIR_MAX; d++) {
-		memset(&fparms, 0, sizeof(fparms));
-		fparms.dir = d;
-		fparms.rm_db = tbl_db[d];
-		/* Ignoring return here since we are in the error case */
-		(void)tf_rm_free_db(tfp, &fparms);
-
-		if (parms->shadow_copy) {
-			fshadow.shadow_db = shadow_tbl_db[d];
-			tf_shadow_tbl_free_db(&fshadow);
-			shadow_tbl_db[d] = NULL;
-		}
-
-		tbl_db[d] = NULL;
-	}
-
-	shadow_init = 0;
-	init = 0;
-
-	return rc;
 }
 
 int
@@ -154,8 +86,6 @@ tf_tbl_unbind(struct tf *tfp)
 	int rc;
 	int i;
 	struct tf_rm_free_db_parms fparms = { 0 };
-	struct tf_shadow_tbl_free_db_parms fshadow;
-
 	TF_CHECK_PARMS1(tfp);
 
 	/* Bail if nothing has been initialized */
@@ -173,13 +103,6 @@ tf_tbl_unbind(struct tf *tfp)
 			return rc;
 
 		tbl_db[i] = NULL;
-
-		if (shadow_init) {
-			memset(&fshadow, 0, sizeof(fshadow));
-			fshadow.shadow_db = shadow_tbl_db[i];
-			tf_shadow_tbl_free_db(&fshadow);
-			shadow_tbl_db[i] = NULL;
-		}
 	}
 
 	init = 0;
@@ -207,7 +130,7 @@ tf_tbl_alloc(struct tf *tfp __rte_unused,
 
 	/* Allocate requested element */
 	aparms.rm_db = tbl_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = &idx;
 	rc = tf_rm_allocate(&aparms);
 	if (rc) {
@@ -230,7 +153,6 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 	int rc;
 	struct tf_rm_is_allocated_parms aparms = { 0 };
 	struct tf_rm_free_parms fparms = { 0 };
-	struct tf_shadow_tbl_remove_parms shparms;
 	int allocated = 0;
 
 	TF_CHECK_PARMS2(tfp, parms);
@@ -244,7 +166,7 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 
 	/* Check if element is in use */
 	aparms.rm_db = tbl_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->idx;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
@@ -259,40 +181,9 @@ tf_tbl_free(struct tf *tfp __rte_unused,
 			    parms->idx);
 		return -EINVAL;
 	}
-
-	/*
-	 * The Shadow mgmt, if enabled, determines if the entry needs
-	 * to be deleted.
-	 */
-	if (shadow_init) {
-		memset(&shparms, 0, sizeof(shparms));
-		shparms.shadow_db = shadow_tbl_db[parms->dir];
-		shparms.fparms = parms;
-		rc = tf_shadow_tbl_remove(&shparms);
-		if (rc) {
-			/*
-			 * Should not get here, log it and let the entry be
-			 * deleted.
-			 */
-			TFP_DRV_LOG(ERR, "%s: Shadow free fail, "
-				    "type:%d index:%d deleting the entry.\n",
-				    tf_dir_2_str(parms->dir),
-				    parms->type,
-				    parms->idx);
-		} else {
-			/*
-			 * If the entry still has references, just return the
-			 * ref count to the caller.  No need to remove entry
-			 * from rm.
-			 */
-			if (parms->ref_cnt >= 1)
-				return rc;
-		}
-	}
-
 	/* Free requested element */
 	fparms.rm_db = tbl_db[parms->dir];
-	fparms.db_index = parms->type;
+	fparms.subtype = parms->type;
 	fparms.index = parms->idx;
 	rc = tf_rm_free(&fparms);
 	if (rc) {
@@ -311,15 +202,7 @@ int
 tf_tbl_alloc_search(struct tf *tfp,
 		    struct tf_tbl_alloc_search_parms *parms)
 {
-	int rc, frc;
-	uint32_t idx;
-	struct tf_session *tfs;
-	struct tf_dev_info *dev;
-	struct tf_tbl_alloc_parms aparms;
-	struct tf_shadow_tbl_search_parms sparms;
-	struct tf_shadow_tbl_bind_index_parms bparms;
-	struct tf_tbl_free_parms fparms;
-
+	int rc = 0;
 	TF_CHECK_PARMS2(tfp, parms);
 
 	if (!shadow_init || !shadow_tbl_db[parms->dir]) {
@@ -328,103 +211,6 @@ tf_tbl_alloc_search(struct tf *tfp,
 		return -EINVAL;
 	}
 
-	memset(&sparms, 0, sizeof(sparms));
-	sparms.sparms = parms;
-	sparms.shadow_db = shadow_tbl_db[parms->dir];
-	rc = tf_shadow_tbl_search(&sparms);
-	if (rc)
-		return rc;
-
-	/*
-	 * The app didn't request us to alloc the entry, so return now.
-	 * The hit should have been updated in the original search parm.
-	 */
-	if (!parms->alloc || parms->search_status != MISS)
-		return rc;
-
-	/* Retrieve the session information */
-	rc = tf_session_get_session(tfp, &tfs);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup session, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Retrieve the device information */
-	rc = tf_session_get_device(tfs, &dev);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Failed to lookup device, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Allocate the index */
-	if (dev->ops->tf_dev_alloc_tbl == NULL) {
-		rc = -EOPNOTSUPP;
-		TFP_DRV_LOG(ERR,
-			    "%s: Operation not supported, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return -EOPNOTSUPP;
-	}
-
-	memset(&aparms, 0, sizeof(aparms));
-	aparms.dir = parms->dir;
-	aparms.type = parms->type;
-	aparms.tbl_scope_id = parms->tbl_scope_id;
-	aparms.idx = &idx;
-	rc = dev->ops->tf_dev_alloc_tbl(tfp, &aparms);
-	if (rc) {
-		TFP_DRV_LOG(ERR,
-			    "%s: Table allocation failed, rc:%s\n",
-			    tf_dir_2_str(parms->dir),
-			    strerror(-rc));
-		return rc;
-	}
-
-	/* Bind the allocated index to the data */
-	memset(&bparms, 0, sizeof(bparms));
-	bparms.shadow_db = shadow_tbl_db[parms->dir];
-	bparms.dir = parms->dir;
-	bparms.type = parms->type;
-	bparms.idx = idx;
-	bparms.data = parms->result;
-	bparms.data_sz_in_bytes = parms->result_sz_in_bytes;
-	bparms.hb_handle = sparms.hb_handle;
-	rc = tf_shadow_tbl_bind_index(&bparms);
-	if (rc) {
-		/* Error binding entry, need to free the allocated idx */
-		if (dev->ops->tf_dev_free_tbl == NULL) {
-			rc = -EOPNOTSUPP;
-			TFP_DRV_LOG(ERR,
-				    "%s: Operation not supported, rc:%s\n",
-				    tf_dir_2_str(parms->dir),
-				    strerror(-rc));
-			return rc;
-		}
-
-		memset(&fparms, 0, sizeof(fparms));
-		fparms.dir = parms->dir;
-		fparms.type = parms->type;
-		fparms.idx = idx;
-		frc = dev->ops->tf_dev_free_tbl(tfp, &fparms);
-		if (frc) {
-			TFP_DRV_LOG(ERR,
-				    "%s: Failed free index allocated during "
-				    "search. rc=%s\n",
-				    tf_dir_2_str(parms->dir),
-				    strerror(-frc));
-			/* return the original failure. */
-			return rc;
-		}
-	}
-
-	parms->idx = idx;
-
 	return rc;
 }
 
@@ -449,7 +235,7 @@ tf_tbl_set(struct tf *tfp,
 
 	/* Verify that the entry has been previously allocated */
 	aparms.rm_db = tbl_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->idx;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
@@ -467,7 +253,7 @@ tf_tbl_set(struct tf *tfp,
 
 	/* Set the entry */
 	hparms.rm_db = tbl_db[parms->dir];
-	hparms.db_index = parms->type;
+	hparms.subtype = parms->type;
 	hparms.hcapi_type = &hcapi_type;
 	rc = tf_rm_get_hcapi_type(&hparms);
 	if (rc) {
@@ -518,7 +304,7 @@ tf_tbl_get(struct tf *tfp,
 
 	/* Verify that the entry has been previously allocated */
 	aparms.rm_db = tbl_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->idx;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
@@ -536,7 +322,7 @@ tf_tbl_get(struct tf *tfp,
 
 	/* Set the entry */
 	hparms.rm_db = tbl_db[parms->dir];
-	hparms.db_index = parms->type;
+	hparms.subtype = parms->type;
 	hparms.hcapi_type = &hcapi_type;
 	rc = tf_rm_get_hcapi_type(&hparms);
 	if (rc) {
@@ -588,7 +374,7 @@ tf_tbl_bulk_get(struct tf *tfp,
 
 	/* Verify that the entries are in the range of reserved resources. */
 	cparms.rm_db = tbl_db[parms->dir];
-	cparms.db_index = parms->type;
+	cparms.subtype = parms->type;
 	cparms.starting_index = parms->starting_idx;
 	cparms.num_entries = parms->num_entries;
 
@@ -605,7 +391,7 @@ tf_tbl_bulk_get(struct tf *tfp,
 	}
 
 	hparms.rm_db = tbl_db[parms->dir];
-	hparms.db_index = parms->type;
+	hparms.subtype = parms->type;
 	hparms.hcapi_type = &hcapi_type;
 	rc = tf_rm_get_hcapi_type(&hparms);
 	if (rc) {
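
The hunks above switch the RM calls from db_index to the new subtype field.
A minimal sketch of the resulting allocate/free pattern (illustrative only,
not part of this patch; the tf_rm_allocate_parms/tf_rm_free_parms structure
names are assumed from the surrounding driver code, and error handling is
trimmed):

static int tbl_rm_subtype_example(void *rm_db, enum tf_tbl_type type)
{
	struct tf_rm_allocate_parms aparms = { 0 };
	struct tf_rm_free_parms fparms = { 0 };
	uint32_t idx;
	int rc;

	/* Allocate an index for the given table subtype */
	aparms.rm_db = rm_db;
	aparms.subtype = type;		/* was aparms.db_index */
	aparms.index = &idx;
	rc = tf_rm_allocate(&aparms);
	if (rc)
		return rc;

	/* Return the index to the RM pool */
	fparms.rm_db = rm_db;
	fparms.subtype = type;
	fparms.index = idx;
	return tf_rm_free(&fparms);
}
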
diff --git a/drivers/net/bnxt/tf_core/tf_tcam.c b/drivers/net/bnxt/tf_core/tf_tcam.c
index a18d0e1e19..42d503f500 100644
--- a/drivers/net/bnxt/tf_core/tf_tcam.c
+++ b/drivers/net/bnxt/tf_core/tf_tcam.c
@@ -71,7 +71,7 @@ tf_tcam_bind(struct tf *tfp,
 
 	memset(&db_cfg, 0, sizeof(db_cfg));
 
-	db_cfg.type = TF_DEVICE_MODULE_TYPE_TCAM;
+	db_cfg.module = TF_MODULE_TYPE_TCAM;
 	db_cfg.num_elements = parms->num_elements;
 	db_cfg.cfg = parms->cfg;
 
@@ -100,7 +100,7 @@ tf_tcam_bind(struct tf *tfp,
 				if (!parms->resources->tcam_cnt[d].cnt[i])
 					continue;
 				ainfo.rm_db = tcam_db[d];
-				ainfo.db_index = i;
+				ainfo.subtype = i;
 				ainfo.info = &info;
 				rc = tf_rm_get_info(&ainfo);
 				if (rc)
@@ -248,7 +248,7 @@ tf_tcam_alloc(struct tf *tfp,
 	memset(&aparms, 0, sizeof(aparms));
 
 	aparms.rm_db = tcam_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.priority = parms->priority;
 	aparms.index = (uint32_t *)&parms->idx;
 	rc = tf_rm_allocate(&aparms);
@@ -331,7 +331,7 @@ tf_tcam_free(struct tf *tfp,
 	memset(&aparms, 0, sizeof(aparms));
 
 	aparms.rm_db = tcam_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->idx / num_slice_per_row;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
@@ -379,7 +379,7 @@ tf_tcam_free(struct tf *tfp,
 	/* Free requested element */
 	memset(&fparms, 0, sizeof(fparms));
 	fparms.rm_db = tcam_db[parms->dir];
-	fparms.db_index = parms->type;
+	fparms.subtype = parms->type;
 	fparms.index = parms->idx / num_slice_per_row;
 	rc = tf_rm_free(&fparms);
 	if (rc) {
@@ -421,7 +421,7 @@ tf_tcam_free(struct tf *tfp,
 	memset(&hparms, 0, sizeof(hparms));
 
 	hparms.rm_db = tcam_db[parms->dir];
-	hparms.db_index = parms->type;
+	hparms.subtype = parms->type;
 	hparms.hcapi_type = &parms->hcapi_type;
 
 	rc = tf_rm_get_hcapi_type(&hparms);
@@ -625,7 +625,7 @@ tf_tcam_set(struct tf *tfp __rte_unused,
 	memset(&aparms, 0, sizeof(aparms));
 
 	aparms.rm_db = tcam_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->idx / num_slice_per_row;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
@@ -645,7 +645,7 @@ tf_tcam_set(struct tf *tfp __rte_unused,
 	memset(&hparms, 0, sizeof(hparms));
 
 	hparms.rm_db = tcam_db[parms->dir];
-	hparms.db_index = parms->type;
+	hparms.subtype = parms->type;
 	hparms.hcapi_type = &parms->hcapi_type;
 
 	rc = tf_rm_get_hcapi_type(&hparms);
@@ -736,7 +736,7 @@ tf_tcam_get(struct tf *tfp __rte_unused,
 	memset(&aparms, 0, sizeof(aparms));
 
 	aparms.rm_db = tcam_db[parms->dir];
-	aparms.db_index = parms->type;
+	aparms.subtype = parms->type;
 	aparms.index = parms->idx / num_slice_per_row;
 	aparms.allocated = &allocated;
 	rc = tf_rm_is_allocated(&aparms);
@@ -756,7 +756,7 @@ tf_tcam_get(struct tf *tfp __rte_unused,
 	memset(&hparms, 0, sizeof(hparms));
 
 	hparms.rm_db = tcam_db[parms->dir];
-	hparms.db_index = parms->type;
+	hparms.subtype = parms->type;
 	hparms.hcapi_type = &parms->hcapi_type;
 
 	rc = tf_rm_get_hcapi_type(&hparms);
diff --git a/drivers/net/bnxt/tf_core/tf_util.c b/drivers/net/bnxt/tf_core/tf_util.c
index 74c8f26204..b4d47d5a8c 100644
--- a/drivers/net/bnxt/tf_core/tf_util.c
+++ b/drivers/net/bnxt/tf_core/tf_util.c
@@ -137,34 +137,34 @@ tf_em_tbl_type_2_str(enum tf_em_tbl_type em_type)
 }
 
 const char *
-tf_device_module_type_subtype_2_str(enum tf_device_module_type dm_type,
-				    uint16_t mod_type)
+tf_module_subtype_2_str(enum tf_module_type module,
+			uint16_t subtype)
 {
-	switch (dm_type) {
-	case TF_DEVICE_MODULE_TYPE_IDENTIFIER:
-		return tf_ident_2_str(mod_type);
-	case TF_DEVICE_MODULE_TYPE_TABLE:
-		return tf_tbl_type_2_str(mod_type);
-	case TF_DEVICE_MODULE_TYPE_TCAM:
-		return tf_tcam_tbl_2_str(mod_type);
-	case TF_DEVICE_MODULE_TYPE_EM:
-		return tf_em_tbl_type_2_str(mod_type);
+	switch (module) {
+	case TF_MODULE_TYPE_IDENTIFIER:
+		return tf_ident_2_str(subtype);
+	case TF_MODULE_TYPE_TABLE:
+		return tf_tbl_type_2_str(subtype);
+	case TF_MODULE_TYPE_TCAM:
+		return tf_tcam_tbl_2_str(subtype);
+	case TF_MODULE_TYPE_EM:
+		return tf_em_tbl_type_2_str(subtype);
 	default:
-		return "Invalid Device Module type";
+		return "Invalid Module type";
 	}
 }
 
 const char *
-tf_device_module_type_2_str(enum tf_device_module_type dm_type)
+tf_module_2_str(enum tf_module_type module)
 {
-	switch (dm_type) {
-	case TF_DEVICE_MODULE_TYPE_IDENTIFIER:
+	switch (module) {
+	case TF_MODULE_TYPE_IDENTIFIER:
 		return "Identifier";
-	case TF_DEVICE_MODULE_TYPE_TABLE:
+	case TF_MODULE_TYPE_TABLE:
 		return "Table";
-	case TF_DEVICE_MODULE_TYPE_TCAM:
+	case TF_MODULE_TYPE_TCAM:
 		return "TCAM";
-	case TF_DEVICE_MODULE_TYPE_EM:
+	case TF_MODULE_TYPE_EM:
 		return "EM";
 	default:
 		return "Invalid Device Module type";
diff --git a/drivers/net/bnxt/tf_core/tf_util.h b/drivers/net/bnxt/tf_core/tf_util.h
index 4225c756f6..1aa35b6b82 100644
--- a/drivers/net/bnxt/tf_core/tf_util.h
+++ b/drivers/net/bnxt/tf_core/tf_util.h
@@ -65,34 +65,30 @@ const char *tf_tbl_type_2_str(enum tf_tbl_type tbl_type);
 const char *tf_em_tbl_type_2_str(enum tf_em_tbl_type em_type);
 
 /**
- * Helper function converting device module type and module type to
+ * Helper function converting module type and subtype to
  * text string.
  *
- * [in] dm_type
- *   Device Module type
+ * [in] module
+ *   Module type
  *
- * [in] mod_type
- *   Module specific type
+ * [in] subtype
+ *   Module specific subtype
  *
  * Returns:
  *   Pointer to a char string holding the string for the EM type
  */
-const char *tf_device_module_type_subtype_2_str
-					(enum tf_device_module_type dm_type,
-					 uint16_t mod_type);
+const char *tf_module_subtype_2_str(enum tf_module_type module,
+				    uint16_t subtype);
 
 /**
- * Helper function converting device module type to text string
+ * Helper function converting module type to text string
  *
- * [in] dm_type
- *   Device Module type
- *
- * [in] mod_type
- *   Module specific type
+ * [in] module
+ *   Module type
  *
  * Returns:
  *   Pointer to a char string holding the string for the EM type
  */
-const char *tf_device_module_type_2_str(enum tf_device_module_type dm_type);
+const char *tf_module_2_str(enum tf_module_type module);
 
 #endif /* _TF_UTIL_H_ */
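
With the rename, log call sites pair tf_module_2_str() with
tf_module_subtype_2_str().  A minimal sketch of such a call site
(illustrative only, not part of this patch; the module/subtype values are
placeholders and the TFP_DRV_LOG usage mirrors the surrounding driver code):

static void log_rm_failure(enum tf_module_type module, uint16_t subtype)
{
	/* e.g. module = TF_MODULE_TYPE_TABLE, subtype = a tf_tbl_type value */
	TFP_DRV_LOG(ERR, "%s: %s allocation failed\n",
		    tf_module_2_str(module),
		    tf_module_subtype_2_str(module, subtype));
}
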
-- 
2.17.1


Thread overview: 129+ messages
2021-05-30  8:58 [dpdk-dev] [PATCH 00/58] enhancements to host based flow table management Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 01/58] net/bnxt: add CFA folder to HCAPI directory Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 02/58] net/bnxt: add base TRUFLOW support for Thor Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 03/58] net/bnxt: add mailbox selection via dev op Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 04/58] net/bnxt: check resource reservation in TRUFLOW Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 05/58] net/bnxt: update TRUFLOW resources Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 06/58] net/bnxt: add support for EM with FKB Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 07/58] net/bnxt: add L2 Context TCAM get support Venkat Duvvuru
2021-05-30  8:58 ` Venkat Duvvuru [this message]
2021-05-30  8:58 ` [dpdk-dev] [PATCH 09/58] net/bnxt: add Thor WC TCAM support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 10/58] net/bnxt: add 64B SRAM record management with RM Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 11/58] net/bnxt: add hashing changes for Thor Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 12/58] net/bnxt: modify TRUFLOW HWRM messages Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 13/58] net/bnxt: change RM database type Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 14/58] net/bnxt: add shared session support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 15/58] net/bnxt: add dpool allocator for EM allocation Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 16/58] net/bnxt: update shared session functionality Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 17/58] net/bnxt: modify resource reservation strategy Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 18/58] net/bnxt: shared TCAM region support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 19/58] net/bnxt: cleanup session open/close messages Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 20/58] net/bnxt: add WC TCAM hi/lo move support Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 21/58] net/bnxt: add API to get shared table increments Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 22/58] net/bnxt: modify host session failure cleanup Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 23/58] net/bnxt: cleanup of WC TCAM shared unbind Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 24/58] net/bnxt: add support for WC TCAM shared session Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 25/58] net/bnxt: add API to clear hi/lo WC region Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 26/58] net/bnxt: check FW capability to support TRUFLOW Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 27/58] net/bnxt: add support for generic table processing Venkat Duvvuru
2021-05-30  8:58 ` [dpdk-dev] [PATCH 28/58] net/bnxt: add support for mapper flow database opcodes Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 29/58] net/bnxt: add conditional execution and rejection Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 30/58] net/bnxt: modify TCAM opcode processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 31/58] net/bnxt: modify VXLAN decap for multichannel mode Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 32/58] net/bnxt: modify table processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 33/58] net/bnxt: modify ULP priority opcode processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 34/58] net/bnxt: add support for conflict resolution Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 35/58] net/bnxt: add support for conditional goto processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 36/58] net/bnxt: set shared handle for generic table Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 37/58] net/bnxt: modify ULP template Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 38/58] net/bnxt: add conditional opcode and L4 port fields Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 39/58] net/bnxt: refactor TF ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 40/58] net/bnxt: add partial header field processing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 41/58] net/bnxt: add support for wild card pattern match Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 42/58] net/bnxt: add support for GRE flows Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 43/58] net/bnxt: enable extended exact match support Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 44/58] net/bnxt: refactor ULP mapper and parser Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 45/58] net/bnxt: add support for generic hash table Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 46/58] net/bnxt: add support for Thor platform Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 47/58] net/bnxt: refactor flow parser in ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 48/58] net/bnxt: add shared session support to ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 49/58] net/bnxt: add field opcodes in ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 50/58] net/bnxt: add support for application ID in ULP matcher Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 51/58] net/bnxt: process resource lists before session open Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 52/58] net/bnxt: add support for shared sessions in ULP Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 53/58] net/bnxt: add HA support " Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 54/58] net/bnxt: add support for icmp6 ULP parsing Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 55/58] net/bnxt: add support for ULP context list for timers Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 56/58] net/bnxt: cleanup ULP parser and mapper Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 57/58] net/bnxt: reorganize ULP template directory structure Venkat Duvvuru
2021-05-30  8:59 ` [dpdk-dev] [PATCH 58/58] net/bnxt: add Thor template support Venkat Duvvuru
2021-06-13  0:05 ` [dpdk-dev] [PATCH v2 00/58] enhancements to host based flow table management Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 01/58] net/bnxt: add CFA folder to HCAPI directory Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 02/58] net/bnxt: add base TRUFLOW support for Thor Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 03/58] net/bnxt: add mailbox selection via dev op Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 04/58] net/bnxt: check resource reservation in TRUFLOW Ajit Khaparde
2021-06-13  0:05   ` [dpdk-dev] [PATCH v2 05/58] net/bnxt: update TRUFLOW resources Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 06/58] net/bnxt: add support for EM with FKB Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 07/58] net/bnxt: support L2 Context TCAM ops Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 08/58] net/bnxt: add action SRAM translation Ajit Khaparde
2021-07-05 21:23     ` Thomas Monjalon
2021-07-06 22:37       ` [dpdk-dev] [PATCH v3] " Ajit Khaparde
2021-07-06 22:58       ` [dpdk-dev] [PATCH v2 08/58] " Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 09/58] net/bnxt: add Thor WC TCAM support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 10/58] net/bnxt: add 64B SRAM record management with RM Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 11/58] net/bnxt: add hashing changes for Thor Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 12/58] net/bnxt: modify TRUFLOW HWRM messages Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 13/58] net/bnxt: change RM database type Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 14/58] net/bnxt: add shared session support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 15/58] net/bnxt: add dpool allocator for EM allocation Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 16/58] net/bnxt: update shared session functionality Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 17/58] net/bnxt: modify resource reservation strategy Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 18/58] net/bnxt: shared TCAM region support Ajit Khaparde
2021-07-05 21:27     ` Thomas Monjalon
2021-07-06 22:39       ` [dpdk-dev] [PATCH v3] " Ajit Khaparde
2021-07-06 22:57       ` [dpdk-dev] [PATCH v2 18/58] " Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 19/58] net/bnxt: cleanup logs in session handling paths Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 20/58] net/bnxt: add WC TCAM management support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 21/58] net/bnxt: add API to get shared table increments Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 22/58] net/bnxt: refactor host session failure cleanup Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 23/58] net/bnxt: cleanup WC TCAM shared pool Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 24/58] net/bnxt: add support for WC TCAM shared session Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 25/58] net/bnxt: add API to clear TCAM regions Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 26/58] net/bnxt: check FW capability to support TRUFLOW Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 27/58] net/bnxt: add support for generic table processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 28/58] net/bnxt: add support for mapper flow database opcodes Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 29/58] net/bnxt: add conditional processing of templates Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 30/58] net/bnxt: modify TCAM opcode processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 31/58] net/bnxt: modify VXLAN decap for multichannel mode Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 32/58] net/bnxt: modify table processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 33/58] net/bnxt: add ULP priority opcode processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 34/58] net/bnxt: add support to identify duplicate flows Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 35/58] net/bnxt: add conditional goto processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 36/58] net/bnxt: set shared handle for generic table Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 37/58] net/bnxt: modify ULP template Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 38/58] net/bnxt: add conditional opcode and L4 port fields Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 39/58] net/bnxt: refactor TRUFLOW processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 40/58] net/bnxt: add partial header field processing Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 41/58] net/bnxt: add support for wild card pattern match Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 42/58] net/bnxt: add support for GRE flows Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 43/58] net/bnxt: enable extended exact match support Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 44/58] net/bnxt: refactor ULP mapper Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 45/58] net/bnxt: add support for generic hash table Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 46/58] net/bnxt: add support for Thor platform Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 47/58] net/bnxt: refactor flow parser in ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 48/58] net/bnxt: add shared session support to ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 49/58] net/bnxt: add field opcodes in ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 50/58] net/bnxt: add support for application ID in ULP matcher Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 51/58] net/bnxt: process resource lists before session open Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 52/58] net/bnxt: add templates for shared sessions Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 53/58] net/bnxt: add HA support in ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 54/58] net/bnxt: add ICMPv6 parser to ULP Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 55/58] net/bnxt: add context list for timers Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 56/58] net/bnxt: cleanup ULP parser and mapper Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 57/58] net/bnxt: reorganize ULP template directory structure Ajit Khaparde
2021-06-13  0:06   ` [dpdk-dev] [PATCH v2 58/58] net/bnxt: add Thor template support Ajit Khaparde
2021-06-15 19:33   ` [dpdk-dev] [PATCH v2 00/58] enhancements to host based flow table management Ajit Khaparde
2021-07-07  8:43     ` Thomas Monjalon
2021-07-08  3:57       ` Ajit Khaparde
2021-07-08 12:51         ` Thomas Monjalon
2021-07-08 14:37           ` Ajit Khaparde
