ipacm: SRAM NAT, DDR NAT, back-and-forth NAT

NAT can be run out of SRAM, out of DDR, or back and forth between the
two (hybrid), depending on the number of connections the NAT table has
to hold.  The memory type is selected with the new <NatTableType> tag
in IPACM_cfg.xml (DDR, SRAM, or HYBRID; default DDR) and is passed to
libipanat through the widened ipa_nat_add_ipv4_tbl() API.

Change-Id: I385245a94f42472e6cb38db921f0133e71235020
CRs-Fixed: 2771990
Signed-off-by: Perry Randise <prandise@codeaurora.org>
diff --git a/ipacm/inc/IPACM_Config.h b/ipacm/inc/IPACM_Config.h
index 1915fbc..cc89c83 100644
--- a/ipacm/inc/IPACM_Config.h
+++ b/ipacm/inc/IPACM_Config.h
@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -109,6 +109,7 @@
 
 	int ipa_num_alg_ports;
 
+	const char* ipa_nat_memtype;
 	int ipa_nat_max_entries;
 
 	bool ipacm_odu_router_mode;
@@ -226,6 +227,11 @@
 		return ipa_nat_max_entries;
 	}
 
+	inline const char* GetNatMemType(void)
+	{
+		return ipa_nat_memtype;
+	}
+
 	inline int GetNatIfacesCnt()
 	{
 		return ipa_nat_iface_entries;
@@ -261,6 +267,8 @@
 
 	enum ipa_hw_type GetIPAVer(bool get = false);
 
+	int ResetClkVote(void);
+
 	bool isEthBridgingSupported();
 
 	bool isIPAv3Supported();
@@ -360,6 +368,10 @@
 	static const char *DEVICE_NAME_ODU;
 
 private:
+
+	static const int DEFAULT_IPV6CT_MAX_ENTRIES = 500;
+	const char* DEFAULT_NAT_MEMTYPE = "DDR";
+
 	enum ipa_hw_type ver;
 	static IPACM_Config *pInstance;
 	static const char *DEVICE_NAME;
diff --git a/ipacm/inc/IPACM_Conntrack_NATApp.h b/ipacm/inc/IPACM_Conntrack_NATApp.h
index b362907..95dcdfe 100644
--- a/ipacm/inc/IPACM_Conntrack_NATApp.h
+++ b/ipacm/inc/IPACM_Conntrack_NATApp.h
@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -87,6 +87,8 @@
 
 	int curCnt, max_entries;
 
+	const char* mem_type;
+
 	ipacm_alg *pALGPorts;
 	uint16_t nALGPort;
 
diff --git a/ipacm/inc/IPACM_Xml.h b/ipacm/inc/IPACM_Xml.h
index 64c00ed..c78eefd 100644
--- a/ipacm/inc/IPACM_Xml.h
+++ b/ipacm/inc/IPACM_Xml.h
@@ -1,5 +1,5 @@
 /* 
-Copyright (c) 2013, The Linux Foundation. All rights reserved.
+Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -109,6 +109,9 @@
 #define Port_TAG                             "Port"
 #define TCP_PROTOCOL_TAG                     "TCP"
 #define UDP_PROTOCOL_TAG                     "UDP"
+#define DDR_TABLETYPE_TAG                    "DDR"
+#define SRAM_TABLETYPE_TAG                   "SRAM"
+#define HYBRID_TABLETYPE_TAG                 "HYBRID"
 
 /* FIREWALL Config Entries */
 #define Firewall_TAG                         "Firewall"
@@ -177,6 +180,7 @@
 
 #define IPACMNat_TAG                         "IPACMNAT"
 #define NAT_MaxEntries_TAG                   "MaxNatEntries"
+#define NAT_TableType_TAG                    "NatTableType"
 
 #define IP_PassthroughFlag_TAG               "IPPassthroughFlag"
 #define IP_PassthroughMode_TAG               "IPPassthroughMode"
@@ -275,6 +279,7 @@
 	ipacm_private_subnet_conf_t private_subnet_config;
 	ipacm_alg_conf_t alg_config;
 	int nat_max_entries;
+	const char* nat_table_memtype;
 	bool odu_enable;
 	bool router_mode_enable;
 	bool odu_embms_enable;
diff --git a/ipacm/src/IPACM_Config.cpp b/ipacm/src/IPACM_Config.cpp
index c396c6c..88dab03 100644
--- a/ipacm/src/IPACM_Config.cpp
+++ b/ipacm/src/IPACM_Config.cpp
@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -137,6 +137,7 @@
 	ipa_num_ipa_interfaces = 0;
 	ipa_num_private_subnet = 0;
 	ipa_num_alg_ports = 0;
+	ipa_nat_memtype = DEFAULT_NAT_MEMTYPE;
 	ipa_nat_max_entries = 0;
 	ipa_nat_iface_entries = 0;
 	ipa_sw_rt_enable = false;
@@ -172,6 +173,7 @@
 
 int IPACM_Config::Init(void)
 {
+	static bool already_reset = false;
 	/* Read IPACM Config file */
 	char	IPACM_config_file[IPA_MAX_FILE_LEN];
 	IPACM_conf_t	*cfg;
@@ -191,12 +193,23 @@
 	{
 		IPACMERR("Failed opening %s.\n", DEVICE_NAME);
 	}
+
 	ver = GetIPAVer(true);
+
+	if ( ! already_reset )
+	{
+		if ( ResetClkVote() == 0 )
+		{
+			already_reset = true;
+		}
+	}
+
 #ifdef FEATURE_IPACM_HAL
 	strlcpy(IPACM_config_file, "/vendor/etc/IPACM_cfg.xml", sizeof(IPACM_config_file));
 #else
 	strlcpy(IPACM_config_file, "/etc/IPACM_cfg.xml", sizeof(IPACM_config_file));
 #endif
+
 	IPACMDBG_H("\n IPACM XML file is %s \n", IPACM_config_file);
 	if (IPACM_SUCCESS == ipacm_read_cfg_xml(IPACM_config_file, cfg))
 	{
@@ -294,6 +307,11 @@
 	ipa_nat_max_entries = cfg->nat_max_entries;
 	IPACMDBG_H("Nat Maximum Entries %d\n", ipa_nat_max_entries);
 
+	ipa_nat_memtype =
+		(cfg->nat_table_memtype) ?
+		cfg->nat_table_memtype   : DEFAULT_NAT_MEMTYPE;
+	IPACMDBG_H("Nat Mem Type %s\n", ipa_nat_memtype);
+
 	/* Find ODU is either router mode or bridge mode*/
 	ipacm_odu_enable = cfg->odu_enable;
 	ipacm_odu_router_mode = cfg->router_mode_enable;
@@ -906,6 +924,24 @@
 	return ver;
 }
 
+int IPACM_Config::ResetClkVote(void)
+{
+	int ret = -1;
+
+	if ( m_fd > 0 )
+	{
+		ret = ioctl(m_fd, IPA_IOC_APP_CLOCK_VOTE, IPA_APP_CLK_RESET_VOTE);
+
+		if ( ret )
+		{
+			IPACMERR("APP_CLOCK_VOTE ioctl failure %d on IPA fd %d\n",
+					 ret, m_fd);
+		}
+	}
+
+	return ret;
+}
+
 bool IPACM_Config::isEthBridgingSupported()
 {
 	enum ipa_hw_type hw_type;
diff --git a/ipacm/src/IPACM_Conntrack_NATApp.cpp b/ipacm/src/IPACM_Conntrack_NATApp.cpp
index a9c7140..111733a 100644
--- a/ipacm/src/IPACM_Conntrack_NATApp.cpp
+++ b/ipacm/src/IPACM_Conntrack_NATApp.cpp
@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
+Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -39,11 +39,21 @@
 #define HDR_METADATA_MUX_ID_BMASK 0x00FF0000
 #define HDR_METADATA_MUX_ID_SHFT 0x10
 
+#undef strcasesame
+#define strcasesame(a, b) (!strcasecmp(a, b))
+
+#undef  SRAM_IN_USE
+#define SRAM_IN_USE() \
+	( strcasesame(mem_type, "HYBRID" ) || \
+	  strcasesame(mem_type, "SRAM" ) )
+
 /* NatApp class Implementation */
 NatApp *NatApp::pInstance = NULL;
 NatApp::NatApp()
 {
 	max_entries = 0;
+	mem_type = NULL;
+
 	cache = NULL;
 
 	nat_table_hdl = 0;
@@ -85,6 +95,8 @@
 		return -1;
 	}
 
+	mem_type = pConfig->GetNatMemType();
+
 	max_entries = pConfig->GetNatMaxEntries();
 
 	size = (sizeof(nat_table_entry) * max_entries);
@@ -158,7 +170,6 @@
 }
 
 /* NAT APP related object function definitions */
-
 int NatApp::AddTable(uint32_t pub_ip, uint8_t mux_id)
 {
 	int ret;
@@ -175,7 +186,7 @@
 		curCnt = 0;
 	}
 #endif
-	ret = ipa_nat_add_ipv4_tbl(pub_ip, max_entries, &nat_table_hdl);
+	ret = ipa_nat_add_ipv4_tbl(pub_ip, mem_type, max_entries, &nat_table_hdl);
 	if(ret)
 	{
 		IPACMERR("unable to create nat table Error:%d\n", ret);
@@ -773,6 +784,20 @@
 	int cnt;
 	uint32_t ts;
 	bool read_to = false;
+	bool keep_awake;
+
+	keep_awake = ( max_entries && SRAM_IN_USE() && ipa_nat_is_sram_supported() );
+
+	if ( keep_awake )
+	{
+		IPACMDBG("Voting clock on\n");
+
+		if ( ipa_nat_vote_clock(IPA_APP_CLK_VOTE) != 0 )
+		{
+			IPACMERR("Voting clock on failed\n");
+			return;
+		}
+	}
 
 	for(cnt = 0; cnt < max_entries; cnt++)
 	{
@@ -804,6 +829,15 @@
 
 	} /* end of for loop */
 
+	if ( keep_awake )
+	{
+		IPACMDBG("Voting clock off\n");
+
+		if ( ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE) != 0 )
+		{
+			IPACMERR("Voting clock off failed\n");
+		}
+	}
 }
 
 bool NatApp::isAlgPort(uint8_t proto, uint16_t port)
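
The hunk above brackets the timestamp-read loop with IPA clock votes whenever the table may live in SRAM. Below is a minimal sketch of the same pattern, assuming only the libipanat calls introduced in this patch (ipa_nat_vote_clock(), ipa_nat_is_sram_supported()) and the IPA_APP_CLK_* vote types from the kernel header; the table walk itself is elided:

/* Sketch: keep the IPA clocked while touching an SRAM-resident table. */
#include <strings.h>        /* strcasecmp */
#include <stdbool.h>
#include "ipa_nat_drv.h"    /* ipa_nat_vote_clock, ipa_nat_is_sram_supported */

static void read_entries_with_clock_vote(const char *mem_type, int max_entries)
{
	bool keep_awake = max_entries &&
		(!strcasecmp(mem_type, "SRAM") || !strcasecmp(mem_type, "HYBRID")) &&
		ipa_nat_is_sram_supported();

	if (keep_awake && ipa_nat_vote_clock(IPA_APP_CLK_VOTE) != 0)
		return;  /* never touch SRAM without the clock vote */

	/* ... walk the table and query rule timestamps here ... */

	if (keep_awake)
		ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE);  /* release the vote */
}
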
diff --git a/ipacm/src/IPACM_Xml.cpp b/ipacm/src/IPACM_Xml.cpp
index d59bbb0..28b7af5 100644
--- a/ipacm/src/IPACM_Xml.cpp
+++ b/ipacm/src/IPACM_Xml.cpp
@@ -1,5 +1,5 @@
 /*
-Copyright (c) 2013, 2019, The Linux Foundation. All rights reserved.
+Copyright (c) 2013, 2019-2020, The Linux Foundation. All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
@@ -448,6 +448,31 @@
 						IPACMDBG_H("Nat Table Max Entries %d\n", config->nat_max_entries);
 					}
 				}
+				else if (IPACM_util_icmp_string((char*)xml_node->name, NAT_TableType_TAG) == 0)
+				{
+					config->nat_table_memtype = DDR_TABLETYPE_TAG;
+					content = IPACM_read_content_element(xml_node);
+					if (content)
+					{
+						str_size = strlen(content);
+						memset(content_buf, 0, sizeof(content_buf));
+						memcpy(content_buf, (void *)content, str_size);
+						content_buf[MAX_XML_STR_LEN-1] = '\0';
+						if (0 == strncasecmp(content_buf, DDR_TABLETYPE_TAG, str_size))
+						{
+							config->nat_table_memtype = DDR_TABLETYPE_TAG;
+						}
+						else if (0 == strncasecmp(content_buf, SRAM_TABLETYPE_TAG, str_size))
+						{
+							config->nat_table_memtype = SRAM_TABLETYPE_TAG;
+						}
+						else if (0 == strncasecmp(content_buf, HYBRID_TABLETYPE_TAG, str_size))
+						{
+							config->nat_table_memtype = HYBRID_TABLETYPE_TAG;
+						}
+					}
+					IPACMDBG_H("NAT Table location %s\n", config->nat_table_memtype);
+				}
 			}
 			break;
 		default:
diff --git a/ipacm/src/IPACM_cfg.xml b/ipacm/src/IPACM_cfg.xml
index 0c125e5..29c4d21 100644
--- a/ipacm/src/IPACM_cfg.xml
+++ b/ipacm/src/IPACM_cfg.xml
@@ -175,6 +175,7 @@
 		</IPACMALG>
 		<IPACMNAT>		
  	        <MaxNatEntries>500</MaxNatEntries>
+ 	        <NatTableType>HYBRID</NatTableType>
 		</IPACMNAT>
 		</IPACM>
 </system>
diff --git a/ipanat/Android.bp b/ipanat/Android.bp
index 310304d..424dd54 100644
--- a/ipanat/Android.bp
+++ b/ipanat/Android.bp
@@ -6,8 +6,13 @@
     header_libs: ["qti_kernel_headers"],
 
     srcs: [
-        "src/ipa_nat_drv.c",
+        "src/ipa_nat_map.cpp",
+        "src/ipa_table.c",
+        "src/ipa_nat_statemach.c",
         "src/ipa_nat_drvi.c",
+        "src/ipa_nat_drv.c",
+        "src/ipa_mem_descriptor.c",
+        "src/ipa_nat_utils.c",
     ],
 
    shared_libs:
diff --git a/ipanat/inc/ipa_ipv6ct.h b/ipanat/inc/ipa_ipv6ct.h
new file mode 100644
index 0000000..d0c84c8
--- /dev/null
+++ b/ipanat/inc/ipa_ipv6ct.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef IPA_IPV6CT_H
+#define IPA_IPV6CT_H
+
+#include <stdint.h>
+
+/**
+ * enum ipa_ipv6_ct_direction_settings_type - direction filter settings
+ *
+ * IPA_IPV6CT_DIRECTION_DENY_ALL  - deny inbound and outbound
+ * IPA_IPV6CT_DIRECTION_ALLOW_OUT - allow outbound and deny inbound
+ * IPA_IPV6CT_DIRECTION_ALLOW_IN  - allow inbound and deny outbound
+ * IPA_IPV6CT_DIRECTION_ALLOW_ALL - allow inbound and outbound
+ */
+typedef enum
+{
+	IPA_IPV6CT_DIRECTION_DENY_ALL  = 0,
+	IPA_IPV6CT_DIRECTION_ALLOW_OUT = 1,
+	IPA_IPV6CT_DIRECTION_ALLOW_IN  = 2,
+	IPA_IPV6CT_DIRECTION_ALLOW_ALL = 3
+} ipa_ipv6_ct_direction_settings_type;
+
+/**
+ * struct ipa_ipv6ct_rule - To hold IPv6CT rule
+ * @src_ipv6_lsb: source IPv6 address LSB
+ * @src_ipv6_msb: source IPv6 address MSB
+ * @dest_ipv6_lsb: destination IPv6 address LSB
+ * @dest_ipv6_msb: destination IPv6 address MSB
+ * @direction_settings: direction filter settings (inbound/outbound) (see ipa_ipv6_ct_direction_settings_type)
+ * @src_port: source port
+ * @dest_port: destination port
+ * @protocol: protocol of rule (tcp/udp)
+ */
+typedef struct {
+	uint64_t src_ipv6_lsb;
+	uint64_t src_ipv6_msb;
+	uint64_t dest_ipv6_lsb;
+	uint64_t dest_ipv6_msb;
+	ipa_ipv6_ct_direction_settings_type  direction_settings;
+	uint16_t src_port;
+	uint16_t dest_port;
+	uint8_t  protocol;
+} ipa_ipv6ct_rule;
+
+/**
+ * ipa_ipv6ct_add_tbl() - create IPv6CT table
+ * @number_of_entries: [in] number of IPv6CT entries
+ * @table_handle: [out] handle of new IPv6CT table
+ *
+ * To create new IPv6CT table
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_ipv6ct_add_tbl(uint16_t number_of_entries, uint32_t* table_handle);
+
+/**
+ * ipa_ipv6ct_del_tbl() - delete IPv6CT table
+ * @table_handle: [in] Handle of IPv6CT table
+ *
+ * To delete given IPv6CT table
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_ipv6ct_del_tbl(uint32_t table_handle);
+
+/**
+ * ipa_ipv6ct_add_rule() - to insert new IPv6CT rule
+ * @table_handle: [in] handle of IPv6CT table
+ * @user_rule: [in] Pointer to new rule
+ * @rule_handle: [out] Return the handle to rule
+ *
+ * To insert new rule into a IPv6CT table
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_ipv6ct_add_rule(uint32_t table_handle, const ipa_ipv6ct_rule* user_rule, uint32_t* rule_handle);
+
+/**
+ * ipa_ipv6ct_del_rule() - to delete IPv6CT rule
+ * @table_handle: [in] handle of IPv6CT table
+ * @rule_handle: [in] IPv6CT rule handle
+ *
+ * To delete a rule from a IPv6CT table
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_ipv6ct_del_rule(uint32_t table_handle, uint32_t rule_handle);
+
+/**
+ * ipa_ipv6ct_query_timestamp() - to query timestamp
+ * @table_handle: [in] handle of IPv6CT table
+ * @rule_handle: [in] IPv6CT rule handle
+ * @time_stamp: [out] time stamp of rule
+ *
+ * To retrieve the timestamp at which the IPv6CT rule was last accessed
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_ipv6ct_query_timestamp(uint32_t table_handle, uint32_t rule_handle, uint32_t* time_stamp);
+
+/**
+ * ipa_ipv6ct_dump_table() - dumps IPv6CT table
+ * @table_handle: [in] handle of IPv6CT table
+ */
+void ipa_ipv6ct_dump_table(uint32_t tbl_hdl);
+
+#endif
+
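
For reference, a usage sketch of the IPv6CT API declared in this new header (the addresses and ports are illustrative only, the 500-entry size matches DEFAULT_IPV6CT_MAX_ENTRIES in IPACM_Config.h, and error handling is minimal):

#include <stdint.h>
#include <stdio.h>
#include "ipa_ipv6ct.h"

int ipv6ct_example(void)
{
	uint32_t tbl_hdl, rule_hdl, ts;
	ipa_ipv6ct_rule rule = {
		.src_ipv6_msb  = 0x20010db800000001ULL,   /* example addresses */
		.src_ipv6_lsb  = 0x0000000000000001ULL,
		.dest_ipv6_msb = 0x20010db800000002ULL,
		.dest_ipv6_lsb  = 0x0000000000000002ULL,
		.direction_settings = IPA_IPV6CT_DIRECTION_ALLOW_ALL,
		.src_port  = 5000,
		.dest_port = 80,
		.protocol  = 6,                           /* TCP */
	};

	if (ipa_ipv6ct_add_tbl(500, &tbl_hdl))
		return -1;

	if (ipa_ipv6ct_add_rule(tbl_hdl, &rule, &rule_hdl) == 0) {
		if (ipa_ipv6ct_query_timestamp(tbl_hdl, rule_hdl, &ts) == 0)
			printf("rule last accessed at ts=%u\n", ts);
		ipa_ipv6ct_del_rule(tbl_hdl, rule_hdl);
	}

	return ipa_ipv6ct_del_tbl(tbl_hdl);
}
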
diff --git a/ipanat/inc/ipa_ipv6cti.h b/ipanat/inc/ipa_ipv6cti.h
new file mode 100644
index 0000000..4d4e160
--- /dev/null
+++ b/ipanat/inc/ipa_ipv6cti.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef IPA_IPV6CTI_H
+#define IPA_IPV6CTI_H
+
+#include "ipa_table.h"
+#include "ipa_mem_descriptor.h"
+#include "ipa_nat_utils.h"
+
+#define IPA_IPV6CT_MAX_TBLS   1
+
+#define IPA_IPV6CT_RULE_FLAG_FIELD_OFFSET        34
+#define IPA_IPV6CT_RULE_NEXT_FIELD_OFFSET        40
+#define IPA_IPV6CT_RULE_PROTO_FIELD_OFFSET       38
+
+#define IPA_IPV6CT_FLAG_ENABLE_BIT  1
+
+#define IPA_IPV6CT_DIRECTION_ALLOW_BIT  1
+#define IPA_IPV6CT_DIRECTION_DISALLOW_BIT 0
+
+#define IPA_IPV6CT_INVALID_PROTO_FIELD_VALUE 0xFF00
+#define IPA_IPV6CT_INVALID_PROTO_FIELD_CMP   0xFF
+
+typedef enum
+{
+	IPA_IPV6CT_TABLE_FLAGS,
+	IPA_IPV6CT_TABLE_NEXT_INDEX,
+	IPA_IPV6CT_TABLE_PROTOCOL,
+	IPA_IPV6CT_TABLE_DMA_CMD_MAX
+} ipa_ipv6ct_table_dma_cmd_type;
+
+/*------------------------  IPV6CT Table Entry  ---------------------------------------------------
+
+  -------------------------------------------------------------------------------------------------
+  |     7     |      6      |     5     |     4     |     3     |     2     |     1     |     0     |
+  ---------------------------------------------------------------------------------------------------
+  |                              Outbound Src IPv6 Address (8 LSB Bytes)                            |
+  ---------------------------------------------------------------------------------------------------
+  |                              Outbound Src IPv6 Address (8 MSB Bytes)                            |
+  ---------------------------------------------------------------------------------------------------
+  |                              Outbound Dest IPv6 Address (8 LSB Bytes)                           |
+  ---------------------------------------------------------------------------------------------------
+  |                              Outbound Dest IPv6 Address (8 MSB Bytes)                           |
+  ---------------------------------------------------------------------------------------------------
+  | Protocol  |           TimeStamp (3B)            |       Flags (2B)      |     Reserved (2B)     |
+  |    (1B)   |                                     |Enable|Redirect|Resv   |                       |
+  ---------------------------------------------------------------------------------------------------
+  |Reserved   |Direction(1B)|     Src Port (2B)     |     Dest Port (2B)    |    Next Index (2B)    |
+  |  (1B)     |IN|OUT|Resv  |                       |                       |                       |
+  ---------------------------------------------------------------------------------------------------
+  |           SW Specific Parameters(4B)            |                   Reserved (4B)               |
+  |     Prev Index (2B)     |    Reserved (2B)      |                                               |
+  ---------------------------------------------------------------------------------------------------
+  |                                             Reserved (8B)                                       |
+  ---------------------------------------------------------------------------------------------------
+
+  Don't change the structure definition below.
+  It should be the same as above (little-endian order).
+  -------------------------------------------------------------------------------------------------*/
+typedef struct
+{
+	uint64_t src_ipv6_lsb : 64;
+	uint64_t src_ipv6_msb : 64;
+	uint64_t dest_ipv6_lsb : 64;
+	uint64_t dest_ipv6_msb : 64;
+
+	uint64_t rsvd1 : 30;
+	uint64_t redirect : 1;
+	uint64_t enable : 1;
+	uint64_t time_stamp : 24;
+	uint64_t protocol : 8;
+
+	uint64_t next_index : 16;
+	uint64_t dest_port : 16;
+	uint64_t src_port : 16;
+	uint64_t rsvd2 : 6;
+	uint64_t out_allowed : 1;
+	uint64_t in_allowed : 1;
+	uint64_t rsvd3 : 8;
+
+	uint64_t rsvd4 : 48;
+	uint64_t prev_index : 16;
+
+	uint64_t rsvd5 : 64;
+} ipa_ipv6ct_hw_entry;
+
+/*
+	----------------------
+	|    1    |    0     |
+	----------------------
+	|     Flags(2B)      |
+	|Enable|Redirect|Resv|
+	----------------------
+*/
+typedef struct
+{
+	uint32_t rsvd1 : 14;
+	uint32_t redirect : 1;
+	uint32_t enable : 1;
+} ipa_ipv6ct_flags;
+
+typedef struct
+{
+	ipa_mem_descriptor mem_desc;
+	ipa_table table;
+	ipa_table_dma_cmd_helper table_dma_cmd_helpers[IPA_IPV6CT_TABLE_DMA_CMD_MAX];
+}  ipa_ipv6ct_table;
+
+typedef struct
+{
+	ipa_descriptor* ipa_desc;
+	ipa_ipv6ct_table tables[IPA_IPV6CT_MAX_TBLS];
+	uint8_t table_cnt;
+} ipa_ipv6ct;
+
+#endif
diff --git a/ipanat/inc/ipa_mem_descriptor.h b/ipanat/inc/ipa_mem_descriptor.h
new file mode 100644
index 0000000..e33100c
--- /dev/null
+++ b/ipanat/inc/ipa_mem_descriptor.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef IPA_MEM_DESCRIPTOR_H
+#define IPA_MEM_DESCRIPTOR_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/msm_ipa.h>
+
+typedef struct
+{
+	int orig_rqst_size;
+	int mmap_size;
+	void* base_addr;
+	void* mmap_addr;
+	uint32_t addr_offset;
+	unsigned long allocate_ioctl_num;
+	unsigned long delete_ioctl_num;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t table_index;
+	uint8_t valid;
+	bool consider_using_sram;
+	bool sram_available;
+	bool sram_to_be_used;
+	struct ipa_nat_in_sram_info nat_sram_info;
+} ipa_mem_descriptor;
+
+void ipa_mem_descriptor_init(
+	ipa_mem_descriptor* desc,
+	const char* device_name,
+	int size,
+	uint8_t table_index,
+	unsigned long allocate_ioctl_num,
+	unsigned long delete_ioctl_num,
+	bool consider_using_sram );
+
+int ipa_mem_descriptor_allocate_memory(
+	ipa_mem_descriptor* desc,
+	int ipa_fd);
+
+int ipa_mem_descriptor_delete(
+	ipa_mem_descriptor* desc,
+	int ipa_fd);
+
+#endif
+
diff --git a/ipanat/inc/ipa_nat_drv.h b/ipanat/inc/ipa_nat_drv.h
index d5aa0c6..9be97f3 100644
--- a/ipanat/inc/ipa_nat_drv.h
+++ b/ipanat/inc/ipa_nat_drv.h
@@ -1,37 +1,43 @@
 /*
-Copyright (c) 2013 - 2017, The Linux Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-    * Neither the name of The Linux Foundation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 #ifndef IPA_NAT_DRV_H
 #define IPA_NAT_DRV_H
 
-#include "string.h"  /* memset */
-#include "stdlib.h"  /* free, malloc */
-#include "stdint.h"  /* uint32_t */
+#include "ipa_nat_utils.h"
+
+#include <stdint.h>  /* uint32_t */
+#include <stdbool.h>
+
+/**
+ * ipa_nat_is_sram_supported() - Reports if sram is available for use
+ */
+bool ipa_nat_is_sram_supported(void);
 
 /**
  * struct ipa_nat_ipv4_rule - To hold ipv4 nat rule
@@ -41,6 +47,9 @@
  * @private_port: private port
  * @protocol: protocol of rule (tcp/udp)
  * @pdn_index: PDN index in the PDN config table
+ * @redirect: used internally by various API calls
+ * @enable: used internally by various API calls
+ * @time_stamp: used internally by various API calls
  */
 typedef struct {
 	uint32_t target_ip;
@@ -50,14 +59,46 @@
 	uint16_t public_port;
 	uint8_t  protocol;
 	uint8_t  pdn_index;
+	uint8_t  redirect;
+	uint8_t  enable;
+	uint32_t time_stamp;
 } ipa_nat_ipv4_rule;
 
+static inline char* prep_nat_ipv4_rule_4print(
+	const ipa_nat_ipv4_rule* rule_ptr,
+	char*              buf_ptr,
+	uint32_t           buf_sz )
+{
+	if ( rule_ptr && buf_ptr && buf_sz )
+	{
+		snprintf(
+			buf_ptr, buf_sz,
+			"IPV4 RULE: "
+			"protocol(0x%02X) "
+			"public_port(0x%04X) "
+			"target_ip(0x%08X) "
+			"target_port(0x%04X) "
+			"private_ip(0x%08X) "
+			"private_port(0x%04X) "
+			"pdn_index(0x%02X)",
+			rule_ptr->protocol,
+			rule_ptr->public_port,
+			rule_ptr->target_ip,
+			rule_ptr->target_port,
+			rule_ptr->private_ip,
+			rule_ptr->private_port,
+			rule_ptr->pdn_index);
+	}
+
+	return buf_ptr;
+}
+
 /**
-* struct ipa_nat_pdn_entry - holds a PDN entry data
-* @public_ip: PDN's public ip address
-* @src_metadata: metadata to be used for source NAT metadata replacement
-* @dst_metadata: metadata to be used for destination NAT metadata replacement
-*/
+ * struct ipa_nat_pdn_entry - holds a PDN entry data
+ * @public_ip: PDN's public ip address
+ * @src_metadata: metadata to be used for source NAT metadata replacement
+ * @dst_metadata: metadata to be used for destination NAT metadata replacement
+ */
 typedef struct {
 	uint32_t public_ip;
 	uint32_t src_metadata;
@@ -67,6 +108,7 @@
 /**
  * ipa_nat_add_ipv4_tbl() - create ipv4 nat table
  * @public_ip_addr: [in] public ipv4 address
+ * @mem_type_ptr: [in] type of memory the table is to reside in
  * @number_of_entries: [in]  number of nat entries
  * @table_handle: [out] Handle of new ipv4 nat table
  *
@@ -74,9 +116,11 @@
  *
  * Returns:	0  On Success, negative on failure
  */
-int ipa_nat_add_ipv4_tbl(uint32_t public_ip_addr,
-				uint16_t number_of_entries,
-				uint32_t *table_handle);
+int ipa_nat_add_ipv4_tbl(
+	uint32_t public_ip_addr,
+	const char *mem_type_ptr,
+	uint16_t number_of_entries,
+	uint32_t *table_handle);
 
 /**
  * ipa_nat_del_ipv4_tbl() - delete ipv4 table
@@ -132,16 +176,78 @@
 
 
 /**
-* ipa_nat_modify_pdn() - modify single PDN entry in the PDN config table
-* @table_handle: [in] handle of ipv4 nat table
-* @pdn_index : [in] the index of the entry to be modified
-* @pdn_info : [in] values for the PDN entry to be changed
-*
-* Modify a PDN entry
-*
-* Returns:	0  On Success, negative on failure
-*/
+ * ipa_nat_modify_pdn() - modify single PDN entry in the PDN config table
+ * @table_handle: [in] handle of ipv4 nat table
+ * @pdn_index : [in] the index of the entry to be modified
+ * @pdn_info : [in] values for the PDN entry to be changed
+ *
+ * Modify a PDN entry
+ *
+ * Returns:	0  On Success, negative on failure
+ */
 int ipa_nat_modify_pdn(uint32_t  tbl_hdl,
 	uint8_t pdn_index,
 	ipa_nat_pdn_entry *pdn_info);
-#endif /* IPA_NAT_DRV_H */
\ No newline at end of file
+
+/**
+* ipa_nat_get_pdn_index() - get a PDN index for a public ip
+* @public_ip : [in] IPv4 address of the PDN entry
+* @pdn_index : [out] the index of the requested PDN entry
+*
+* Get a PDN entry
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_get_pdn_index(uint32_t public_ip, uint8_t *pdn_index);
+
+/**
+* ipa_nat_alloc_pdn() - allocate a PDN for new WAN
+* @pdn_info : [in] values for the PDN entry to be created
+* @pdn_index : [out] the index of the requested PDN entry
+*
+* allocate a new PDN entry
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_alloc_pdn(ipa_nat_pdn_entry *pdn_info,
+	uint8_t *pdn_index);
+
+/**
+* ipa_nat_get_pdn_count() - get the number of allocated PDNs
+* @pdn_cnt : [out] the number of allocated PDNs
+*
+* get the number of allocated PDNs
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_get_pdn_count(uint8_t *pdn_cnt);
+
+/**
+* ipa_nat_dealloc_pdn() - deallocate a PDN entry
+* @pdn_index : [in] pdn index to be deallocated
+*
+* deallocate a PDN in specified index - zero the PDN entry
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_dealloc_pdn(uint8_t pdn_index);
+
+
+/**
+ * ipa_nat_dump_ipv4_table() - dumps IPv4 NAT table
+ * @table_handle: [in] handle of IPv4 NAT table
+ */
+void ipa_nat_dump_ipv4_table(uint32_t tbl_hdl);
+
+/**
+ * ipa_nat_vote_clock() - cast or release an IPA clock vote
+ * @vote_type: [in] desired vote type
+ */
+int ipa_nat_vote_clock(
+	enum ipa_app_clock_vote_type vote_type );
+
+int ipa_nat_switch_to(
+	enum ipa3_nat_mem_in nmi );
+
+#endif
+
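
A caller-side sketch of the widened ipa_nat_add_ipv4_tbl() API, assuming the memory-type strings used elsewhere in this patch ("DDR", "SRAM", "HYBRID") and falling back to DDR when SRAM is unavailable; rule add/delete against the returned handle is unchanged and elided here:

#include <stdint.h>
#include <stdbool.h>
#include "ipa_nat_drv.h"

int nat_tbl_example(uint32_t pub_ip, uint16_t entries)
{
	uint32_t tbl_hdl;
	/* HYBRID bounces between SRAM and DDR based on load; use DDR when
	 * SRAM is unavailable on this target. */
	const char *mem_type = ipa_nat_is_sram_supported() ? "HYBRID" : "DDR";

	if (ipa_nat_add_ipv4_tbl(pub_ip, mem_type, entries, &tbl_hdl))
		return -1;

	/* ... add/delete NAT rules against tbl_hdl as before ... */

	return ipa_nat_del_ipv4_tbl(tbl_hdl);
}
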
diff --git a/ipanat/inc/ipa_nat_drvi.h b/ipanat/inc/ipa_nat_drvi.h
index 292a47b..ae6d363 100644
--- a/ipanat/inc/ipa_nat_drvi.h
+++ b/ipanat/inc/ipa_nat_drvi.h
@@ -1,68 +1,57 @@
 /*
-Copyright (c) 2013 - 2019, The Linux Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-    * Neither the name of The Linux Foundation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #ifndef IPA_NAT_DRVI_H
 #define IPA_NAT_DRVI_H
 
-#include <stdio.h>
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <linux/msm_ipa.h>
-#include <netinet/in.h>
-#include <sys/inotify.h>
-#include <errno.h>
-#include <pthread.h>
-#include <unistd.h>
+#include "ipa_table.h"
+#include "ipa_mem_descriptor.h"
+#include "ipa_nat_utils.h"
 
-#include "ipa_nat_logi.h"
+#undef MAKE_TBL_HDL
+#define MAKE_TBL_HDL(hdl, mt) \
+	((mt) << 31 | (hdl))
 
-#define NAT_DUMP
+#undef BREAK_TBL_HDL
+#define BREAK_TBL_HDL(hdl_in, mt, hdl_out) \
+	do { \
+		mt      = (hdl_in) >> 31 & 0x0000000001; \
+		hdl_out = (hdl_in)       & 0x00000000FF; \
+	} while ( 0 )
+
+#undef VALID_TBL_HDL
+#define VALID_TBL_HDL(h) \
+	(((h) & 0x00000000FF) == IPA_NAT_MAX_IP4_TBLS)
 
 /*======= IMPLEMENTATION related data structures and functions ======= */
-#ifdef IPA_ON_R3PC
-#define NAT_MMAP_MEM_SIZE (2 * 1024UL * 1024UL - 1)
-#endif
 
-#define IPA_DEV_NAME       "/dev/ipa"
-#define NAT_DEV_DIR        "/dev"
-#define NAT_DEV_NAME       "ipaNatTable"
-#define NAT_DEV_FULL_NAME  "/dev/ipaNatTable"
-
-#define IPA_NAT_TABLE_VALID 1
 #define IPA_NAT_MAX_IP4_TBLS   1
-#define IPA_NAT_BASE_TABLE_PERCENTAGE       .8
-#define IPA_NAT_EXPANSION_TABLE_PERCENTAGE  .2
-
-#define IPA_NAT_NUM_OF_BASE_TABLES      2
-#define IPA_NAT_UNUSED_BASE_ENTRIES     2
 
 #define IPA_NAT_RULE_FLAG_FIELD_OFFSET        18
 #define IPA_NAT_RULE_NEXT_FIELD_OFFSET        8
@@ -71,199 +60,54 @@
 #define IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET       2
 #define IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET  0
 
-#define IPA_NAT_RULE_FLAG_FIELD_SIZE       2
-#define IPA_NAT_RULE_NEXTFIELD_FIELD_SIZE  2
-
-#define IPA_NAT_FLAG_ENABLE_BIT_MASK  0x8000
-#define IPA_NAT_FLAG_DISABLE_BIT_MASK 0x0000
-
 #define IPA_NAT_FLAG_ENABLE_BIT  1
-#define IPA_NAT_FLAG_DISABLE_BIT 0
 
 #define IPA_NAT_INVALID_PROTO_FIELD_VALUE 0xFF00
-
-#define IPA_NAT_INVALID_INDEX 0xFF
-#define IPA_NAT_INVALID_NAT_ENTRY 0x0
-
-#define INDX_TBL_ENTRY_SIZE_IN_BITS  16
-
-/* ----------- Rule id -----------------------
-
-   ------------------------------------------------
-   |  3bits   |    12 bits       |     1 bit      |
-   ------------------------------------------------
-   | reserved | index into table |  0 - base      |
-   |          |                  |  1 - expansion |
-   ------------------------------------------------
-
-*/
-#define IPA_NAT_RULE_HDL_TBL_TYPE_BITS        0x1
-#define IPA_NAT_RULE_HDL_TBL_TYPE_MASK        0x1
-
-/* ----------- sw specif parameter -----
-   ------------------------------------
-   |     16 bits     |     16 bits    |
-   ------------------------------------
-   |  index table    |  prev index    |
-   |     entry       |                |
-   ------------------------------------
------------------------------------------*/
-#define IPA_NAT_SW_PARAM_PREV_INDX_BYTE       0
-#define IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE  1
+/*
+ * IPA_NAT_INVALID_PROTO_FIELD_VALUE above is what's passed to the IPA
+ * in a DMA command.  The IPA writes it into the NAT rule, where it
+ * manifests in the form below; hence the value below is used when
+ * perusing "struct ipa_nat_rule".
+ */
+#define IPA_NAT_INVALID_PROTO_FIELD_VALUE_IN_RULE  0xFF
 
 typedef enum {
-	IPA_NAT_BASE_TBL        = 0,
-	IPA_NAT_EXPN_TBL        = 1,
-	IPA_NAT_INDX_TBL        = 2,
-	IPA_NAT_INDEX_EXPN_TBL  = 3,
-} nat_table_type;
-
-typedef enum {
-	NEXT_INDEX_FIELD,
-	PUBLIC_PORT_FILED,
-	PRIVATE_PORT_FIELD,
-	TARGET_PORT_FIELD,
-	IP_CHKSUM_FIELD,
-	ENABLE_FIELD,
-	TIME_STAMP_FIELD,
-	PROTOCOL_FIELD,
-	TCP_UDP_CHKSUM_FIELD,
-	SW_SPEC_PARAM_PREV_INDEX_FIELD,
-	SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD,
-	INDX_TBL_TBL_ENTRY_FIELD,
-	INDX_TBL_NEXT_INDEX_FILED
-} ipa_nat_rule_field_type;
+	IPA_NAT_TABLE_FLAGS,
+	IPA_NAT_TABLE_NEXT_INDEX,
+	IPA_NAT_TABLE_PROTOCOL,
+	IPA_NAT_INDEX_TABLE_ENTRY,
+	IPA_NAT_INDEX_TABLE_NEXT_INDEX,
+	IPA_NAT_TABLE_DMA_CMD_MAX
+} ipa_nat_table_dma_cmd_type;
 
 /*
-	---------------------------------------------
-	|     3      |    2    |    1    |    0      |
-	---------------------------------------------
-	| Public Port(2B)     | Next Index(2B)       |
-	---------------------------------------------
-*/
-typedef struct {
-	uint32_t next_index:16;
-	uint32_t public_port:16;
-} next_index_pub_port;
-
-
-/*
-	---------------------------------------------
-	|     3      |    2    |    1    |    0      |
-	---------------------------------------------
-  |       Flags(2B)     | IP check sum Diff(2B)|
-	|EN|FIN|Resv |        |                      |
-	---------------------------------------------
-*/
-typedef struct {
-	uint32_t ip_chksum:16;
-	uint32_t rsvd1:14;
-	uint32_t redirect:1;
-	uint32_t enable:1;
-} ipcksum_enbl;
-
-
-/*
-	---------------------------------------
-	|   7    |    6    |   5    |    4    |
-	---------------------------------------
-  | Proto   |      TimeStamp(3B)        |
-	| (1B)    |                           |
-	---------------------------------------
-*/
-typedef struct {
-	uint32_t time_stamp:24;
-	uint32_t protocol:8;
-} time_stamp_proto;
-
-
-/*
-	---------------------------------------------
-	|     3      |    2    |    1    |    0      |
-	---------------------------------------------
-  |       next_index     | Table entry         |
-	----------------------------------------------
-*/
-typedef struct {
-	uint16_t tbl_entry;
-	uint16_t next_index;
-} tbl_ent_nxt_indx;
-
-/*--------------------------------------------------
-   32 bit sw_spec_params is interpreted as follows
-   ------------------------------------
-   |     16 bits     |     16 bits    |
-   ------------------------------------
-   |  index table    |  prev index    |
-   |     entry       |                |
-	 ------------------------------------
---------------------------------------------------*/
-typedef struct {
-	uint16_t prev_index;
-	uint16_t index_table_entry;
-} sw_spec_params;
-
-/*------------------------  NAT Table Entry  ---------------------------------------
-
-  -----------------------------------------------------------------------------------
-  |   7    |    6    |   5    |    4    |     3      |    2    |    1    |    0      |
-  -----------------------------------------------------------------------------------
-  |             Target IP(4B)           |             Private IP(4B)                 |
-  -----------------------------------------------------------------------------------
-  |Target Port(2B)   | Private Port(2B) | Public Port(2B)     | Next Index(2B)       |
-  -----------------------------------------------------------------------------------
-  | Proto   |      TimeStamp(3B)        |       Flags(2B)     | IP check sum Diff(2B)|
-  | (1B)    |                           |EN|FIN|Resv |        |                      |
-  -----------------------------------------------------------------------------------
-  | TCP/UDP checksum |PDN info| Reserved|    SW Specific Parameters(4B)              |
-  |    diff (2B)     |  (1B)  |   (1B)  |                                            |
-  -----------------------------------------------------------------------------------
-
-  Dont change below structure definition.
-  It should be same as above(little endian order)
-  -------------------------------------------------------------------------------*/
+ * ------------------------  NAT Table Entry  -----------------------------------------
+ *
+ * ------------------------------------------------------------------------------------
+ * |   7    |    6    |   5    |    4    |     3        |  2   |    1    |    0       |
+ * ------------------------------------------------------------------------------------
+ * |             Target IP(4B)           |             Private IP(4B)                 |
+ * ------------------------------------------------------------------------------------
+ * |Target Port(2B)   | Private Port(2B) | Public Port(2B)     | Next Index(2B)       |
+ * ------------------------------------------------------------------------------------
+ * | Proto   |      TimeStamp(3B)        |       Flags(2B)     | IP check sum Diff(2B)|
+ * | (1B)    |                           |EN|Redirect|Resv     |                      |
+ * ------------------------------------------------------------------------------------
+ * | TCP/UDP checksum |PDN info|Reserved |    SW Specific Parameters(4B)              |
+ * |    diff (2B)     |  (1B)  |  (1B)   |                                            |
+ * ------------------------------------------------------------------------------------
+ *
+ * Don't change the structure definition below.
+ *
+ * It should be the same as above (little-endian order).
+ *
+ * -------------------------------------------------------------------------------
+ */
 struct ipa_nat_rule {
 	uint64_t private_ip:32;
 	uint64_t target_ip:32;
 
-	uint64_t nxt_indx_pub_port:32;
-	uint64_t private_port:16;
-	uint64_t target_port:16;
-
-	uint64_t ip_cksm_enbl:32;
-	uint64_t ts_proto:32;
-
-  /*--------------------------------------------------
-   32 bit sw_spec_params is interpreted as follows
-   ------------------------------------
-   |     16 bits     |     16 bits    |
-   ------------------------------------
-   |  index table    |  prev index    |
-   |     entry       |                |
-   ------------------------------------
-  --------------------------------------------------*/
-	uint64_t sw_spec_params:32;
-
-	uint64_t rsvd2:8;
-
-  /*-----------------------------------------
-   8 bit PDN info is interpreted as following
-   ------------------------------------
-   |     4 bits      |     4 bits     |
-   ------------------------------------
-   |  PDN index      |    reserved    |
-   |                 |                |
-   ------------------------------------
-  -------------------------------------------*/
-	uint64_t rsvd3:4;
-	uint64_t pdn_index:4;
-	uint64_t tcp_udp_chksum:16;
-};
-
-struct ipa_nat_sw_rule {
-	uint64_t private_ip:32;
-	uint64_t target_ip:32;
-
 	uint64_t next_index:16;
 	uint64_t public_port:16;
 	uint64_t private_port:16;
@@ -276,39 +120,127 @@
 	uint64_t time_stamp:24;
 	uint64_t protocol:8;
 
-  /*--------------------------------------------------
-   32 bit sw_spec_params is interpreted as follows
-   ------------------------------------
-   |     16 bits     |     16 bits    |
-   ------------------------------------
-   |  index table    |  prev index    |
-   |     entry       |                |
-   ------------------------------------
-  --------------------------------------------------*/
+	/*--------------------------------------------------
+	32 bit sw_spec_params is interpreted as follows
+	------------------------------------
+	|     16 bits     |     16 bits    |
+	------------------------------------
+	|  index table    |  prev index    |
+	|     entry       |                |
+	------------------------------------
+	--------------------------------------------------*/
 	uint64_t prev_index:16;
 	uint64_t indx_tbl_entry:16;
-	uint64_t rsvd2 :8;
-  /*-----------------------------------------
-   8 bit PDN info is interpreted as following
-   ------------------------------------
-   |     4 bits      |     4 bits     |
-   ------------------------------------
-   |  PDN index      |    reserved    |
-   |                 |                |
-   ------------------------------------
-  -------------------------------------------*/
-	uint64_t rsvd3 :4;
-	uint64_t pdn_index :4;
+	uint64_t rsvd2:8;
+	/*-----------------------------------------
+	8 bit PDN info is interpreted as following
+	------------------------------------
+	|     4 bits      |     4 bits     |
+	------------------------------------
+	|  PDN index      |    reserved    |
+	|                 |                |
+	------------------------------------
+	-------------------------------------------*/
+	uint64_t rsvd3:4;
+	uint64_t pdn_index:4;
 	uint64_t tcp_udp_chksum:16;
 };
-#define IPA_NAT_TABLE_ENTRY_SIZE        32
-#define IPA_NAT_INDEX_TABLE_ENTRY_SIZE  4
+
+static inline char* prep_nat_rule_4print(
+	struct ipa_nat_rule* rule_ptr,
+	char*                buf_ptr,
+	uint32_t             buf_sz )
+{
+	if ( rule_ptr && buf_ptr && buf_sz )
+	{
+		snprintf(
+			buf_ptr, buf_sz,
+			"NAT RULE: "
+			"protocol(0x%02X) "
+			"public_port(0x%04X) "
+			"target_ip(0x%08X) "
+			"target_port(0x%04X) "
+			"private_ip(0x%08X) "
+			"private_port(0x%04X) "
+			"pdn_index(0x%02X) "
+			"ip_chksum(0x%04X) "
+			"tcp_udp_chksum(0x%04X) "
+			"redirect(0x%02X) "
+			"enable(0x%02X) "
+			"time_stamp(0x%08X) "
+			"indx_tbl_entry(0x%04X) "
+			"prev_index(0x%04X) "
+			"next_index(0x%04X)",
+			rule_ptr->protocol,
+			rule_ptr->public_port,
+			rule_ptr->target_ip,
+			rule_ptr->target_port,
+			rule_ptr->private_ip,
+			rule_ptr->private_port,
+			rule_ptr->pdn_index,
+			rule_ptr->ip_chksum,
+			rule_ptr->tcp_udp_chksum,
+			rule_ptr->redirect,
+			rule_ptr->enable,
+			rule_ptr->time_stamp,
+			rule_ptr->indx_tbl_entry,
+			rule_ptr->prev_index,
+			rule_ptr->next_index);
+	}
+
+	return buf_ptr;
+}
+
+static inline const char *ipa3_nat_mem_in_as_str(
+	enum ipa3_nat_mem_in nmi)
+{
+	switch (nmi) {
+	case IPA_NAT_MEM_IN_DDR:
+		return "IPA_NAT_MEM_IN_DDR";
+	case IPA_NAT_MEM_IN_SRAM:
+		return "IPA_NAT_MEM_IN_SRAM";
+	default:
+		break;
+	}
+	return "???";
+}
+
+static inline char *ipa_ioc_v4_nat_init_as_str(
+	struct ipa_ioc_v4_nat_init *ptr,
+	char                       *buf,
+	uint32_t                    buf_sz)
+{
+	if (ptr && buf && buf_sz) {
+		snprintf(
+			buf, buf_sz,
+			"V4 NAT INIT: tbl_index(0x%02X) ipv4_rules_offset(0x%08X) expn_rules_offset(0x%08X) index_offset(0x%08X) index_expn_offset(0x%08X) table_entries(0x%04X) expn_table_entries(0x%04X) ip_addr(0x%08X)",
+			ptr->tbl_index,
+			ptr->ipv4_rules_offset,
+			ptr->expn_rules_offset,
+			ptr->index_offset,
+			ptr->index_expn_offset,
+			ptr->table_entries,
+			ptr->expn_table_entries,
+			ptr->ip_addr);
+	}
+	return buf;
+}
+
+/*
+	---------------------------------------
+	|         1        |         0        |
+	---------------------------------------
+	|               Flags(2B)             |
+	|Enable|Redirect|Resv                 |
+	---------------------------------------
+*/
+typedef struct {
+	uint32_t rsvd1:14;
+	uint32_t redirect:1;
+	uint32_t enable:1;
+} ipa_nat_flags;
 
 struct ipa_nat_indx_tbl_rule {
-	uint32_t tbl_entry_nxt_indx;
-};
-
-struct ipa_nat_sw_indx_tbl_rule {
 	uint16_t tbl_entry;
 	uint16_t next_index;
 };
@@ -318,109 +250,28 @@
 };
 
 struct ipa_nat_ip4_table_cache {
-	uint8_t valid;
 	uint32_t public_addr;
-
-	int nat_fd;
-	int size;
-	uint32_t tbl_addr_offset;
-	char table_name[IPA_RESOURCE_NAME_MAX];
-
-	char  *ipv4_rules_addr;
-	char  *index_table_addr;
-	uint16_t   table_entries;
-
-	char *ipv4_expn_rules_addr;
-	char *index_table_expn_addr;
-	uint16_t  expn_table_entries;
-
+	ipa_mem_descriptor mem_desc;
+	ipa_table table;
+	ipa_table index_table;
 	struct ipa_nat_indx_tbl_meta_info *index_expn_table_meta;
-
-	uint16_t *rule_id_array;
-#ifdef IPA_ON_R3PC
-	uint32_t mmap_offset;
-#endif
-
-	uint16_t cur_tbl_cnt;
-	uint16_t cur_expn_tbl_cnt;
+	ipa_table_dma_cmd_helper table_dma_cmd_helpers[IPA_NAT_TABLE_DMA_CMD_MAX];
 };
 
 struct ipa_nat_cache {
+	ipa_descriptor* ipa_desc;
 	struct ipa_nat_ip4_table_cache ip4_tbl[IPA_NAT_MAX_IP4_TBLS];
-	int ipa_fd;
 	uint8_t table_cnt;
-	enum ipa_hw_type ver;
+	enum ipa3_nat_mem_in nmi;
 };
 
-struct ipa_nat_indx_tbl_sw_rule {
-	uint16_t tbl_entry;
-	uint16_t next_index;
-	uint16_t prev_index;
-};
-
-typedef enum {
-	IPA_NAT_DEL_TYPE_ONLY_ONE,
-	IPA_NAT_DEL_TYPE_HEAD,
-	IPA_NAT_DEL_TYPE_MIDDLE,
-	IPA_NAT_DEL_TYPE_LAST,
-} del_type;
-
-/**
- * ipa_nati_parse_ipv4_rule_hdl() - prase rule handle
- * @tbl_hdl:	[in] nat table rule
- * @rule_hdl: [in] nat rule handle
- * @expn_tbl: [out] expansion table or not
- * @tbl_entry: [out] index into table
- *
- * Parse the rule handle to retrieve the nat table
- * type and entry of nat table
- *
- * Returns:	None
- */
-void ipa_nati_parse_ipv4_rule_hdl(uint8_t tbl_hdl,
-				uint16_t rule_hdl,
-				uint8_t *expn_tbl,
-				uint16_t *tbl_entry);
-
-/**
- * ipa_nati_make_rule_hdl() - makes nat rule handle
- * @tbl_hdl: [in] nat table handle
- * @tbl_entry: [in]  nat table entry
- *
- * Calculate the nat rule handle which from
- * nat entry which will be returned to client of
- * nat driver
- *
- * Returns:	>0 nat rule handle
- */
-uint16_t ipa_nati_make_rule_hdl(uint16_t tbl_hdl,
-				uint16_t tbl_entry);
-
-uint32_t ipa_nati_get_index_entry_offset(
-				struct ipa_nat_ip4_table_cache*,
-				nat_table_type tbl_type,
-				uint16_t indx_tbl_entry);
-uint32_t ipa_nati_get_entry_offset(
-				struct ipa_nat_ip4_table_cache*,
-				nat_table_type tbl_type,
-				uint16_t  tbl_entry);
-
-int ipa_nati_add_ipv4_tbl(uint32_t public_ip_addr,
-				uint16_t number_of_entries,
-				uint32_t *table_hanle);
-
-int ipa_nati_alloc_table(uint16_t number_of_entries,
-				struct ipa_ioc_nat_alloc_mem *mem,
-				uint16_t*, uint16_t*);
-
-int ipa_nati_update_cache(struct ipa_ioc_nat_alloc_mem *,
-				uint32_t public_ip_addr,
-				uint16_t tbl_entries,
-				uint16_t expn_tbl_entries);
+int ipa_nati_add_ipv4_tbl(
+	uint32_t    public_ip_addr,
+	const char *mem_type_ptr,
+	uint16_t    number_of_entries,
+	uint32_t   *table_hanle);
 
 int ipa_nati_del_ipv4_table(uint32_t tbl_hdl);
-int ipa_nati_reset_ipv4_table(uint32_t tbl_hdl);
-int ipa_nati_post_ipv4_init_cmd(uint8_t tbl_index);
 
 int ipa_nati_query_timestamp(uint32_t  tbl_hdl,
 				uint32_t  rule_hdl,
@@ -428,81 +279,112 @@
 
 int ipa_nati_modify_pdn(struct ipa_ioc_nat_pdn_entry *entry);
 
+int ipa_nati_get_pdn_index(uint32_t public_ip, uint8_t *pdn_index);
+
+int ipa_nati_alloc_pdn(ipa_nat_pdn_entry *pdn_info, uint8_t *pdn_index);
+
+int ipa_nati_get_pdn_cnt(void);
+
+int ipa_nati_dealloc_pdn(uint8_t pdn_index);
+
 int ipa_nati_add_ipv4_rule(uint32_t tbl_hdl,
 				const ipa_nat_ipv4_rule *clnt_rule,
 				uint32_t *rule_hdl);
 
-int ipa_nati_generate_rule(uint32_t tbl_hdl,
-				const ipa_nat_ipv4_rule *clnt_rule,
-				struct ipa_nat_sw_rule *rule,
-				struct ipa_nat_indx_tbl_sw_rule *index_sw_rule,
-				uint16_t *tbl_entry,
-				uint16_t *indx_tbl_entry);
-
-uint16_t ipa_nati_expn_tbl_free_entry(struct ipa_nat_rule *expn_tbl,
-				uint16_t size);
-
-uint16_t ipa_nati_generate_tbl_rule(const ipa_nat_ipv4_rule *clnt_rule,
-				struct ipa_nat_sw_rule *sw_rule,
-				struct ipa_nat_ip4_table_cache *tbl_ptr);
-
-uint16_t ipa_nati_generate_index_rule(const ipa_nat_ipv4_rule *clnt_rule,
-				struct ipa_nat_indx_tbl_sw_rule *sw_rule,
-				struct ipa_nat_ip4_table_cache *tbl_ptr);
-
-uint16_t ipa_nati_index_expn_get_free_entry(struct ipa_nat_indx_tbl_rule *tbl,
-				uint16_t size);
-
-void ipa_nati_copy_ipv4_rule_to_hw(
-				struct ipa_nat_ip4_table_cache *ipv4_cache,
-				struct ipa_nat_sw_rule *rule,
-				uint16_t entry, uint8_t tbl_index);
-
-void ipa_nati_copy_ipv4_index_rule_to_hw(
-				struct ipa_nat_ip4_table_cache *ipv4_cache,
-				struct ipa_nat_indx_tbl_sw_rule *indx_sw_rule,
-				uint16_t entry, uint8_t tbl_index);
-
-void ipa_nati_write_next_index(uint8_t tbl_indx,
-				nat_table_type tbl_type,
-				uint16_t value,
-				uint32_t offset);
-
-int ipa_nati_post_ipv4_dma_cmd(uint8_t tbl_indx,
-				uint16_t entry);
-
 int ipa_nati_del_ipv4_rule(uint32_t tbl_hdl,
 				uint32_t rule_hdl);
 
-int ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,
-				uint16_t tbl_entry,
-				uint8_t expn_tbl,
-				del_type rule_pos);
+int ipa_nati_get_sram_size(
+	uint32_t* size_ptr);
 
-void ipa_nati_find_index_rule_pos(
-				struct ipa_nat_ip4_table_cache *cache_ptr,
-				uint16_t tbl_entry,
-				del_type *rule_pos);
+int ipa_nati_clear_ipv4_tbl(
+	uint32_t tbl_hdl );
 
-void ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx);
-void ipa_nati_find_rule_pos(struct ipa_nat_ip4_table_cache *cache_ptr,
-				uint8_t expn_tbl,
-				uint16_t tbl_entry,
-				del_type *rule_pos);
-void ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx);
+int ipa_nati_copy_ipv4_tbl(
+	uint32_t          src_tbl_hdl,
+	uint32_t          dst_tbl_hdl,
+	ipa_table_walk_cb copy_cb );
 
-uint16_t Read16BitFieldValue(uint32_t param,
-				ipa_nat_rule_field_type fld_type);
+typedef enum
+{
+	USE_NAT_TABLE   = 0,
+	USE_INDEX_TABLE = 1,
 
-/* ========================================================
-								Debug functions
-   ========================================================*/
-#ifdef NAT_DUMP
-void ipa_nati_print_rule(struct ipa_nat_rule*, uint32_t);
-void ipa_nat_dump_ipv4_table(uint32_t);
-void ipa_nati_print_index_rule(struct ipa_nat_indx_tbl_rule*,
-				uint32_t, uint16_t);
-int ipa_nati_query_nat_rules(uint32_t, nat_table_type);
-#endif
+	USE_MAX
+} WhichTbl2Use;
+
+#define VALID_WHICHTBL2USE(w) \
+	( (w) >= USE_NAT_TABLE && (w) < USE_MAX )
+
+int ipa_nati_walk_ipv4_tbl(
+	uint32_t          tbl_hdl,
+	WhichTbl2Use      which,
+	ipa_table_walk_cb walk_cb,
+	void*             arb_data_ptr );
+
+/*
+ * The following used for retrieving table stats.
+ */
+typedef struct
+{
+	enum ipa3_nat_mem_in nmi;
+	uint32_t tot_ents;
+	uint32_t tot_base_ents;
+	uint32_t tot_base_ents_filled;
+	uint32_t tot_expn_ents;
+	uint32_t tot_expn_ents_filled;
+	uint32_t tot_chains;
+	uint32_t min_chain_len;
+	uint32_t max_chain_len;
+	float    avg_chain_len;
+} ipa_nati_tbl_stats;
+
+int ipa_nati_ipv4_tbl_stats(
+	uint32_t            tbl_hdl,
+	ipa_nati_tbl_stats* nat_stats_ptr,
+	ipa_nati_tbl_stats* idx_stats_ptr );
+
+int ipa_nati_vote_clock(
+	enum ipa_app_clock_vote_type vote_type );
+
+int ipa_NATI_add_ipv4_tbl(
+	enum ipa3_nat_mem_in nmi,
+	uint32_t             public_ip_addr,
+	uint16_t             number_of_entries,
+	uint32_t*            tbl_hdl);
+
+int ipa_NATI_del_ipv4_table(
+	uint32_t tbl_hdl);
+
+int ipa_NATI_clear_ipv4_tbl(
+	uint32_t tbl_hdl );
+
+int ipa_NATI_walk_ipv4_tbl(
+	uint32_t          tbl_hdl,
+	WhichTbl2Use      which,
+	ipa_table_walk_cb walk_cb,
+	void*             arb_data_ptr );
+
+int ipa_NATI_ipv4_tbl_stats(
+	uint32_t            tbl_hdl,
+	ipa_nati_tbl_stats* nat_stats_ptr,
+	ipa_nati_tbl_stats* idx_stats_ptr );
+
+int ipa_NATI_query_timestamp(
+	uint32_t  tbl_hdl,
+	uint32_t  rule_hdl,
+	uint32_t* time_stamp);
+
+int ipa_NATI_add_ipv4_rule(
+	uint32_t                 tbl_hdl,
+	const ipa_nat_ipv4_rule* clnt_rule,
+	uint32_t*                rule_hdl);
+
+int ipa_NATI_del_ipv4_rule(
+	uint32_t tbl_hdl,
+	uint32_t rule_hdl);
+
+int ipa_NATI_post_ipv4_init_cmd(
+	uint32_t tbl_hdl );
 
 #endif /* #ifndef IPA_NAT_DRVI_H */
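
For context, a minimal caller sketch of the reworked table-allocation API above. The
configuration values and error handling here are illustrative assumptions, not part of
this patch:

	uint32_t    tbl_hdl = 0;
	uint32_t    pub_ip  = 0xC0A80101;   /* example public IP only */
	const char *memtype = "HYBRID";     /* "DDR", "SRAM", or "HYBRID" */

	if (ipa_nati_add_ipv4_tbl(pub_ip, memtype, 100, &tbl_hdl) == 0) {
		/* ... add/delete rules against tbl_hdl ... */
		ipa_nati_del_ipv4_table(tbl_hdl);
	}
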
diff --git a/ipanat/inc/ipa_nat_logi.h b/ipanat/inc/ipa_nat_logi.h
deleted file mode 100644
index 5f79cc6..0000000
--- a/ipanat/inc/ipa_nat_logi.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/* 
-Copyright (c) 2013, The Linux Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-    * Neither the name of The Linux Foundation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/*!
-	@file
-	ipa_nat_logi.h
-
-	@brief
-	This file implements the IPAM log functionality.
-
-	@Author
-	
-
-*/
-
-#ifndef IPA_NAT_LOGI_H
-#define IPA_NAT_LOGI_H
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-#include <stdio.h>
-#include <string.h>
-#include <syslog.h>
-
-#define PERROR(fmt) printf("%s:%d %s()", __FILE__, __LINE__, __FUNCTION__);\
-                    perror(fmt);
-
-#define IPAERR(fmt, ...)  printf("ERR: %s:%d %s() " fmt, __FILE__,  __LINE__, __FUNCTION__, ##__VA_ARGS__);
-
-#ifdef DEBUG
-#define IPADBG(fmt, ...) printf("%s:%d %s() " fmt, __FILE__,  __LINE__, __FUNCTION__, ##__VA_ARGS__);
-
-#define IPADUMP(fmt, ...) printf(fmt, ##__VA_ARGS__);
-
-#else
-#define IPADBG(fmt, ...)
-#define IPADUMP(fmt, ...)
-#endif
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* IPA_NAT_LOGI_H */
diff --git a/ipanat/inc/ipa_nat_map.h b/ipanat/inc/ipa_nat_map.h
new file mode 100644
index 0000000..d81061b
--- /dev/null
+++ b/ipanat/inc/ipa_nat_map.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#if !defined(_IPA_NATI_MAP_H_)
+# define _IPA_NATI_MAP_H_
+
+#include <stdint.h>
+
+# ifdef __cplusplus
+extern "C"
+{
+# endif /* __cplusplus */
+
+/* Used below */
+#define MAKE_AS_STR_CASE(v) case v: return #v
+
+/*
+ * The following is used to describe which map to use.
+ *
+ * PLEASE KEEP THE FOLLOWING IN SYNC WITH ipa_which_map_as_str()
+ * BELOW.
+ */
+typedef enum
+{
+	MAP_NUM_00 = 0,
+	MAP_NUM_01 = 1,
+	MAP_NUM_02 = 2,
+	MAP_NUM_03 = 3,
+
+	MAP_NUM_99 = 4,
+
+	MAP_NUM_MAX
+} ipa_which_map;
+
+#define VALID_IPA_USE_MAP(w) \
+	( (w) >= MAP_NUM_00 && (w) < MAP_NUM_MAX )
+
+/* KEEP THE FOLLOWING IN SYNC WITH ABOVE. */
+static inline const char* ipa_which_map_as_str(
+	ipa_which_map w )
+{
+	switch ( w )
+	{
+		MAKE_AS_STR_CASE(MAP_NUM_00);
+		MAKE_AS_STR_CASE(MAP_NUM_01);
+		MAKE_AS_STR_CASE(MAP_NUM_02);
+		MAKE_AS_STR_CASE(MAP_NUM_03);
+
+		MAKE_AS_STR_CASE(MAP_NUM_99);
+	default:
+		break;
+	}
+
+	return "???";
+}
+
+int ipa_nat_map_add(
+	ipa_which_map which,
+	uint32_t      key,
+	uint32_t      val );
+
+int ipa_nat_map_find(
+	ipa_which_map which,
+	uint32_t      key,
+	uint32_t*     val_ptr );
+
+int ipa_nat_map_del(
+	ipa_which_map which,
+	uint32_t      key,
+	uint32_t*     val_ptr );
+
+int ipa_nat_map_clear(
+	ipa_which_map which );
+
+int ipa_nat_map_dump(
+	ipa_which_map which );
+
+# ifdef __cplusplus
+}
+# endif /* __cplusplus */
+
+#endif /* #if !defined(_IPA_NATI_MAP_H_) */
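
A short usage sketch for the map API above; the key/value handles and the choice of
MAP_NUM_00 are illustrative only:

	uint32_t new_hdl = 0;

	/* Remember that original handle 0x0010 now corresponds to 0x0042
	 * (e.g. after an SRAM<->DDR table switch), then look it up and
	 * finally remove the pairing. */
	ipa_nat_map_add(MAP_NUM_00, 0x0010, 0x0042);

	if (ipa_nat_map_find(MAP_NUM_00, 0x0010, &new_hdl) == 0) {
		/* new_hdl == 0x0042 here */
	}

	ipa_nat_map_del(MAP_NUM_00, 0x0010, &new_hdl);
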
diff --git a/ipanat/inc/ipa_nat_statemach.h b/ipanat/inc/ipa_nat_statemach.h
new file mode 100644
index 0000000..a86dd87
--- /dev/null
+++ b/ipanat/inc/ipa_nat_statemach.h
@@ -0,0 +1,303 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#if !defined(_IPA_NAT_STATEMACH_H_)
+# define _IPA_NAT_STATEMACH_H_
+
+/******************************************************************************/
+/**
+ * The following enum represents the states that a nati object can be
+ * in.
+ */
+typedef enum {
+	NATI_STATE_NULL       = 0,
+	NATI_STATE_DDR_ONLY   = 1, /* NAT in DDR only (traditional) */
+	NATI_STATE_SRAM_ONLY  = 2, /* NAT in SRAM only (new) */
+	NATI_STATE_HYBRID     = 3, /* NAT simultaneously in both SRAM/DDR */
+	NATI_STATE_HYBRID_DDR = 4, /* NAT transitioned from SRAM to DDR */
+
+	NATI_STATE_LAST
+} ipa_nati_state;
+
+# undef strcasesame
+# define strcasesame(a, b) (!strcasecmp(a, b))
+
+static inline ipa_nati_state mem_type_str_to_ipa_nati_state(
+	const char* str )
+{
+	if ( str ) {
+		if (strcasesame(str, "HYBRID" ))
+			return NATI_STATE_HYBRID;
+		if (strcasesame(str, "SRAM" ))
+			return NATI_STATE_SRAM_ONLY;
+	}
+	return NATI_STATE_DDR_ONLY;
+}
+
+/******************************************************************************/
+/**
+ * The following enum represents the API triggers that may or may not
+ * cause a nati object to transition through its various allowable
+ * states defined in ipa_nati_state above.
+ */
+typedef enum {
+	NATI_TRIG_NULL       =  0,
+	NATI_TRIG_ADD_TABLE  =  1,
+	NATI_TRIG_DEL_TABLE  =  2,
+	NATI_TRIG_CLR_TABLE  =  3,
+	NATI_TRIG_WLK_TABLE  =  4,
+	NATI_TRIG_TBL_STATS  =  5,
+	NATI_TRIG_ADD_RULE   =  6,
+	NATI_TRIG_DEL_RULE   =  7,
+	NATI_TRIG_TBL_SWITCH =  8,
+	NATI_TRIG_GOTO_DDR   =  9,
+	NATI_TRIG_GOTO_SRAM  = 10,
+	NATI_TRIG_GET_TSTAMP = 11,
+
+	NATI_TRIG_LAST
+} ipa_nati_trigger;
+
+/******************************************************************************/
+/**
+ * The following structure is used to keep switch stats.
+ */
+typedef struct
+{
+	uint32_t pass;
+	uint32_t fail;
+} nati_switch_stats;
+
+/******************************************************************************/
+/**
+ * The following structure is used to direct map usage.
+ *
+ * Maps are needed to map rule handles: orig to new and new to orig.
+ * See comments in ipa_nat_statemach.c on this topic...
+ */
+typedef struct
+{
+	uint32_t orig2new_map;
+	uint32_t new2orig_map;
+} nati_map_pair;
+
+/******************************************************************************/
+/**
+ * The following is a nati object that will maintain state relative to
+ * various API calls.
+ */
+typedef struct
+{
+	ipa_nati_state prev_state;
+	ipa_nati_state curr_state;
+	uint32_t       ddr_tbl_hdl;
+	uint32_t       sram_tbl_hdl;
+	uint32_t       tot_slots_in_sram;
+	uint32_t       back_to_sram_thresh;
+	/*
+	 * tot_rules_in_table[0] for ddr, and
+	 * tot_rules_in_table[1] for sram
+	 */
+	uint32_t       tot_rules_in_table[2];
+	/*
+	 * map_pairs[0] for ddr, and
+	 * map_pairs[1] for sram
+	 */
+	nati_map_pair  map_pairs[2];
+	/*
+	 * sw_stats[0] for ddr, and
+	 * sw_stats[1] for sram
+	 */
+	nati_switch_stats sw_stats[2];
+} ipa_nati_obj;
+
+/*
+ * For use with the arrays above..in ipa_nati_obj...
+ */
+#undef DDR_SUB
+#undef SRAM_SUB
+
+#define DDR_SUB  0
+#define SRAM_SUB 1
+
+#undef IN_HYBRID_STATE
+#define IN_HYBRID_STATE() \
+	( nati_obj.curr_state == NATI_STATE_HYBRID || \
+	  nati_obj.curr_state == NATI_STATE_HYBRID_DDR )
+
+#undef  SRAM_CURRENTLY_ACTIVE
+#define SRAM_CURRENTLY_ACTIVE() \
+	( nati_obj.curr_state == NATI_STATE_SRAM_ONLY || \
+	  nati_obj.curr_state == NATI_STATE_HYBRID )
+
+#define SRAM_TO_BE_ACCESSED(t) \
+	( SRAM_CURRENTLY_ACTIVE() || \
+	  (t) == NATI_TRIG_GOTO_SRAM || \
+	  (t) == NATI_TRIG_TBL_SWITCH )
+
+/*
+ * NOTE: The exclusion of timestamp retrieval and table creation
+ *       below.
+ *
+ * Why?
+ *
+ *  In re timestamp:
+ *
+ *   Because timestamp retrieval involves too many repetitive
+ *   accesses and hence would lead to too many successive votes.
+ *   Instead, it will be handled differently, in the app layer above.
+ *
+ *  In re table creation:
+ *
+ *    Because it can't be known, a priori, whether or not sram is
+ *    really available for use. Instead, we'll move table creation
+ *    voting to a place where we know sram is available.
+ */
+#undef  VOTE_REQUIRED
+#define VOTE_REQUIRED(t) \
+	( SRAM_TO_BE_ACCESSED(t) && \
+	  (t) != NATI_TRIG_GET_TSTAMP && \
+	  (t) != NATI_TRIG_ADD_TABLE )
+
+/******************************************************************************/
+/**
+ * A helper macro for changing a nati object's state...
+ */
+# undef SET_NATIOBJ_STATE
+# define SET_NATIOBJ_STATE(x, s)  {        \
+		(x)->prev_state = (x)->curr_state; \
+		(x)->curr_state = s;               \
+	}
+
+/******************************************************************************/
+/**
+ * A function signature for a state/trigger callback function...
+ */
+typedef int (*nati_statemach_cb)(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr );
+
+/******************************************************************************/
+/**
+ * A structure for relating state to trigger callbacks.
+ */
+typedef struct
+{
+	ipa_nati_state    state;
+	ipa_nati_trigger  trigger;
+	nati_statemach_cb sm_cb;
+	const char*       state_as_str;
+	const char*       trigger_as_str;
+	const char*       sm_cb_as_str;
+} nati_statemach_tuple;
+
+#undef SM_ROW
+#define SM_ROW(s, t, f) \
+	{ s, t, f, #s, #t, #f }
+
+/******************************************************************************/
+/**
+ * FUNCTION: ipa_nati_statemach
+ *
+ * PARAMS:
+ *
+ *   @nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   @trigger      (IN) The trigger to run through the state machine
+ *
+ *   @arb_data_ptr (IN) Anything you like.  Will be passed, untouched,
+ *                     to the state/trigger callback function.
+ *
+ * DESCRIPTION:
+ *
+ *   This function allows a nati object and a trigger to be run
+ *   through the state machine.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+int ipa_nati_statemach(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr );
+
+/*
+ * To follow are a number of structures, designed to hold function
+ * arguments, that are to be passed into the state machine...
+ */
+typedef struct
+{
+	uint32_t    public_ip_addr;
+	uint16_t    number_of_entries;
+	uint32_t*   tbl_hdl;
+} table_add_args;
+
+typedef struct
+{
+	uint32_t tbl_hdl;
+} table_del_args;
+
+typedef table_del_args table_clear_args;
+
+typedef struct
+{
+	uint32_t          tbl_hdl;
+	WhichTbl2Use      which;
+	ipa_table_walk_cb walk_cb;
+	void*             arb_data_ptr;
+} table_walk_args;
+
+typedef struct
+{
+	uint32_t            tbl_hdl;
+	ipa_nati_tbl_stats* nat_stats_ptr;
+	ipa_nati_tbl_stats* idx_stats_ptr;
+} table_stats_args;
+
+typedef struct
+{
+	uint32_t                 tbl_hdl;
+	const ipa_nat_ipv4_rule* clnt_rule;
+	uint32_t*                rule_hdl;
+} rule_add_args;
+
+typedef struct
+{
+	uint32_t tbl_hdl;
+	uint32_t rule_hdl;
+} rule_del_args;
+
+typedef struct
+{
+	uint32_t  tbl_hdl;
+	uint32_t  rule_hdl;
+	uint32_t* time_stamp;
+} timestap_query_args;
+
+#endif /* #if !defined(_IPA_NAT_STATEMACH_H_) */
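
To illustrate the intended call pattern, a sketch that assumes the dispatch table in
ipa_nat_statemach.c wires NATI_TRIG_ADD_RULE to an add-rule callback; the object
setup and argument values are hypothetical:

	static ipa_nati_obj nati_obj;   /* zero initialized at load time */

	ipa_nat_ipv4_rule rule;         /* filled in by the caller */
	uint32_t          rule_hdl = 0;

	rule_add_args args = {
		.tbl_hdl   = 1,
		.clnt_rule = &rule,
		.rule_hdl  = &rule_hdl,
	};

	SET_NATIOBJ_STATE(&nati_obj, NATI_STATE_HYBRID);

	if (ipa_nati_statemach(&nati_obj, NATI_TRIG_ADD_RULE, &args) == 0) {
		/* rule_hdl now holds the handle of the newly added rule */
	}
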
diff --git a/ipanat/inc/ipa_nat_utils.h b/ipanat/inc/ipa_nat_utils.h
new file mode 100644
index 0000000..b6545da
--- /dev/null
+++ b/ipanat/inc/ipa_nat_utils.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2013, 2018-2019 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef IPA_NAT_UTILS_H
+#define IPA_NAT_UTILS_H
+
+#include <stdio.h>
+#include <string.h>
+#include <syslog.h>
+#include <time.h>
+#include <linux/msm_ipa.h>
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#if !defined(MSM_IPA_TESTS) && !defined(FEATURE_IPA_ANDROID)
+#ifdef USE_GLIB
+#include <glib.h>
+#define strlcpy g_strlcpy
+#else
+size_t strlcpy(char* dst, const char* src, size_t size);
+#endif
+#endif
+
+#define IPAERR(fmt, ...)  printf("ERR: %s:%d %s() " fmt, __FILE__,  __LINE__, __FUNCTION__, ##__VA_ARGS__);
+
+#define IPAINFO(fmt, ...)  printf("INFO: %s:%d %s() " fmt, __FILE__,  __LINE__, __FUNCTION__, ##__VA_ARGS__);
+
+#define IPAWARN(fmt, ...)  printf("WARN: %s:%d %s() " fmt, __FILE__,  __LINE__, __FUNCTION__, ##__VA_ARGS__);
+
+#undef UNUSED
+#define UNUSED(v) (void)(v)
+
+#ifdef NAT_DEBUG
+#define IPADBG(fmt, ...) printf("%s:%d %s() " fmt, __FILE__,  __LINE__, __FUNCTION__, ##__VA_ARGS__);
+#else
+#define IPADBG(fmt, ...)
+#endif
+
+typedef struct
+{
+	int              fd;
+	enum ipa_hw_type ver;
+} ipa_descriptor;
+
+ipa_descriptor* ipa_descriptor_open(void);
+
+void ipa_descriptor_close(
+	ipa_descriptor*);
+
+void ipa_read_debug_info(
+	const char* debug_file_path);
+
+static inline char* prep_ioc_nat_dma_cmd_4print(
+	struct ipa_ioc_nat_dma_cmd* cmd_ptr,
+	char*                       buf_ptr,
+	uint32_t                    buf_sz )
+{
+	uint32_t i, len, buf_left;
+
+	if ( cmd_ptr && buf_ptr && buf_sz )
+	{
+		snprintf(
+			buf_ptr,
+			buf_sz,
+			"NAT_DMA_CMD: mem_type(%u) entries(%u) ",
+			cmd_ptr->mem_type,
+			cmd_ptr->entries);
+
+		for ( i = 0; i < cmd_ptr->entries; i++ )
+		{
+			len = strlen(buf_ptr);
+
+			buf_left = buf_sz - len;
+
+			if ( buf_left > 0 && buf_left < buf_sz )
+			{
+				snprintf(
+					buf_ptr + len,
+					buf_left,
+					"[%u](table_index(0x%02X) base_addr(0x%02X) offset(0x%08X) data(0x%04X)) ",
+					i,
+					cmd_ptr->dma[i].table_index,
+					cmd_ptr->dma[i].base_addr,
+					cmd_ptr->dma[i].offset,
+					(uint32_t) cmd_ptr->dma[i].data);
+			}
+		}
+	}
+
+	return buf_ptr;
+}
+
+#undef NANOS_PER_SEC
+#undef MICROS_PER_SEC
+#undef MILLIS_PER_SEC
+
+#define NANOS_PER_SEC  1000000000
+#define MICROS_PER_SEC    1000000
+#define MILLIS_PER_SEC       1000
+
+/**
+ * A macro for converting seconds to nanoseconds...
+ */
+#define SECS2NanSECS(x) ((x) * NANOS_PER_SEC)
+
+/**
+ * A macro for converting seconds to microseconds...
+ */
+#define SECS2MicSECS(x) ((x) * MICROS_PER_SEC)
+
+/**
+ * A macro for converting seconds to milliseconds...
+ */
+#define SECS2MilSECS(x) ((x) * MILLIS_PER_SEC)
+
+/******************************************************************************/
+
+typedef enum
+{
+	TimeAsNanSecs = 0,
+	TimeAsMicSecs = 1,
+	TimeAsMilSecs = 2
+} TimeAs_t;
+
+#undef VALID_TIMEAS
+#define VALID_TIMEAS(ta)   \
+	( (ta) == TimeAsNanSecs ||					\
+	  (ta) == TimeAsMicSecs ||					\
+	  (ta) == TimeAsMilSecs )
+
+int currTimeAs(
+	TimeAs_t  timeAs,
+	uint64_t* valPtr );
+
+#endif /* IPA_NAT_UTILS_H */
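
A brief usage sketch for the time helper declared above; the latency-measurement use
case is an assumption:

	uint64_t start_ms = 0, end_ms = 0;

	currTimeAs(TimeAsMilSecs, &start_ms);
	/* ... do the work being measured ... */
	currTimeAs(TimeAsMilSecs, &end_ms);

	IPADBG("operation took %llu ms\n",
		   (unsigned long long)(end_ms - start_ms));
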
diff --git a/ipanat/inc/ipa_table.h b/ipanat/inc/ipa_table.h
new file mode 100644
index 0000000..aa2159b
--- /dev/null
+++ b/ipanat/inc/ipa_table.h
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef IPA_TABLE_H
+#define IPA_TABLE_H
+
+#include <stdint.h>
+#include <stdbool.h>
+#include <linux/msm_ipa.h>
+
+#define IPA_TABLE_MAX_ENTRIES 5120
+
+#define IPA_TABLE_INVALID_ENTRY 0x0
+
+#undef  VALID_INDEX
+#define VALID_INDEX(idx) \
+	( (idx) != IPA_TABLE_INVALID_ENTRY )
+
+#undef  VALID_RULE_HDL
+#define VALID_RULE_HDL(hdl) \
+	( (hdl) != IPA_TABLE_INVALID_ENTRY )
+
+#undef GOTO_REC
+#define GOTO_REC(tbl, rec_idx) \
+	( (tbl)->table_addr + ((rec_idx) * (tbl)->entry_size) )
+
+typedef enum
+{
+	IPA_NAT_BASE_TBL       = 0,
+	IPA_NAT_EXPN_TBL       = 1,
+	IPA_NAT_INDX_TBL       = 2,
+	IPA_NAT_INDEX_EXPN_TBL = 3,
+	IPA_IPV6CT_BASE_TBL    = 4,
+	IPA_IPV6CT_EXPN_TBL    = 5,
+} ipa_table_dma_type;
+
+#define VALID_IPA_TABLE_DMA_TYPE(t) \
+	( (t) >= IPA_NAT_BASE_TBL && (t) <= IPA_IPV6CT_EXPN_TBL )
+
+/*
+ *    --------- NAT Rule Handle Entry ID structure ---------
+ *
+ * +-----------+-----------+------------------+----------------+
+ * |   1 bit   |  2 bits   |    12 bits       |     1 bit      |
+ * +-----------+-----------+------------------+----------------+
+ * | 0 - DDR   | reserved  | index into table |  0 - base      |
+ * | 1 - SRAM  |           |                  |  1 - expansion |
+ * +-----------+-----------+------------------+----------------+
+ */
+#define IPA_TABLE_TYPE_BITS      0x00000001
+#define IPA_TABLE_TYPE_MASK      0x00000001
+#define IPA_TABLE_INDX_MASK      0x00000FFF
+#define IPA_TABLE_TYPE_MEM_SHIFT 15
+
+#undef BREAK_RULE_HDL
+#define BREAK_RULE_HDL(tbl, hdl, mt, iet, indx) \
+	do { \
+		mt    = ((hdl) >> IPA_TABLE_TYPE_MEM_SHIFT) & IPA_TABLE_TYPE_MASK; \
+		iet   =  (hdl)                              & IPA_TABLE_TYPE_MASK; \
+		indx  = ((hdl) >> IPA_TABLE_TYPE_BITS)      & IPA_TABLE_INDX_MASK; \
+		indx += (iet) ? tbl->table_entries : 0; \
+		/*IPADBG("hdl(%u) -> mt(%u) iet(%u) indx(%u)\n", hdl, mt, iet, indx);*/ \
+	} while ( 0 )
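+
+/*
+ * Illustrative decode (value is hypothetical): for hdl = 0x800B,
+ * BREAK_RULE_HDL() yields mt = 1 (SRAM), iet = 1 (expansion), and
+ * indx = 5 + tbl->table_entries, i.e. slot 5 of the expansion region
+ * that is laid out directly after the base region.
+ */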
+
+typedef int (*entry_validity_checker)(
+	void* entry);
+
+typedef uint16_t (*entry_next_index_getter)(
+	void* entry);
+
+typedef uint16_t (*entry_prev_index_getter)(
+	void*    entry,
+	uint16_t entry_index,
+	void*    meta,
+	uint16_t base_table_size);
+
+typedef void (*entry_prev_index_setter)(
+	void*    entry,
+	uint16_t entry_index,
+	uint16_t prev_index,
+	void*    meta,
+	uint16_t base_table_size);
+
+typedef int (*entry_head_inserter)(
+	void*     entry,
+	void*     user_data,
+	uint16_t* dma_command_data);
+
+typedef int (*entry_tail_inserter)(
+	void* entry,
+	void* user_data);
+
+typedef uint16_t (*entry_delete_head_dma_command_data_getter)(
+	void* head,
+	void* next_entry);
+
+typedef struct
+{
+	entry_validity_checker  entry_is_valid;
+	entry_next_index_getter entry_get_next_index;
+	entry_prev_index_getter entry_get_prev_index;
+	entry_prev_index_setter entry_set_prev_index;
+	entry_head_inserter     entry_head_insert;
+	entry_tail_inserter     entry_tail_insert;
+	entry_delete_head_dma_command_data_getter
+	  entry_get_delete_head_dma_command_data;
+} ipa_table_entry_interface;
+
+typedef enum
+{
+	HELP_UPDATE_HEAD  = 0,
+	HELP_UPDATE_ENTRY = 1,
+	HELP_DELETE_HEAD  = 2,
+
+	HELP_UPDATE_MAX,
+} dma_help_type;
+
+#undef VALID_DMA_HELP_TYPE
+#define VALID_DMA_HELP_TYPE(t) \
+	( (t) >=  HELP_UPDATE_HEAD && (t) < HELP_UPDATE_MAX )
+
+typedef struct
+{
+	uint32_t           offset;
+	ipa_table_dma_type table_type;
+	ipa_table_dma_type expn_table_type;
+	uint8_t            table_indx;
+} ipa_table_dma_cmd_helper;
+
+typedef struct
+{
+	char                       name[IPA_RESOURCE_NAME_MAX];
+
+	enum ipa3_nat_mem_in       nmi;
+
+	int                        entry_size;
+
+	uint16_t                   table_entries;
+	uint16_t                   expn_table_entries;
+	uint32_t                   tot_tbl_ents;
+
+	uint8_t*                   table_addr;
+	uint8_t*                   expn_table_addr;
+
+	uint16_t                   cur_tbl_cnt;
+	uint16_t                   cur_expn_tbl_cnt;
+
+	ipa_table_entry_interface* entry_interface;
+
+	ipa_table_dma_cmd_helper*  dma_help[HELP_UPDATE_MAX];
+
+	void*                      meta;
+	int                        meta_entry_size;
+} ipa_table;
+
+typedef struct
+{
+	uint16_t prev_index;
+	void*    prev_entry;
+
+	uint16_t curr_index;
+	void*    curr_entry;
+
+	uint16_t next_index;
+	void*    next_entry;
+} ipa_table_iterator;
+
+
+void ipa_table_init(
+	ipa_table*                 table,
+	const char*                table_name,
+	enum ipa3_nat_mem_in       nmi,
+	int                        entry_size,
+	void*                      meta,
+	int                        meta_entry_size,
+	ipa_table_entry_interface* entry_interface);
+
+int ipa_table_calculate_entries_num(
+	ipa_table*           table,
+	uint16_t             number_of_entries,
+	enum ipa3_nat_mem_in nmi);
+
+int ipa_table_calculate_size(
+	ipa_table* table);
+
+uint8_t* ipa_table_calculate_addresses(
+	ipa_table* table,
+	uint8_t*   base_addr);
+
+void ipa_table_reset(
+	ipa_table* table);
+
+int ipa_table_add_entry(
+	ipa_table*                  table,
+	void*                       user_data,
+	uint16_t*                   index,
+	uint32_t*                   rule_hdl,
+	struct ipa_ioc_nat_dma_cmd* cmd);
+
+void ipa_table_create_delete_command(
+	ipa_table*                  table,
+	struct ipa_ioc_nat_dma_cmd* cmd,
+	ipa_table_iterator*         iterator);
+
+void ipa_table_delete_entry(
+	ipa_table*          table,
+	ipa_table_iterator* iterator,
+	uint8_t             is_prev_empty);
+
+void ipa_table_erase_entry(
+	ipa_table* table,
+	uint16_t   index);
+
+int ipa_table_get_entry(
+	ipa_table* table,
+	uint32_t   entry_handle,
+	void**     entry,
+	uint16_t*  entry_index);
+
+void* ipa_table_get_entry_by_index(
+	ipa_table* table,
+	uint16_t   index);
+
+void ipa_table_dma_cmd_helper_init(
+	ipa_table_dma_cmd_helper* dma_cmd_helper,
+	uint8_t                   table_indx,
+	ipa_table_dma_type        table_type,
+	ipa_table_dma_type        expn_table_type,
+	uint32_t                  offset);
+
+void ipa_table_dma_cmd_generate(
+	ipa_table_dma_cmd_helper*   dma_cmd_helper,
+	uint8_t                     is_expn,
+	uint32_t                    entry_offset,
+	uint16_t                    data,
+	struct ipa_ioc_nat_dma_cmd* cmd);
+
+int ipa_table_iterator_init(
+	ipa_table_iterator* iterator,
+	ipa_table*          table,
+	void*               curr_entry,
+	uint16_t            curr_index);
+
+int ipa_table_iterator_next(
+	ipa_table_iterator* iterator,
+	ipa_table*          table);
+
+int ipa_table_iterator_end(
+	ipa_table_iterator* iterator,
+	ipa_table*          table,
+	uint16_t            head_index,
+	void*               head);
+
+int ipa_table_iterator_is_head_with_tail(
+	ipa_table_iterator* iterator);
+
+int ipa_calc_num_sram_table_entries(
+	uint32_t  sram_size,
+	uint32_t  table1_ent_size,
+	uint32_t  table2_ent_size,
+	uint16_t* num_entries_ptr);
+
+typedef int (*ipa_table_walk_cb)(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr );
+
+typedef enum
+{
+	WHEN_SLOT_EMPTY  = 0,
+	WHEN_SLOT_FILLED = 1,
+
+	WHEN_SLOT_MAX
+} When2Callback;
+
+#define VALID_WHEN2CALLBACK(w) \
+	( (w) >= WHEN_SLOT_EMPTY && (w) < WHEN_SLOT_MAX )
+
+int ipa_table_walk(
+	ipa_table*        table,
+	uint16_t          start_index,
+	When2Callback     when,
+	ipa_table_walk_cb walk_cb,
+	void*             arb_data_ptr );
+
+int ipa_table_add_dma_cmd(
+	ipa_table*                  tbl_ptr,
+	dma_help_type               help_type,
+	void*                       rec_ptr,
+	uint16_t                    rec_index,
+	uint16_t                    data_for_entry,
+	struct ipa_ioc_nat_dma_cmd* cmd_ptr );
+
+#endif
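
A hedged sketch of a walk callback matching the ipa_table_walk_cb signature above;
the counting logic, table instance, and names are illustrative:

	static int count_filled_cb(
		ipa_table* table_ptr,
		uint32_t   rule_hdl,
		void*      record_ptr,
		uint16_t   record_index,
		void*      meta_record_ptr,
		uint16_t   meta_record_index,
		void*      arb_data_ptr )
	{
		uint32_t* count_ptr = (uint32_t*) arb_data_ptr;

		(*count_ptr)++;  /* invoked once per slot matching the When2Callback */

		return 0;        /* assumption: non-zero would abort the walk */
	}

	/* Count only the occupied slots, starting at index 0. */
	uint32_t filled = 0;

	ipa_table_walk(&some_table, 0, WHEN_SLOT_FILLED, count_filled_cb, &filled);
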
diff --git a/ipanat/src/Makefile.am b/ipanat/src/Makefile.am
index 8e2d005..abfc353 100644
--- a/ipanat/src/Makefile.am
+++ b/ipanat/src/Makefile.am
@@ -1,12 +1,15 @@
-AM_CFLAGS = -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs
+AM_CFLAGS = -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs -Wno-unused-variable
+
 if KERNELMODULES
 AM_CFLAGS += -I./../inc $(KERNEL_DIR)/include
 else
 AM_CFLAGS += -I./../inc
 endif
-#AM_CFLAGS += -DDEBUG -g
+
+#AM_CFLAGS += -g -DDEBUG -DNAT_DEBUG
 
 common_CFLAGS =  -DUSE_GLIB @GLIB_CFLAGS@
+
 if !KERNELMODULES
 common_LDFLAGS = -lrt @GLIB_LIBS@
 endif
@@ -17,22 +20,40 @@
 library_includedir = $(pkgincludedir)
 endif
 
-c_sources   = ipa_nat_drv.c \
-              ipa_nat_drvi.c \
-              ipa_nat_logi.c
+cpp_sources = \
+  ipa_nat_map.cpp
 
-library_include_HEADERS = ./../inc/ipa_nat_drvi.h \
-                          ./../inc/ipa_nat_drv.h \
-                          ./../inc/ipa_nat_logi.h
+c_sources   = \
+  ipa_table.c \
+  ipa_nat_statemach.c \
+  ipa_nat_drvi.c \
+  ipa_nat_drv.c \
+  ipa_mem_descriptor.c \
+  ipa_nat_utils.c \
+  ipa_ipv6ct.c
+
+library_include_HEADERS = \
+  ../inc/ipa_mem_descriptor.h \
+  ../inc/ipa_nat_drv.h \
+  ../inc/ipa_nat_drvi.h \
+  ../inc/ipa_nat_map.h \
+  ../inc/ipa_nat_statemach.h \
+  ../inc/ipa_nat_utils.h \
+  ../inc/ipa_table.h
 
 if KERNELMODULES
 noinst_LIBRARIES = libipanat.a
 libipanat_a_C = @C@
-libipanat_a_SOURCES = $(c_sources)
+libipanat_a_CC = @CC@
+libipanat_a_SOURCES = $(c_sources) $(cpp_sources)
+libipanat_a_CFLAGS = $(AM_CFLAGS) $(common_CFLAGS)
+libipanat_a_CXXFLAGS = $(AM_CFLAGS) $(common_CPPFLAGS)
 else
 lib_LTLIBRARIES = libipanat.la
 libipanat_la_C = @C@
-libipanat_la_SOURCES = $(c_sources)
+libipanat_la_CC = @CC@
+libipanat_la_SOURCES = $(c_sources) $(cpp_sources)
 libipanat_la_CFLAGS = $(AM_CFLAGS) $(common_CFLAGS)
+libipanat_la_CXXFLAGS = $(AM_CFLAGS) $(common_CPPFLAGS)
 libipanat_la_LDFLAGS = -shared $(common_LDFLAGS) -version-info 1:0:0
 endif
diff --git a/ipanat/src/ipa_ipv6ct.c b/ipanat/src/ipa_ipv6ct.c
new file mode 100644
index 0000000..a5ffbb0
--- /dev/null
+++ b/ipanat/src/ipa_ipv6ct.c
@@ -0,0 +1,856 @@
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "ipa_ipv6ct.h"
+#include "ipa_ipv6cti.h"
+
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#define IPA_IPV6CT_DEBUG_FILE_PATH "/sys/kernel/debug/ipa/ipv6ct"
+#define IPA_IPV6CT_TABLE_NAME "IPA IPv6CT table"
+
+static int ipa_ipv6ct_create_table(ipa_ipv6ct_table* ipv6ct_table, uint16_t number_of_entries, uint8_t table_index);
+static int ipa_ipv6ct_destroy_table(ipa_ipv6ct_table* ipv6ct_table);
+static void ipa_ipv6ct_create_table_dma_cmd_helpers(ipa_ipv6ct_table* ipv6ct_table, uint8_t table_indx);
+static int ipa_ipv6ct_post_init_cmd(ipa_ipv6ct_table* ipv6ct_table, uint8_t tbl_index);
+static int ipa_ipv6ct_post_dma_cmd(struct ipa_ioc_nat_dma_cmd* cmd);
+static uint16_t ipa_ipv6ct_hash(const ipa_ipv6ct_rule* rule, uint16_t size);
+static uint16_t ipa_ipv6ct_xor_segments(uint64_t num);
+
+static int table_entry_is_valid(void* entry);
+static uint16_t table_entry_get_next_index(void* entry);
+static uint16_t table_entry_get_prev_index(void* entry, uint16_t entry_index, void* meta, uint16_t base_table_size);
+static void table_entry_set_prev_index(void* entry, uint16_t entry_index, uint16_t prev_index,
+	void* meta, uint16_t base_table_size);
+static int table_entry_head_insert(void* entry, void* user_data, uint16_t* dma_command_data);
+static int table_entry_tail_insert(void* entry, void* user_data);
+static uint16_t table_entry_get_delete_head_dma_command_data(void* head, void* next_entry);
+
+static ipa_ipv6ct ipv6ct;
+static pthread_mutex_t ipv6ct_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static ipa_table_entry_interface entry_interface =
+{
+	table_entry_is_valid,
+	table_entry_get_next_index,
+	table_entry_get_prev_index,
+	table_entry_set_prev_index,
+	table_entry_head_insert,
+	table_entry_tail_insert,
+	table_entry_get_delete_head_dma_command_data
+};
+
+/**
+ * ipa_ipv6ct_add_tbl() - Adds a new IPv6CT table
+ * @number_of_entries: [in] number of IPv6CT entries
+ * @table_handle: [out] handle of new IPv6CT table
+ *
+ * This function creates new IPv6CT table and posts IPv6CT init command to HW
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_ipv6ct_add_tbl(uint16_t number_of_entries, uint32_t* table_handle)
+{
+	int ret;
+	ipa_ipv6ct_table* ipv6ct_table;
+
+	IPADBG("\n");
+
+	if (table_handle == NULL || number_of_entries == 0)
+	{
+		IPAERR("Invalid parameters table_handle=%pK number_of_entries=%d\n", table_handle, number_of_entries);
+		return -EINVAL;
+	}
+
+	*table_handle = 0;
+
+	if (ipv6ct.table_cnt >= IPA_IPV6CT_MAX_TBLS)
+	{
+		IPAERR("Can't add additional IPv6 connection tracking table. Maximum %d tables allowed\n", IPA_IPV6CT_MAX_TBLS);
+		return -EINVAL;
+	}
+
+	if (!ipv6ct.ipa_desc)
+	{
+		ipv6ct.ipa_desc = ipa_descriptor_open();
+		if (ipv6ct.ipa_desc == NULL)
+		{
+			IPAERR("failed to open IPA driver file descriptor\n");
+			return -EIO;
+		}
+	}
+
+	if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0)
+	{
+		IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver);
+		ret = -EPERM;
+		goto bail_ipa_desc;
+	}
+
+	ipv6ct_table = &ipv6ct.tables[ipv6ct.table_cnt];
+	ret = ipa_ipv6ct_create_table(ipv6ct_table, number_of_entries, ipv6ct.table_cnt);
+	if (ret)
+	{
+		IPAERR("unable to create ipv6ct table Error: %d\n", ret);
+		goto bail_ipa_desc;
+	}
+
+	/* Initialize the ipa hw with ipv6ct table dimensions */
+	ret = ipa_ipv6ct_post_init_cmd(ipv6ct_table, ipv6ct.table_cnt);
+	if (ret)
+	{
+		IPAERR("unable to post ipv6ct_init command Error %d\n", ret);
+		goto bail_ipv6ct_table;
+	}
+
+	/* Return table handle */
+	++ipv6ct.table_cnt;
+	*table_handle = ipv6ct.table_cnt;
+
+	IPADBG("Returning table handle 0x%x\n", *table_handle);
+	return 0;
+
+bail_ipv6ct_table:
+	ipa_ipv6ct_destroy_table(ipv6ct_table);
+bail_ipa_desc:
+	if (!ipv6ct.table_cnt) {
+		ipa_descriptor_close(ipv6ct.ipa_desc);
+		ipv6ct.ipa_desc = NULL;
+	}
+	return ret;
+}
+
+int ipa_ipv6ct_del_tbl(uint32_t table_handle)
+{
+	ipa_ipv6ct_table* ipv6ct_table;
+	int ret;
+
+	IPADBG("\n");
+
+	if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0)
+	{
+		IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver);
+		return -EINVAL;
+	}
+
+	if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS)
+	{
+		IPAERR("invalid table handle %d passed\n", table_handle);
+		return -EINVAL;
+	}
+	IPADBG("Passed Table Handle: 0x%x\n", table_handle);
+
+	if (pthread_mutex_lock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to lock the ipv6ct mutex\n");
+		return -EINVAL;
+	}
+
+	ipv6ct_table = &ipv6ct.tables[table_handle - 1];
+	if (!ipv6ct_table->mem_desc.valid)
+	{
+		IPAERR("invalid table handle %d\n", table_handle);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = ipa_ipv6ct_destroy_table(ipv6ct_table);
+	if (ret)
+	{
+		IPAERR("unable to delete IPV6CT table with handle %d\n", table_handle);
+		goto unlock;
+	}
+
+	if (!--ipv6ct.table_cnt) {
+		ipa_descriptor_close(ipv6ct.ipa_desc);
+		ipv6ct.ipa_desc = NULL;
+	}
+
+unlock:
+	if (pthread_mutex_unlock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to unlock the ipv6ct mutex\n");
+		return (ret) ? ret : -EPERM;
+	}
+
+	IPADBG("return\n");
+	return ret;
+}
+
+int ipa_ipv6ct_add_rule(uint32_t table_handle, const ipa_ipv6ct_rule* user_rule, uint32_t* rule_handle)
+{
+	int ret;
+	ipa_ipv6ct_table* ipv6ct_table;
+	uint16_t new_entry_index;
+	uint32_t new_entry_handle;
+	uint32_t cmd_sz =
+		sizeof(struct ipa_ioc_nat_dma_cmd) +
+		(MAX_DMA_ENTRIES_FOR_ADD * sizeof(struct ipa_ioc_nat_dma_one));
+	char cmd_buf[cmd_sz];
+	struct ipa_ioc_nat_dma_cmd* cmd =
+		(struct ipa_ioc_nat_dma_cmd*) cmd_buf;
+
+	IPADBG("In\n");
+
+	memset(cmd_buf, 0, sizeof(cmd_buf));
+
+	if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0)
+	{
+		IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver);
+		return -EINVAL;
+	}
+
+	if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS ||
+		rule_handle == NULL || user_rule == NULL)
+	{
+		IPAERR("Invalid parameters table_handle=%d rule_handle=%pK user_rule=%pK\n",
+			table_handle, rule_handle, user_rule);
+		return -EINVAL;
+	}
+	IPADBG("Passed Table handle: 0x%x\n", table_handle);
+
+	if (user_rule->protocol == IPA_IPV6CT_INVALID_PROTO_FIELD_CMP)
+	{
+		IPAERR("invalid parameter protocol=%d\n", user_rule->protocol);
+		return -EINVAL;
+	}
+
+	if (pthread_mutex_lock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to lock the ipv6ct mutex\n");
+		return -EINVAL;
+	}
+
+	ipv6ct_table = &ipv6ct.tables[table_handle - 1];
+	if (!ipv6ct_table->mem_desc.valid)
+	{
+		IPAERR("invalid table handle %d\n", table_handle);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	new_entry_index = ipa_ipv6ct_hash(user_rule, ipv6ct_table->table.table_entries - 1);
+
+	ret = ipa_table_add_entry(&ipv6ct_table->table, (void*)user_rule, &new_entry_index, &new_entry_handle, cmd);
+	if (ret)
+	{
+		IPAERR("failed to add a new IPV6CT entry\n");
+		goto unlock;
+	}
+
+	ret = ipa_ipv6ct_post_dma_cmd(cmd);
+	if (ret)
+	{
+		IPAERR("unable to post dma command\n");
+		goto bail;
+	}
+
+	if (pthread_mutex_unlock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to unlock the ipv6ct mutex\n");
+		return -EPERM;
+	}
+
+	*rule_handle = new_entry_handle;
+
+	IPADBG("return\n");
+
+	return 0;
+
+bail:
+	ipa_table_erase_entry(&ipv6ct_table->table, new_entry_index);
+
+unlock:
+	if (pthread_mutex_unlock(&ipv6ct_mutex))
+		IPAERR("unable to unlock the ipv6ct mutex\n");
+
+	IPADBG("return\n");
+
+	return ret;
+}
+
+int ipa_ipv6ct_del_rule(uint32_t table_handle, uint32_t rule_handle)
+{
+	ipa_ipv6ct_table* ipv6ct_table;
+	ipa_table_iterator table_iterator;
+	ipa_ipv6ct_hw_entry* entry;
+	uint16_t index;
+	uint32_t cmd_sz =
+		sizeof(struct ipa_ioc_nat_dma_cmd) +
+		(MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one));
+	char cmd_buf[cmd_sz];
+	struct ipa_ioc_nat_dma_cmd* cmd =
+		(struct ipa_ioc_nat_dma_cmd*) cmd_buf;
+	int ret;
+
+	IPADBG("In\n");
+
+	memset(cmd_buf, 0, sizeof(cmd_buf));
+
+	if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0)
+	{
+		IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver);
+		return -EINVAL;
+	}
+
+	if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS ||
+		rule_handle == IPA_TABLE_INVALID_ENTRY)
+	{
+		IPAERR("Invalid parameters table_handle=%d rule_handle=%d\n", table_handle, rule_handle);
+		return -EINVAL;
+	}
+	IPADBG("Passed Table: 0x%x and rule handle 0x%x\n", table_handle, rule_handle);
+
+	if (pthread_mutex_lock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to lock the ipv6ct mutex\n");
+		return -EINVAL;
+	}
+
+	ipv6ct_table = &ipv6ct.tables[table_handle - 1];
+	if (!ipv6ct_table->mem_desc.valid)
+	{
+		IPAERR("invalid table handle %d\n", table_handle);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = ipa_table_get_entry(&ipv6ct_table->table, rule_handle, (void**)&entry, &index);
+	if (ret)
+	{
+		IPAERR("unable to retrieve the entry with handle=%d in IPV6CT table with handle=%d\n",
+			rule_handle, table_handle);
+		goto unlock;
+	}
+
+	ret = ipa_table_iterator_init(&table_iterator, &ipv6ct_table->table, entry, index);
+	if (ret)
+	{
+		IPAERR("unable to create iterator which points to the entry index=%d in IPV6CT table with handle=%d\n",
+			index, table_handle);
+		goto unlock;
+	}
+
+	ipa_table_create_delete_command(&ipv6ct_table->table, cmd, &table_iterator);
+
+	ret = ipa_ipv6ct_post_dma_cmd(cmd);
+	if (ret)
+	{
+		IPAERR("unable to post dma command\n");
+		goto unlock;
+	}
+
+	if (!ipa_table_iterator_is_head_with_tail(&table_iterator))
+	{
+		/* The entry can be deleted */
+		uint8_t is_prev_empty = (table_iterator.prev_entry != NULL &&
+			((ipa_ipv6ct_hw_entry*)table_iterator.prev_entry)->protocol == IPA_IPV6CT_INVALID_PROTO_FIELD_CMP);
+		ipa_table_delete_entry(&ipv6ct_table->table, &table_iterator, is_prev_empty);
+	}
+
+unlock:
+	if (pthread_mutex_unlock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to unlock the ipv6ct mutex\n");
+		return (ret) ? ret : -EPERM;
+	}
+
+	IPADBG("return\n");
+
+	return ret;
+}
+
+int ipa_ipv6ct_query_timestamp(uint32_t table_handle, uint32_t rule_handle, uint32_t* time_stamp)
+{
+	int ret;
+	ipa_ipv6ct_table* ipv6ct_table;
+	ipa_ipv6ct_hw_entry *entry;
+
+	IPADBG("\n");
+
+	if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0)
+	{
+		IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver);
+		return -EINVAL;
+	}
+
+	if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS ||
+		rule_handle == IPA_TABLE_INVALID_ENTRY || time_stamp == NULL)
+	{
+		IPAERR("invalid parameters passed table_handle=%d rule_handle=%d time_stamp=%pK\n",
+			table_handle, rule_handle, time_stamp);
+		return -EINVAL;
+	}
+	IPADBG("Passed Table: %d and rule handle %d\n", table_handle, rule_handle);
+
+	if (pthread_mutex_lock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to lock the ipv6ct mutex\n");
+		return -EINVAL;
+	}
+
+	ipv6ct_table = &ipv6ct.tables[table_handle - 1];
+	if (!ipv6ct_table->mem_desc.valid)
+	{
+		IPAERR("invalid table handle %d\n", table_handle);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = ipa_table_get_entry(&ipv6ct_table->table, rule_handle, (void**)&entry, NULL);
+	if (ret)
+	{
+		IPAERR("unable to retrieve the entry with handle=%d in IPV6CT table with handle=%d\n",
+			rule_handle, table_handle);
+		goto unlock;
+	}
+
+	*time_stamp = entry->time_stamp;
+
+unlock:
+	if (pthread_mutex_unlock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to unlock the ipv6ct mutex\n");
+		return (ret) ? ret : -EPERM;
+	}
+
+	IPADBG("return\n");
+	return ret;
+}
+
+/**
+* ipa_ipv6ct_hash() - Find the index into the IPv6CT table
+* @rule: [in] an IPv6CT rule
+* @size: [in] size of the IPv6CT table
+*
+* This hash method is used to find the hash index of an entry in the IPv6CT table.
+* In case the result is zero, N-1 will be returned, where N is the size of the IPv6CT table.
+*
+* Returns: >0 index into the IPv6CT table
+*/
+static uint16_t ipa_ipv6ct_hash(const ipa_ipv6ct_rule* rule, uint16_t size)
+{
+	uint16_t hash = 0;
+
+	IPADBG("src_ipv6_lsb 0x%llx\n", rule->src_ipv6_lsb);
+	IPADBG("src_ipv6_msb 0x%llx\n", rule->src_ipv6_msb);
+	IPADBG("dest_ipv6_lsb 0x%llx\n", rule->dest_ipv6_lsb);
+	IPADBG("dest_ipv6_msb 0x%llx\n", rule->dest_ipv6_msb);
+	IPADBG("src_port: 0x%x dest_port: 0x%x\n", rule->src_port, rule->dest_port);
+	IPADBG("protocol: 0x%x size: 0x%x\n", rule->protocol, size);
+
+	hash ^= ipa_ipv6ct_xor_segments(rule->src_ipv6_lsb);
+	hash ^= ipa_ipv6ct_xor_segments(rule->src_ipv6_msb);
+	hash ^= ipa_ipv6ct_xor_segments(rule->dest_ipv6_lsb);
+	hash ^= ipa_ipv6ct_xor_segments(rule->dest_ipv6_msb);
+
+	hash ^= rule->src_port;
+	hash ^= rule->dest_port;
+	hash ^= rule->protocol;
+
+	/*
+	 * The size passed to the hash function is expected to be power^2 - 1, while the actual size is power^2;
+	 * actual_size = size + 1
+	 */
+	hash &= size;
+
+	/* If the hash results in zero, set it to the maximum value, since zero is an unused entry in the ipv6ct table */
+	if (hash == 0)
+	{
+		hash = size;
+	}
+
+	IPADBG("ipa_ipv6ct_hash returning value: %d\n", hash);
+	return hash;
+}
+
+static uint16_t ipa_ipv6ct_xor_segments(uint64_t num)
+{
+	const uint64_t mask = 0xffff;
+	const size_t bits_in_two_byte = 16;
+	uint16_t ret = 0;
+
+	IPADBG("\n");
+
+	while (num)
+	{
+		ret ^= (uint16_t)(num & mask);
+		num >>= bits_in_two_byte;
+	}
+
+	IPADBG("return\n");
+	return ret;
+}
+
+static int table_entry_is_valid(void* entry)
+{
+	ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry;
+
+	IPADBG("\n");
+
+	return ipv6ct_entry->enable;
+}
+
+static uint16_t table_entry_get_next_index(void* entry)
+{
+	uint16_t result;
+	ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry;
+
+	IPADBG("\n");
+
+	result = ipv6ct_entry->next_index;
+
+	IPADBG("Next entry of %pK is %d\n", entry, result);
+	return result;
+}
+
+static uint16_t table_entry_get_prev_index(void* entry, uint16_t entry_index, void* meta, uint16_t base_table_size)
+{
+	uint16_t result;
+	ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry;
+
+	IPADBG("\n");
+
+	result = ipv6ct_entry->prev_index;
+
+	IPADBG("Previous entry of %d is %d\n", entry_index, result);
+	return result;
+}
+
+static void table_entry_set_prev_index(void* entry, uint16_t entry_index, uint16_t prev_index,
+	void* meta, uint16_t base_table_size)
+{
+	ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry;
+
+	IPADBG("Previous entry of %d is %d\n", entry_index, prev_index);
+
+	ipv6ct_entry->prev_index = prev_index;
+
+	IPADBG("return\n");
+}
+
+static int table_entry_copy_from_user(void* entry, void* user_data)
+{
+	ipa_ipv6ct_hw_entry* ipv6ct_entry = (ipa_ipv6ct_hw_entry*)entry;
+	const ipa_ipv6ct_rule* user_rule = (const ipa_ipv6ct_rule*)user_data;
+
+	IPADBG("\n");
+
+	ipv6ct_entry->src_ipv6_lsb = user_rule->src_ipv6_lsb;
+	ipv6ct_entry->src_ipv6_msb = user_rule->src_ipv6_msb;
+	ipv6ct_entry->dest_ipv6_lsb = user_rule->dest_ipv6_lsb;
+	ipv6ct_entry->dest_ipv6_msb = user_rule->dest_ipv6_msb;
+	ipv6ct_entry->protocol = user_rule->protocol;
+	ipv6ct_entry->src_port = user_rule->src_port;
+	ipv6ct_entry->dest_port = user_rule->dest_port;
+
+	switch (user_rule->direction_settings)
+	{
+	case IPA_IPV6CT_DIRECTION_DENY_ALL:
+		break;
+	case IPA_IPV6CT_DIRECTION_ALLOW_OUT:
+		ipv6ct_entry->out_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT;
+		break;
+	case IPA_IPV6CT_DIRECTION_ALLOW_IN:
+		ipv6ct_entry->in_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT;
+		break;
+	case IPA_IPV6CT_DIRECTION_ALLOW_ALL:
+		ipv6ct_entry->out_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT;
+		ipv6ct_entry->in_allowed = IPA_IPV6CT_DIRECTION_ALLOW_BIT;
+		break;
+	default:
+		IPAERR("wrong value for IPv6CT direction setting parameter %d\n", user_rule->direction_settings);
+		return -EINVAL;
+	}
+
+	IPADBG("return\n");
+	return 0;
+}
+
+static int table_entry_head_insert(void* entry, void* user_data, uint16_t* dma_command_data)
+{
+	int ret;
+
+	IPADBG("\n");
+
+	ret = table_entry_copy_from_user(entry, user_data);
+	if (ret)
+	{
+		IPAERR("unable to copy from user a new entry\n");
+		return ret;
+	}
+
+	*dma_command_data = 0;
+	((ipa_ipv6ct_flags*)dma_command_data)->enable = IPA_IPV6CT_FLAG_ENABLE_BIT;
+
+	IPADBG("return\n");
+	return 0;
+}
+
+static int table_entry_tail_insert(void* entry, void* user_data)
+{
+	int ret;
+
+	IPADBG("\n");
+
+	ret = table_entry_copy_from_user(entry, user_data);
+	if (ret)
+	{
+		IPAERR("unable to copy from user a new entry\n");
+		return ret;
+	}
+
+	((ipa_ipv6ct_hw_entry*)entry)->enable = IPA_IPV6CT_FLAG_ENABLE_BIT;
+
+	IPADBG("return\n");
+	return 0;
+}
+
+static uint16_t table_entry_get_delete_head_dma_command_data(void* head, void* next_entry)
+{
+	IPADBG("\n");
+	return IPA_IPV6CT_INVALID_PROTO_FIELD_VALUE;
+}
+
+/**
+ * ipa_ipv6ct_create_table() - Creates a new IPv6CT table
+ * @ipv6ct_table: [in] IPv6CT table
+ * @number_of_entries: [in] number of IPv6CT entries
+ * @table_index: [in] the index of the IPv6CT table
+ *
+ * This function creates new IPv6CT table:
+ * - Initializes table, memory descriptor and table_dma_cmd_helpers structures
+ * - Allocates, maps and clears the memory for table
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+static int ipa_ipv6ct_create_table(ipa_ipv6ct_table* ipv6ct_table, uint16_t number_of_entries, uint8_t table_index)
+{
+	int ret, size;
+
+	IPADBG("\n");
+
+	ipa_table_init(
+		&ipv6ct_table->table, IPA_IPV6CT_TABLE_NAME, IPA_NAT_MEM_IN_DDR,
+		sizeof(ipa_ipv6ct_hw_entry), NULL, 0, &entry_interface);
+
+	ret = ipa_table_calculate_entries_num(
+		&ipv6ct_table->table, number_of_entries, IPA_NAT_MEM_IN_DDR);
+
+	if (ret)
+	{
+		IPAERR("unable to calculate number of entries in ipv6ct table %d, while required by user %d\n",
+			table_index, number_of_entries);
+		return ret;
+	}
+
+	size = ipa_table_calculate_size(&ipv6ct_table->table);
+	IPADBG("IPv6CT table size: %d\n", size);
+
+	ipa_mem_descriptor_init(
+		&ipv6ct_table->mem_desc,
+		IPA_IPV6CT_DEV_NAME,
+		size,
+		table_index,
+		IPA_IOC_ALLOC_IPV6CT_TABLE,
+		IPA_IOC_DEL_IPV6CT_TABLE,
+		false); /* false here means don't consider using sram */
+
+	ret = ipa_mem_descriptor_allocate_memory(
+		&ipv6ct_table->mem_desc,
+		ipv6ct.ipa_desc->fd);
+
+	if (ret)
+	{
+		IPAERR("unable to allocate ipv6ct memory descriptor Error: %d\n", ret);
+		goto bail;
+	}
+
+	ipa_table_calculate_addresses(&ipv6ct_table->table, ipv6ct_table->mem_desc.base_addr);
+
+	ipa_table_reset(&ipv6ct_table->table);
+
+	ipa_ipv6ct_create_table_dma_cmd_helpers(ipv6ct_table, table_index);
+
+	IPADBG("return\n");
+	return 0;
+
+bail:
+	memset(ipv6ct_table, 0, sizeof(*ipv6ct_table));
+	return ret;
+}
+
+static int ipa_ipv6ct_destroy_table(ipa_ipv6ct_table* ipv6ct_table)
+{
+	int ret;
+
+	IPADBG("\n");
+
+	ret = ipa_mem_descriptor_delete(&ipv6ct_table->mem_desc, ipv6ct.ipa_desc->fd);
+	if (ret)
+		IPAERR("unable to delete IPV6CT descriptor\n");
+
+	memset(ipv6ct_table, 0, sizeof(*ipv6ct_table));
+
+	IPADBG("return\n");
+	return ret;
+}
+
+/**
+ * ipa_ipv6ct_create_table_dma_cmd_helpers() -
+ *   Creates dma_cmd_helpers for base table in the received IPv6CT table
+ * @ipv6ct_table: [in] IPv6CT table
+ * @table_indx: [in] The index of the IPv6CT table
+ *
+ * A DMA command helper helps to generate the DMA command for one
+ * specific field change. Each table has 3 different types of field
+ * change: update_head, update_entry and delete_head. This function
+ * creates the helpers and updates the base table correspondingly.
+ */
+static void ipa_ipv6ct_create_table_dma_cmd_helpers(
+	ipa_ipv6ct_table* ipv6ct_table,
+	uint8_t table_indx )
+{
+	IPADBG("\n");
+
+	ipa_table_dma_cmd_helper_init(
+		&ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_FLAGS],
+		table_indx,
+		IPA_IPV6CT_BASE_TBL,
+		IPA_IPV6CT_EXPN_TBL,
+		ipv6ct_table->mem_desc.addr_offset + IPA_IPV6CT_RULE_FLAG_FIELD_OFFSET);
+
+	ipa_table_dma_cmd_helper_init(
+		&ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_NEXT_INDEX],
+		table_indx,
+		IPA_IPV6CT_BASE_TBL,
+		IPA_IPV6CT_EXPN_TBL,
+		ipv6ct_table->mem_desc.addr_offset + IPA_IPV6CT_RULE_NEXT_FIELD_OFFSET);
+
+	ipa_table_dma_cmd_helper_init(
+		&ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_PROTOCOL],
+		table_indx,
+		IPA_IPV6CT_BASE_TBL,
+		IPA_IPV6CT_EXPN_TBL,
+		ipv6ct_table->mem_desc.addr_offset + IPA_IPV6CT_RULE_PROTO_FIELD_OFFSET);
+
+	ipv6ct_table->table.dma_help[HELP_UPDATE_HEAD] =
+		&ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_FLAGS];
+	ipv6ct_table->table.dma_help[HELP_UPDATE_ENTRY] =
+		&ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_NEXT_INDEX];
+	ipv6ct_table->table.dma_help[HELP_DELETE_HEAD] =
+		&ipv6ct_table->table_dma_cmd_helpers[IPA_IPV6CT_TABLE_PROTOCOL];
+
+	IPADBG("return\n");
+}
+
+static int ipa_ipv6ct_post_init_cmd(ipa_ipv6ct_table* ipv6ct_table, uint8_t tbl_index)
+{
+	struct ipa_ioc_ipv6ct_init cmd;
+	int ret;
+
+	IPADBG("\n");
+
+	cmd.tbl_index = tbl_index;
+
+	cmd.base_table_offset = ipv6ct_table->mem_desc.addr_offset;
+	cmd.expn_table_offset = cmd.base_table_offset + (ipv6ct_table->table.table_entries * sizeof(ipa_ipv6ct_hw_entry));
+
+	/* Driver/HW expects the base table size to be power^2 - 1 due to the H/W hash calculation */
+	cmd.table_entries = ipv6ct_table->table.table_entries - 1;
+	cmd.expn_table_entries = ipv6ct_table->table.expn_table_entries;
+
+	ret = ioctl(ipv6ct.ipa_desc->fd, IPA_IOC_INIT_IPV6CT_TABLE, &cmd);
+	if (ret)
+	{
+		IPAERR("unable to post init cmd Error: %d IPA fd %d\n", ret, ipv6ct.ipa_desc->fd);
+		return ret;
+	}
+
+	IPADBG("Posted IPA_IOC_INIT_IPV6CT_TABLE to kernel successfully\n");
+	return 0;
+}
+
+static int ipa_ipv6ct_post_dma_cmd(struct ipa_ioc_nat_dma_cmd* cmd)
+{
+	IPADBG("\n");
+
+	cmd->mem_type = IPA_NAT_MEM_IN_DDR;
+
+	if (ioctl(ipv6ct.ipa_desc->fd, IPA_IOC_TABLE_DMA_CMD, cmd))
+	{
+		IPAERR("ioctl (IPA_IOC_TABLE_DMA_CMD) on fd %d has failed\n",
+			   ipv6ct.ipa_desc->fd);
+		return -EIO;
+	}
+	IPADBG("posted IPA_IOC_TABLE_DMA_CMD to kernel successfully\n");
+	return 0;
+}
+
+void ipa_ipv6ct_dump_table(uint32_t table_handle)
+{
+	ipa_ipv6ct_table* ipv6ct_table;
+
+	if (ipv6ct.ipa_desc->ver < IPA_HW_v4_0)
+	{
+		IPAERR("IPv6 connection tracking isn't supported for IPA version %d\n", ipv6ct.ipa_desc->ver);
+		return;
+	}
+
+	if (table_handle == IPA_TABLE_INVALID_ENTRY || table_handle > IPA_IPV6CT_MAX_TBLS)
+	{
+		IPAERR("invalid parameters passed %d\n", table_handle);
+		return;
+	}
+
+	if (pthread_mutex_lock(&ipv6ct_mutex))
+	{
+		IPAERR("unable to lock the ipv6ct mutex\n");
+		return;
+	}
+
+	ipv6ct_table = &ipv6ct.tables[table_handle - 1];
+	if (!ipv6ct_table->mem_desc.valid)
+	{
+		IPAERR("invalid table handle %d\n", table_handle);
+		goto unlock;
+	}
+
+	/* Prevents interleaving with later kernel printouts. Flush doesn't help. */
+	sleep(1);
+	ipa_read_debug_info(IPA_IPV6CT_DEBUG_FILE_PATH);
+	sleep(1);
+
+unlock:
+	if (pthread_mutex_unlock(&ipv6ct_mutex))
+		IPAERR("unable to unlock the ipv6ct mutex\n");
+}
diff --git a/ipanat/src/ipa_mem_descriptor.c b/ipanat/src/ipa_mem_descriptor.c
new file mode 100644
index 0000000..172564d
--- /dev/null
+++ b/ipanat/src/ipa_mem_descriptor.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "ipa_mem_descriptor.h"
+#include "ipa_nat_utils.h"
+
+#include <sys/ioctl.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <unistd.h>
+
+#define IPA_DEV_DIR "/dev/"
+
+#ifdef IPA_ON_R3PC
+#define IPA_DEVICE_MMAP_MEM_SIZE (2 * 1024UL * 1024UL - 1)
+#endif
+
+static int AllocateMemory(
+	ipa_mem_descriptor* desc,
+	int ipa_fd)
+{
+	struct ipa_ioc_nat_ipv6ct_table_alloc cmd;
+	int ret = 0;
+
+	IPADBG("In\n");
+
+#ifndef IPA_ON_R3PC
+	/*
+	 * If/when the number of NAT table entries requested yields a byte
+	 * count that will fit in SRAM, SRAM will be used to hold the NAT
+	 * table. When SRAM is used, some odd things can happen, relative
+	 * to mmap'ing's virtual memory scheme, that require us to make
+	 * some adjustments.
+	 *
+	 * To be more specific, the real physical SRAM location for the
+	 * table and the table's size may not play well with Linux's
+	 * mmap'ing virtual memory scheme....which likes everything to be
+	 * PAGE_SIZE aligned and sized in multiples of PAGE_SIZE.
+	 *
+	 * Given the above, if the NAT table's (in SRAM) physical address
+	 * is not on a PAGE_SIZE boundary, it will be offset into the
+	 * mmap'd virtual memory, hence we need to know that offset in
+	 * order to get to the table.  If said offset plus the table's
+	 * size takes it across a PAGE_SIZE boundary, we need to allocate
+	 * more space to ensure that the table is completely within the
+	 * mmap'd virtual memory.
+	 */
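+	/*
+	 * Summary of the decision below: sram_to_be_used is set only when the
+	 * caller allowed SRAM (consider_using_sram) and the requested byte
+	 * count fits within sram_mem_available_for_nat; otherwise the table
+	 * is allocated from DDR.
+	 */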
+	desc->sram_available = desc->sram_to_be_used = false;
+
+	memset(&desc->nat_sram_info, 0, sizeof(desc->nat_sram_info));
+
+	ret = ioctl(
+		ipa_fd,
+		IPA_IOC_GET_NAT_IN_SRAM_INFO,
+		&desc->nat_sram_info);
+
+	if ( ret == 0 )
+	{
+		IPADBG("sram_mem_available_for_nat(0x%08x) "
+			   "nat_table_offset_into_mmap(0x%08x) "
+			   "best_nat_in_sram_size_rqst(0x%08x)\n",
+			   desc->nat_sram_info.sram_mem_available_for_nat,
+			   desc->nat_sram_info.nat_table_offset_into_mmap,
+			   desc->nat_sram_info.best_nat_in_sram_size_rqst);
+
+		desc->sram_available = true;
+
+		if ( desc->consider_using_sram )
+		{
+			if (desc->orig_rqst_size <=
+				desc->nat_sram_info.sram_mem_available_for_nat)
+			{
+				desc->sram_to_be_used = true;
+			}
+		}
+	}
+#endif
+
+	/*
+	 * Now do the actual allocation...
+	 */
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.size = desc->orig_rqst_size;
+
+	ret = ioctl(ipa_fd, desc->allocate_ioctl_num, &cmd);
+
+	if (ret)
+	{
+		IPAERR("Unable to post %s allocate table command. Error %d IPA fd %d\n",
+			   desc->name, ret, ipa_fd);
+		goto bail;
+	}
+
+	desc->addr_offset = cmd.offset;
+
+	IPADBG("The memory desc for %s allocated successfully\n", desc->name);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static int MapMemory(
+	ipa_mem_descriptor* desc,
+	int ipa_fd)
+{
+	char device_full_path[IPA_RESOURCE_NAME_MAX];
+	size_t ipa_dev_dir_path_len;
+	int device_fd;
+	int ret = 0;
+
+	UNUSED(ipa_fd);
+
+	IPADBG("In\n");
+
+	ipa_dev_dir_path_len =
+		strlcpy(device_full_path, IPA_DEV_DIR, IPA_RESOURCE_NAME_MAX);
+
+	if (ipa_dev_dir_path_len >= IPA_RESOURCE_NAME_MAX)
+	{
+		IPAERR("Unable to copy a string with size %zu to buffer with size %d\n",
+			   ipa_dev_dir_path_len, IPA_RESOURCE_NAME_MAX);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	strlcpy(device_full_path + ipa_dev_dir_path_len,
+			desc->name, IPA_RESOURCE_NAME_MAX - ipa_dev_dir_path_len);
+
+	device_fd = open(device_full_path, O_RDWR);
+
+	if (device_fd < 0)
+	{
+		IPAERR("unable to open the desc %s in path %s. Error:%d\n",
+			   desc->name, device_full_path, device_fd);
+		ret = -EIO;
+		goto bail;
+	}
+
+#ifndef IPA_ON_R3PC
+	/*
+	 * If/when the number of NAT table entries requested yields a byte
+	 * count that will fit in SRAM, SRAM will be used to hold the NAT
+	 * table. When SRAM is used, some odd things can happen, relative
+	 * to mmap'ing's virtual memory scheme, that require us to make
+	 * some adjustments.
+	 *
+	 * To be more specific, the real physical SRAM location for the
+	 * table and the table's size may not play well with Linux's
+	 * mmap'ing virtual memory scheme....which likes everything to be
+	 * PAGE_SIZE aligned and sized in multiples of PAGE_SIZE.
+	 *
+	 * Given the above, if the NAT table's (in SRAM) physical address
+	 * is not on a PAGE_SIZE boundary, it will be offset into the
+	 * mmap'd virtual memory, hence we need to know that offset in
+	 * order to get to the table.  If said offset plus the table's
+	 * size takes it across a PAGE_SIZE boundary, we need to allocate
+	 * more space to ensure that the table is completely within the
+	 * mmap'd virtual memory.
+	 */
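+	/*
+	 * Worked example (hypothetical numbers): with a 4 KB PAGE_SIZE, a
+	 * table that physically starts 0x600 bytes into a page is reached at
+	 * mmap_addr + 0x600 (nat_table_offset_into_mmap). If 0x600 plus the
+	 * original request spills past a page boundary,
+	 * best_nat_in_sram_size_rqst is presumably padded so the whole table
+	 * still lies inside the mmap'd region.
+	 */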
+	desc->mmap_size =
+		( desc->sram_to_be_used )                      ?
+		desc->nat_sram_info.best_nat_in_sram_size_rqst :
+		desc->orig_rqst_size;
+
+	desc->mmap_addr = desc->base_addr =
+		(void* )mmap(
+			NULL,
+			desc->mmap_size,
+			PROT_READ | PROT_WRITE,
+			MAP_SHARED,
+			device_fd,
+			0);
+#else
+	IPADBG("user space r3pc\n");
+	desc->mmap_addr = desc->base_addr =
+		(void *) mmap(
+			(caddr_t)0,
+			IPA_DEVICE_MMAP_MEM_SIZE,
+			PROT_READ | PROT_WRITE,
+			MAP_SHARED,
+			device_fd,
+			0);
+#endif
+
+	if (desc->base_addr == MAP_FAILED)
+	{
+		IPAERR("Unable to mmap the memory for %s\n", desc->name);
+		ret = -EINVAL;
+		goto close;
+	}
+
+	if ( desc->sram_to_be_used )
+	{
+		desc->base_addr =
+			(uint8_t*) (desc->base_addr) +
+			desc->nat_sram_info.nat_table_offset_into_mmap;
+	}
+
+	IPADBG("mmap for %s return value 0x%lx -> 0x%lx\n",
+		   desc->name,
+		   (long unsigned int) desc->mmap_addr,
+		   (long unsigned int) desc->base_addr);
+
+close:
+	if (close(device_fd))
+	{
+		IPAERR("unable to close the file descriptor for %s\n", desc->name);
+		ret = -EINVAL;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static int DeallocateMemory(
+	ipa_mem_descriptor* desc,
+	int ipa_fd)
+{
+	struct ipa_ioc_nat_ipv6ct_table_del cmd;
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.table_index = desc->table_index;
+
+	cmd.mem_type =
+		( desc->sram_to_be_used ) ?
+		IPA_NAT_MEM_IN_SRAM       :
+		IPA_NAT_MEM_IN_DDR;
+
+	ret = ioctl(ipa_fd, desc->delete_ioctl_num, &cmd);
+
+	if (ret)
+	{
+		IPAERR("unable to post table delete command for %s Error: %d IPA fd %d\n",
+			   desc->name, ret, ipa_fd);
+		goto bail;
+	}
+
+	IPADBG("posted delete command for %s to kernel successfully\n", desc->name);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+void ipa_mem_descriptor_init(
+	ipa_mem_descriptor* desc,
+	const char* device_name,
+	int size,
+	uint8_t table_index,
+	unsigned long allocate_ioctl_num,
+	unsigned long delete_ioctl_num,
+	bool consider_using_sram )
+{
+	IPADBG("In\n");
+
+	strlcpy(desc->name, device_name, IPA_RESOURCE_NAME_MAX);
+
+	desc->orig_rqst_size      = desc->mmap_size = size;
+	desc->table_index         = table_index;
+	desc->allocate_ioctl_num  = allocate_ioctl_num;
+	desc->delete_ioctl_num    = delete_ioctl_num;
+	desc->consider_using_sram = consider_using_sram;
+
+	IPADBG("Out\n");
+}
+
+int ipa_mem_descriptor_allocate_memory(
+	ipa_mem_descriptor* desc,
+	int ipa_fd)
+{
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = AllocateMemory(desc, ipa_fd);
+
+	if (ret)
+	{
+		IPAERR("unable to allocate %s\n", desc->name);
+		goto bail;
+	}
+
+	ret = MapMemory(desc, ipa_fd);
+
+	if (ret)
+	{
+		IPAERR("unable to map %s\n", desc->name);
+		DeallocateMemory(desc, ipa_fd);
+		goto bail;
+	}
+
+	desc->valid = TRUE;
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_mem_descriptor_delete(
+	ipa_mem_descriptor* desc,
+	int ipa_fd)
+{
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if (! desc->valid)
+	{
+		IPAERR("invalid desc handle passed\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	desc->valid = FALSE;
+
+#ifndef IPA_ON_R3PC
+	munmap(desc->mmap_addr, desc->mmap_size);
+#else
+	munmap(desc->mmap_addr, IPA_DEVICE_MMAP_MEM_SIZE);
+#endif
+
+	ret = DeallocateMemory(desc, ipa_fd);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
diff --git a/ipanat/src/ipa_nat_drv.c b/ipanat/src/ipa_nat_drv.c
index d01a6c9..0fbf473 100644
--- a/ipanat/src/ipa_nat_drv.c
+++ b/ipanat/src/ipa_nat_drv.c
@@ -1,38 +1,41 @@
 /*
-Copyright (c) 2013 - 2017, The Linux Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-    * Redistributions of source code must retain the above copyright
-      notice, this list of conditions and the following disclaimer.
-    * Redistributions in binary form must reproduce the above
-      copyright notice, this list of conditions and the following
-      disclaimer in the documentation and/or other materials provided
-      with the distribution.
-    * Neither the name of The Linux Foundation nor the names of its
-      contributors may be used to endorse or promote products derived
-      from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #include "ipa_nat_drv.h"
 #include "ipa_nat_drvi.h"
 
+#include <errno.h>
+
 /**
  * ipa_nat_add_ipv4_tbl() - create ipv4 nat table
  * @public_ip_addr: [in] public ipv4 address
+ * @mem_type_ptr: [in] type of memory table is to reside in
  * @number_of_entries: [in]  number of nat entries
  * @table_handle: [out] Handle of new ipv4 nat table
  *
@@ -40,27 +43,36 @@
  *
  * Returns:	0  On Success, negative on failure
  */
-int ipa_nat_add_ipv4_tbl(uint32_t public_ip_addr,
-		uint16_t number_of_entries,
-		uint32_t *tbl_hdl)
+int ipa_nat_add_ipv4_tbl(
+	uint32_t public_ip_addr,
+	const char *mem_type_ptr,
+	uint16_t number_of_entries,
+	uint32_t *tbl_hdl)
 {
-  int ret;
+	int ret;
 
-  if (NULL == tbl_hdl || 0 == number_of_entries) {
-    IPAERR("Invalid parameters \n");
-    return -EINVAL;
-  }
+	if (tbl_hdl == NULL || mem_type_ptr == NULL || number_of_entries == 0) {
+		IPAERR(
+			"Invalid parameters tbl_hdl=%pK mem_type_ptr=%p number_of_entries=%d\n",
+			tbl_hdl,
+			mem_type_ptr,
+			number_of_entries);
+		return -EINVAL;
+	}
 
-  ret = ipa_nati_add_ipv4_tbl(public_ip_addr,
-								number_of_entries,
-								tbl_hdl);
-  if (ret != 0) {
-    IPAERR("unable to add table \n");
-    return -EINVAL;
-  }
-  IPADBG("Returning table handle 0x%x\n", *tbl_hdl);
+	*tbl_hdl = 0;
 
-  return ret;
+	ret = ipa_nati_add_ipv4_tbl(
+		public_ip_addr, mem_type_ptr, number_of_entries, tbl_hdl);
+
+	if (ret) {
+		IPAERR("unable to add NAT table\n");
+		return ret;
+	}
+
+	IPADBG("Returning table handle 0x%x\n", *tbl_hdl);
+
+	return ret;
 } /* __ipa_nat_add_ipv4_tbl() */
 
 /**
@@ -71,16 +83,17 @@
  *
  * Returns:	0  On Success, negative on failure
  */
-int ipa_nat_del_ipv4_tbl(uint32_t tbl_hdl)
+int ipa_nat_del_ipv4_tbl(
+	uint32_t tbl_hdl)
 {
-  if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-      tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
-    IPAERR("invalid table handle passed \n");
-    return -EINVAL;
-  }
-  IPADBG("Passed Table Handle: 0x%x\n", tbl_hdl);
+	if ( ! VALID_TBL_HDL(tbl_hdl) ) {
+		IPAERR("Invalid table handle passed 0x%08X\n", tbl_hdl);
+		return -EINVAL;
+	}
 
-  return ipa_nati_del_ipv4_table(tbl_hdl);
+	IPADBG("Passed Table Handle: 0x%08X\n", tbl_hdl);
+
+	return ipa_nati_del_ipv4_table(tbl_hdl);
 }
 
 /**
@@ -93,28 +106,32 @@
  *
  * Returns:	0  On Success, negative on failure
  */
-int ipa_nat_add_ipv4_rule(uint32_t tbl_hdl,
-		const ipa_nat_ipv4_rule *clnt_rule,
-		uint32_t *rule_hdl)
+int ipa_nat_add_ipv4_rule(
+	uint32_t tbl_hdl,
+	const ipa_nat_ipv4_rule *clnt_rule,
+	uint32_t *rule_hdl)
 {
-  int result = -EINVAL;
+	int result = -EINVAL;
 
-  if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-      tbl_hdl > IPA_NAT_MAX_IP4_TBLS || NULL == rule_hdl ||
-      NULL == clnt_rule) {
-    IPAERR("invalide table handle passed \n");
-    return result;
-  }
-  IPADBG("Passed Table handle: 0x%x\n", tbl_hdl);
-
-  if (ipa_nati_add_ipv4_rule(tbl_hdl, clnt_rule, rule_hdl) != 0) {
+	if ( ! VALID_TBL_HDL(tbl_hdl) ||
+		 rule_hdl == NULL ||
+		 clnt_rule == NULL ) {
+		IPAERR(
+			"Invalid parameters tbl_hdl=%d clnt_rule=%pK rule_hdl=%pK\n",
+			tbl_hdl, clnt_rule, rule_hdl);
 		return result;
 	}
 
-  IPADBG("returning rule handle 0x%x\n", *rule_hdl);
-  return 0;
-}
+	IPADBG("Passed Table handle: 0x%x\n", tbl_hdl);
 
+	if (ipa_nati_add_ipv4_rule(tbl_hdl, clnt_rule, rule_hdl)) {
+		return result;
+	}
+
+	IPADBG("Returning rule handle %u\n", *rule_hdl);
+
+	return 0;
+}
 
 /**
  * ipa_nat_del_ipv4_rule() - to delete ipv4 nat rule
@@ -125,25 +142,31 @@
  *
  * Returns:	0  On Success, negative on failure
  */
-int ipa_nat_del_ipv4_rule(uint32_t tbl_hdl,
-		uint32_t rule_hdl)
+int ipa_nat_del_ipv4_rule(
+	uint32_t tbl_hdl,
+	uint32_t rule_hdl)
 {
-  int result = -EINVAL;
+	int result = -EINVAL;
 
-  if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-      IPA_NAT_INVALID_NAT_ENTRY == rule_hdl) {
-    IPAERR("invalide parameters\n");
-    return result;
-  }
-  IPADBG("Passed Table: 0x%x and rule handle 0x%x\n", tbl_hdl, rule_hdl);
+	if ( ! VALID_TBL_HDL(tbl_hdl) || ! VALID_RULE_HDL(rule_hdl) )
+	{
+		IPAERR("Invalid parameters tbl_hdl=0x%08X rule_hdl=0x%08X\n",
+			   tbl_hdl, rule_hdl);
+		return result;
+	}
 
-  result = ipa_nati_del_ipv4_rule(tbl_hdl, rule_hdl);
-  if (result) {
-    IPAERR("unable to delete rule from hw \n");
-    return result;
-  }
+	IPADBG("Passed Table: 0x%08X and rule handle 0x%08X\n", tbl_hdl, rule_hdl);
 
-  return 0;
+	result = ipa_nati_del_ipv4_rule(tbl_hdl, rule_hdl);
+	if (result) {
+		IPAERR(
+			"Unable to delete rule with handle 0x%08X "
+			"from hw for NAT table with handle 0x%08X\n",
+			rule_hdl, tbl_hdl);
+		return result;
+	}
+
+	return 0;
 }
 
 /**
@@ -157,22 +180,25 @@
  *
  * Returns:	0  On Success, negative on failure
  */
-int ipa_nat_query_timestamp(uint32_t  tbl_hdl,
-		uint32_t  rule_hdl,
-		uint32_t  *time_stamp)
+int ipa_nat_query_timestamp(
+	uint32_t tbl_hdl,
+	uint32_t rule_hdl,
+	uint32_t *time_stamp)
 {
+	if ( ! VALID_TBL_HDL(tbl_hdl) ||
+		 ! VALID_RULE_HDL(rule_hdl) ||
+		 time_stamp == NULL )
+	{
+		IPAERR("Invalid parameters passed tbl_hdl=0x%x rule_hdl=%u time_stamp=%pK\n",
+			   tbl_hdl, rule_hdl, time_stamp);
+		return -EINVAL;
+	}
 
-  if (0 == tbl_hdl || tbl_hdl > IPA_NAT_MAX_IP4_TBLS ||
-      NULL == time_stamp) {
-    IPAERR("invalid parameters passed \n");
-    return -EINVAL;
-  }
-  IPADBG("Passed Table: 0x%x and rule handle 0x%x\n", tbl_hdl, rule_hdl);
+	IPADBG("Passed Table 0x%x and rule handle %u\n", tbl_hdl, rule_hdl);
 
-  return ipa_nati_query_timestamp(tbl_hdl, rule_hdl, time_stamp);
+	return ipa_nati_query_timestamp(tbl_hdl, rule_hdl, time_stamp);
 }
 
-
 /**
 * ipa_nat_modify_pdn() - modify single PDN entry in the PDN config table
 * @table_handle: [in] handle of ipv4 nat table
@@ -183,24 +209,25 @@
 *
 * Returns:	0  On Success, negative on failure
 */
-int ipa_nat_modify_pdn(uint32_t  tbl_hdl,
+int ipa_nat_modify_pdn(
+	uint32_t tbl_hdl,
 	uint8_t pdn_index,
 	ipa_nat_pdn_entry *pdn_info)
 {
 	struct ipa_ioc_nat_pdn_entry pdn_data;
 
-	if (0 == tbl_hdl || tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
-		IPAERR("invalid parameters passed \n");
-		return -EINVAL;
-	}
-
-	if (!pdn_info) {
-		IPAERR("pdn_info is NULL \n");
+	if ( ! VALID_TBL_HDL(tbl_hdl) ||
+		 pdn_info == NULL) {
+		IPAERR(
+			"invalid parameters passed tbl_hdl=%d pdn_info=%pK\n",
+			tbl_hdl, pdn_info);
 		return -EINVAL;
 	}
 
 	if (pdn_index > IPA_MAX_PDN_NUM) {
-		IPAERR("PDN index is out of range %d", pdn_index);
+		IPAERR(
+			"PDN index %d is out of range maximum %d",
+			pdn_index, IPA_MAX_PDN_NUM);
 		return -EINVAL;
 	}
 
@@ -212,4 +239,110 @@
 	return ipa_nati_modify_pdn(&pdn_data);
 }
 
+/**
+* ipa_nat_get_pdn_index() - get a PDN index for a public ip
+* @public_ip : [in] IPv4 address of the PDN entry
+* @pdn_index : [out] the index of the requested PDN entry
+*
+* Get a PDN entry
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_get_pdn_index(
+	uint32_t public_ip,
+	uint8_t *pdn_index)
+{
+	if(!pdn_index)
+	{
+		IPAERR("NULL PDN index\n");
+		return -EINVAL;
+	}
 
+	return ipa_nati_get_pdn_index(public_ip, pdn_index);
+}
+
+/**
+* ipa_nat_alloc_pdn() - allocate a PDN for new WAN
+* @pdn_info : [in] values for the PDN entry to be created
+* @pdn_index : [out] the index of the requested PDN entry
+*
+* allocate a new PDN entry
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_alloc_pdn(
+	ipa_nat_pdn_entry *pdn_info,
+	uint8_t *pdn_index)
+{
+	if(!pdn_info)
+	{
+		IPAERR("NULL PDN info\n");
+		return -EINVAL;
+	}
+
+	if(!pdn_index)
+	{
+		IPAERR("NULL PDN index\n");
+		return -EINVAL;
+	}
+
+	return ipa_nati_alloc_pdn(pdn_info, pdn_index);
+}
+
+/**
+* ipa_nat_get_pdn_count() - get the number of allocated PDNs
+* @pdn_cnt : [out] the number of allocated PDNs
+*
+* get the number of allocated PDNs
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_get_pdn_count(
+	uint8_t *pdn_cnt)
+{
+	if(!pdn_cnt)
+	{
+		IPAERR("NULL PDN count\n");
+		return -EINVAL;
+	}
+
+	*pdn_cnt = ipa_nati_get_pdn_cnt();
+
+	return 0;
+}
+
+/**
+* ipa_nat_dealloc_pdn() - deallocate a PDN entry
+* @pdn_index : [in] pdn index to be deallocated
+*
+* deallocate a PDN in specified index - zero the PDN entry
+*
+* Returns:	0  On Success, negative on failure
+*/
+int ipa_nat_dealloc_pdn(
+	uint8_t pdn_index)
+{
+	if(pdn_index > IPA_MAX_PDN_NUM) {
+		IPAERR("PDN index is out of range %d", pdn_index);
+		return -EINVAL;
+	}
+
+	return ipa_nati_dealloc_pdn(pdn_index);
+}
+
+/**
+ * ipa_nat_vote_clock() - used for voting clock
+ * @vote_type: [in] desired vote type
+ */
+int ipa_nat_vote_clock(
+	enum ipa_app_clock_vote_type vote_type )
+{
+	if ( ! (vote_type >= IPA_APP_CLK_DEVOTE &&
+			vote_type <= IPA_APP_CLK_RESET_VOTE) )
+	{
+		IPAERR("Bad vote_type(%u) parameter\n", vote_type);
+		return -EINVAL;
+	}
+
+	return ipa_nati_vote_clock(vote_type);
+}
diff --git a/ipanat/src/ipa_nat_drvi.c b/ipanat/src/ipa_nat_drvi.c
index 43ceaf5..b8d64f6 100644
--- a/ipanat/src/ipa_nat_drvi.c
+++ b/ipanat/src/ipa_nat_drvi.c
@@ -1,377 +1,164 @@
 /*
-Copyright (c) 2013 - 2019, The Linux Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-		* Redistributions of source code must retain the above copyright
-			notice, this list of conditions and the following disclaimer.
-		* Redistributions in binary form must reproduce the above
-			copyright notice, this list of conditions and the following
-			disclaimer in the documentation and/or other materials provided
-			with the distribution.
-		* Neither the name of The Linux Foundation nor the names of its
-			contributors may be used to endorse or promote products derived
-			from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
+ * Copyright (c) 2013-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
 
 #include "ipa_nat_drv.h"
 #include "ipa_nat_drvi.h"
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <netinet/in.h>
+#include <errno.h>
+#include <pthread.h>
+#include <unistd.h>
 #include <linux/msm_ipa.h>
 
-#ifdef USE_GLIB
-#include <glib.h>
-#define strlcpy g_strlcpy
-#else
-#ifndef FEATURE_IPA_ANDROID
-static size_t strlcpy(char * dst, const char * src, size_t size)
-{
-	size_t i;
+#define MAX_DMA_ENTRIES_FOR_ADD 4
+#define MAX_DMA_ENTRIES_FOR_DEL 3
 
-	if (size < 1)
-		return 0;
-	for (i = 0; i < (size - 1) && src[i] != '\0'; i++)
-		dst[i] = src[i];
-	for (; i < size; i++)
-		dst[i] = '\0';
-	return strlen(dst);
-}
-#endif
-#endif
+#define IPA_NAT_DEBUG_FILE_PATH "/sys/kernel/debug/ipa/ip4_nat"
+#define IPA_NAT_TABLE_NAME "IPA NAT table"
+#define IPA_NAT_INDEX_TABLE_NAME "IPA NAT index table"
 
-struct ipa_nat_cache ipv4_nat_cache;
-pthread_mutex_t nat_mutex    = PTHREAD_MUTEX_INITIALIZER;
+#undef min
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+
+#undef max
+#define max(a, b) (((a) > (b)) ? (a) : (b))
+
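+/*
+ * One NAT cache per memory type (DDR/SRAM). active_nat_cache_ptr (below)
+ * tracks whichever cache currently backs the live NAT table.
+ */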
+static struct ipa_nat_cache ipv4_nat_cache[IPA_NAT_MEM_IN_MAX];
+
+static struct ipa_nat_cache *active_nat_cache_ptr = NULL;
+
+#undef DDR_IS_ACTIVE
+#define DDR_IS_ACTIVE() \
+	((active_nat_cache_ptr) ? \
+	 (active_nat_cache_ptr->nmi == IPA_NAT_MEM_IN_DDR) : \
+	 false)
+
+#undef  SRAM_IS_ACTIVE
+#define SRAM_IS_ACTIVE() \
+	((active_nat_cache_ptr) ? \
+	 (active_nat_cache_ptr->nmi == IPA_NAT_MEM_IN_SRAM) : \
+	 false)
+
+extern pthread_mutex_t nat_mutex;
 
 static ipa_nat_pdn_entry pdns[IPA_MAX_PDN_NUM];
+static int num_pdns = 0;
 
-/* ------------------------------------------
-		UTILITY FUNCTIONS START
-	 --------------------------------------------*/
-
-/**
- * UpdateSwSpecParams() - updates sw specific params
- * @rule: [in/out] nat table rule
- * @param_type: [in] which param need to update
- * @value: [in] value of param
- *
- * Update SW specific params in the passed rule.
- *
- * Returns: None
+/*
+ * ----------------------------------------------------------------------------
+ * Private helpers for manipulating regular tables
+ * ----------------------------------------------------------------------------
  */
-void UpdateSwSpecParams(struct ipa_nat_rule *rule,
-															uint8_t param_type,
-															uint32_t value)
+static int table_entry_is_valid(
+	void* entry)
 {
-	uint32_t temp = rule->sw_spec_params;
+	struct ipa_nat_rule* rule = (struct ipa_nat_rule*) entry;
 
-	if (IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE == param_type) {
-		value = (value << INDX_TBL_ENTRY_SIZE_IN_BITS);
-		temp &= 0x0000FFFF;
-	} else {
-		temp &= 0xFFFF0000;
-	}
+	IPADBG("In\n");
 
-	temp = (temp | value);
-	rule->sw_spec_params = temp;
-	return;
+	IPADBG("enable(%u)\n", rule->enable);
+
+	IPADBG("Out\n");
+
+	return rule->enable;
+}
+
+static uint16_t table_entry_get_next_index(
+	void* entry)
+{
+	uint16_t result;
+	struct ipa_nat_rule* rule = (struct ipa_nat_rule*)entry;
+
+	IPADBG("In\n");
+
+	result = rule->next_index;
+
+	IPADBG("Next entry of %pK is %u\n", entry, result);
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static uint16_t table_entry_get_prev_index(
+	void* entry,
+	uint16_t entry_index,
+	void* meta,
+	uint16_t base_table_size)
+{
+	uint16_t result;
+	struct ipa_nat_rule* rule = (struct ipa_nat_rule*)entry;
+
+	UNUSED(entry_index);
+	UNUSED(meta);
+	UNUSED(base_table_size);
+
+	IPADBG("In\n");
+
+	result = rule->prev_index;
+
+	IPADBG("Previous entry of %u is %u\n", entry_index, result);
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static void table_entry_set_prev_index(
+	void*    entry,
+	uint16_t entry_index,
+	uint16_t prev_index,
+	void*    meta,
+	uint16_t base_table_size)
+{
+	struct ipa_nat_rule* rule = (struct ipa_nat_rule*) entry;
+
+	UNUSED(entry_index);
+	UNUSED(meta);
+	UNUSED(base_table_size);
+
+	IPADBG("In\n");
+
+	IPADBG("Previous entry of %u is %u\n", entry_index, prev_index);
+
+	rule->prev_index = prev_index;
+
+	IPADBG("Out\n");
 }
 
 /**
- * Read8BitFieldValue()
- * @rule: [in/out]
- * @param_type: [in]
- * @value: [in]
- *
- *
- *
- * Returns: None
- */
-
-uint8_t Read8BitFieldValue(uint32_t param,
-														ipa_nat_rule_field_type fld_type)
-{
-	void *temp = (void *)&param;
-
-	switch (fld_type) {
-
-	case PROTOCOL_FIELD:
-		return ((time_stamp_proto *)temp)->protocol;
-
-	default:
-		IPAERR("Invalid Field type passed\n");
-		return 0;
-	}
-}
-
-uint16_t Read16BitFieldValue(uint32_t param,
-														 ipa_nat_rule_field_type fld_type)
-{
-	void *temp = (void *)&param;
-
-	switch (fld_type) {
-
-	case NEXT_INDEX_FIELD:
-		return ((next_index_pub_port *)temp)->next_index;
-
-	case PUBLIC_PORT_FILED:
-		return ((next_index_pub_port *)temp)->public_port;
-
-	case ENABLE_FIELD:
-		return ((ipcksum_enbl *)temp)->enable;
-
-	case SW_SPEC_PARAM_PREV_INDEX_FIELD:
-		return ((sw_spec_params *)temp)->prev_index;
-
-	case SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD:
-		return ((sw_spec_params *)temp)->index_table_entry;
-
-	case INDX_TBL_TBL_ENTRY_FIELD:
-		return ((tbl_ent_nxt_indx *)temp)->tbl_entry;
-
-	case INDX_TBL_NEXT_INDEX_FILED:
-		return ((tbl_ent_nxt_indx *)temp)->next_index;
-
-#ifdef NAT_DUMP
-	case IP_CHKSUM_FIELD:
-		return ((ipcksum_enbl *)temp)->ip_chksum;
-#endif
-
-	default:
-		IPAERR("Invalid Field type passed\n");
-		return 0;
-	}
-}
-
-uint32_t Read32BitFieldValue(uint32_t param,
-														 ipa_nat_rule_field_type fld_type)
-{
-
-	void *temp = (void *)&param;
-
-	switch (fld_type) {
-
-	case TIME_STAMP_FIELD:
-		return ((time_stamp_proto *)temp)->time_stamp;
-
-	default:
-		IPAERR("Invalid Field type passed\n");
-		return 0;
-	}
-}
-
-/**
-* GetIPAVer(void) - store IPA HW ver in cache
-*
-*
-* Returns: 0 on success, negative on failure
-*/
-int GetIPAVer(void)
-{
-	int ret;
-
-	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_HW_VERSION, &ipv4_nat_cache.ver);
-	if (ret != 0) {
-		perror("GetIPAVer(): ioctl error value");
-		IPAERR("unable to get IPA version. Error ;%d\n", ret);
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		return -EINVAL;
-	}
-	IPADBG("IPA version is %d\n", ipv4_nat_cache.ver);
-	return 0;
-}
-
-/**
- * CreateNatDevice() - Create nat devices
- * @mem: [in] name of device that need to create
- *
- * Create Nat device and Register for file create
- * notification in given directory and wait till
- * receive notification
- *
- * Returns: 0 on success, negative on failure
- */
-int CreateNatDevice(struct ipa_ioc_nat_alloc_mem *mem)
-{
-	int ret;
-
-	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_ALLOC_NAT_MEM, mem);
-	if (ret != 0) {
-		perror("CreateNatDevice(): ioctl error value");
-		IPAERR("unable to post nat mem init. Error ;%d\n", ret);
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		return -EINVAL;
-	}
-	IPADBG("posted IPA_IOC_ALLOC_NAT_MEM to kernel successfully\n");
-	return 0;
-}
-
-/**
- * GetNearest2Power() - Returns the nearest power of 2
- * @num: [in] given number
- * @ret: [out] nearest power of 2
- *
- * Returns the nearest power of 2 for a
- * given number
- *
- * Returns: 0 on success, negative on failure
- */
-int GetNearest2Power(uint16_t num, uint16_t *ret)
-{
-	uint16_t number = num;
-	uint16_t tmp = 1;
-	*ret = 0;
-
-	if (0 == num) {
-		return -EINVAL;
-	}
-
-	if (1 == num) {
-		*ret = 2;
-		return 0;
-	}
-
-	for (;;) {
-		if (1 == num) {
-			if (number != tmp) {
-				tmp *= 2;
-			}
-
-			*ret = tmp;
-			return 0;
-		}
-
-		num >>= 1;
-		tmp *= 2;
-	}
-
-	return -EINVAL;
-}
-
-/**
- * GetNearestEven() - Returns the nearest even number
- * @num: [in] given number
- * @ret: [out] nearest even number
- *
- * Returns the nearest even number for a given number
- *
- * Returns: 0 on success, negative on failure
- */
-void GetNearestEven(uint16_t num, uint16_t *ret)
-{
-
-	if (num < 2) {
-		*ret = 2;
-		return;
-	}
-
-	while ((num % 2) != 0) {
-		num = num + 1;
-	}
-
-	*ret = num;
-	return;
-}
-
-/**
- * dst_hash() - Find the index into ipv4 base table
- * @public_ip: [in] public_ip
- * @trgt_ip: [in] Target IP address
- * @trgt_port: [in]  Target port
- * @public_port: [in]  Public port
- * @proto: [in] Protocol (TCP/IP)
- * @size: [in] size of the ipv4 base Table
- *
- * This hash method is used to find the hash index of new nat
- * entry into ipv4 base table. In case of zero index, the
- * new entry will be stored into N-1 index where N is size of
- * ipv4 base table
- *
- * Returns: >0 index into ipv4 base table, negative on failure
- */
-static uint16_t dst_hash(uint32_t public_ip, uint32_t trgt_ip,
-			uint16_t trgt_port, uint16_t public_port,
-			uint8_t proto, uint16_t size)
-{
-	uint16_t hash = ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
-		 (trgt_port) ^ (public_port) ^ (proto);
-
-	if (ipv4_nat_cache.ver >= IPA_HW_v4_0)
-		hash ^= ((uint16_t)(public_ip)) ^
-		((uint16_t)(public_ip >> 16));
-
-	IPADBG("public ip 0x%X\n", public_ip);
-	IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
-	IPADBG("public_port: 0x%x\n", public_port);
-	IPADBG("proto: 0x%x size: 0x%x\n", proto, size);
-
-	hash = (hash & size);
-
-	/* If the hash resulted to zero then set it to maximum value
-		 as zero is unused entry in nat tables */
-	if (0 == hash) {
-		return size;
-	}
-
-	IPADBG("dst_hash returning value: %d\n", hash);
-	return hash;
-}
-
-/**
- * src_hash() - Find the index into ipv4 index base table
- * @priv_ip: [in] Private IP address
- * @priv_port: [in]  Private port
- * @trgt_ip: [in]  Target IP address
- * @trgt_port: [in] Target Port
- * @proto: [in]  Protocol (TCP/IP)
- * @size: [in] size of the ipv4 index base Table
- *
- * This hash method is used to find the hash index of new nat
- * entry into ipv4 index base table. In case of zero index, the
- * new entry will be stored into N-1 index where N is size of
- * ipv4 index base table
- *
- * Returns: >0 index into ipv4 index base table, negative on failure
- */
-static uint16_t src_hash(uint32_t priv_ip, uint16_t priv_port,
-				uint32_t trgt_ip, uint16_t trgt_port,
-				uint8_t proto, uint16_t size)
-{
-	uint16_t hash =  ((uint16_t)(priv_ip)) ^ ((uint16_t)(priv_ip >> 16)) ^
-		 (priv_port) ^
-		 ((uint16_t)(trgt_ip)) ^ ((uint16_t)(trgt_ip >> 16)) ^
-		 (trgt_port) ^ (proto);
-
-	IPADBG("priv_ip: 0x%x priv_port: 0x%x\n", priv_ip, priv_port);
-	IPADBG("trgt_ip: 0x%x trgt_port: 0x%x\n", trgt_ip, trgt_port);
-	IPADBG("proto: 0x%x size: 0x%x\n", proto, size);
-
-	hash = (hash & size);
-
-	/* If the hash resulted to zero then set it to maximum value
-		 as zero is unused entry in nat tables */
-	if (0 == hash) {
-		return size;
-	}
-
-	IPADBG("src_hash returning value: %d\n", hash);
-	return hash;
-}
-
-/**
- * ipa_nati_calc_ip_cksum() - Calculate the source nat
- *														 IP checksum diff
+ * ipa_nati_calc_ip_cksum() - Calculate the source nat IP checksum diff
  * @pub_ip_addr: [in] public ip address
  * @priv_ip_addr: [in]	Private ip address
  *
@@ -383,12 +170,15 @@
  *
  * Returns: >0 ip checksum diff
  */
-static uint16_t ipa_nati_calc_ip_cksum(uint32_t pub_ip_addr,
-										uint32_t priv_ip_addr)
+static uint16_t ipa_nati_calc_ip_cksum(
+	uint32_t pub_ip_addr,
+	uint32_t priv_ip_addr)
 {
 	uint16_t ret;
 	uint32_t cksum = 0;
 
+	IPADBG("In\n");
+
 	/* Add LSB(2 bytes) of public ip address to cksum */
 	cksum += (pub_ip_addr & 0xFFFF);
 
@@ -424,12 +214,14 @@
 
 	/* Return the LSB(2 bytes) of checksum	*/
 	ret = (uint16_t)cksum;
+
+	IPADBG("Out\n");
+
 	return ret;
 }
 
 /**
- * ipa_nati_calc_tcp_udp_cksum() - Calculate the source nat
- *																TCP/UDP checksum diff
+ * ipa_nati_calc_tcp_udp_cksum() - Calculate the source nat TCP/UDP checksum diff
  * @pub_ip_addr: [in] public ip address
  * @pub_port: [in] public tcp/udp port
  * @priv_ip_addr: [in]	Private ip address
@@ -443,14 +235,17 @@
  *
  * Returns: >0 tcp/udp checksum diff
  */
-static uint16_t ipa_nati_calc_tcp_udp_cksum(uint32_t pub_ip_addr,
-										uint16_t pub_port,
-										uint32_t priv_ip_addr,
-										uint16_t priv_port)
+static uint16_t ipa_nati_calc_tcp_udp_cksum(
+	uint32_t pub_ip_addr,
+	uint16_t pub_port,
+	uint32_t priv_ip_addr,
+	uint16_t priv_port)
 {
 	uint16_t ret = 0;
 	uint32_t cksum = 0;
 
+	IPADBG("In\n");
+
 	/* Add LSB(2 bytes) of public ip address to cksum */
 	cksum += (pub_ip_addr & 0xFFFF);
 
@@ -505,1953 +300,2390 @@
 
 	/* return the LSB(2 bytes) of checksum */
 	ret = (uint16_t)cksum;
+
+	IPADBG("Out\n");
+
 	return ret;
 }
 
-/**
- * ipa_nati_make_rule_hdl() - makes nat rule handle
- * @tbl_hdl: [in] nat table handle
- * @tbl_entry: [in]  nat table entry
- *
- * Calculate the nat rule handle which from
- * nat entry which will be returned to client of
- * nat driver
- *
- * Returns: >0 nat rule handle
- */
-uint16_t ipa_nati_make_rule_hdl(uint16_t tbl_hdl,
-				uint16_t tbl_entry)
+static int table_entry_copy_from_user(
+	void* entry,
+	void* user_data)
 {
-	struct ipa_nat_ip4_table_cache *tbl_ptr;
-	uint16_t rule_hdl = 0;
-	uint16_t cnt = 0;
+	uint32_t pub_ip_addr;
 
-	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
+	struct ipa_nat_rule*     nat_entry = (struct ipa_nat_rule*) entry;
+	const ipa_nat_ipv4_rule* user_rule = (const ipa_nat_ipv4_rule*) user_data;
 
-	if (tbl_entry >= tbl_ptr->table_entries) {
-		/* Increase the current expansion table count */
-		tbl_ptr->cur_expn_tbl_cnt++;
+	IPADBG("In\n");
 
-		/* Update the index into table */
-		rule_hdl = tbl_entry - tbl_ptr->table_entries;
-		rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
-		/* Update the table type mask */
-		rule_hdl = (rule_hdl | IPA_NAT_RULE_HDL_TBL_TYPE_MASK);
-	} else {
-		/* Increase the current count */
-		tbl_ptr->cur_tbl_cnt++;
+	pub_ip_addr = pdns[user_rule->pdn_index].public_ip;
 
-		rule_hdl = tbl_entry;
-		rule_hdl = (rule_hdl << IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
+	nat_entry->private_ip   = user_rule->private_ip;
+	nat_entry->private_port = user_rule->private_port;
+	nat_entry->protocol     = user_rule->protocol;
+	nat_entry->public_port  = user_rule->public_port;
+	nat_entry->target_ip    = user_rule->target_ip;
+	nat_entry->target_port  = user_rule->target_port;
+	nat_entry->pdn_index    = user_rule->pdn_index;
+
+	nat_entry->ip_chksum =
+		ipa_nati_calc_ip_cksum(pub_ip_addr, user_rule->private_ip);
+
+	if (IPPROTO_TCP == nat_entry->protocol ||
+		IPPROTO_UDP == nat_entry->protocol) {
+		nat_entry->tcp_udp_chksum = ipa_nati_calc_tcp_udp_cksum(
+			pub_ip_addr,
+			user_rule->public_port,
+			user_rule->private_ip,
+			user_rule->private_port);
 	}
 
-	for (; cnt < (tbl_ptr->table_entries + tbl_ptr->expn_table_entries); cnt++) {
-		if (IPA_NAT_INVALID_NAT_ENTRY == tbl_ptr->rule_id_array[cnt]) {
-			tbl_ptr->rule_id_array[cnt] = rule_hdl;
-			return cnt + 1;
-		}
-	}
+	IPADBG("Out\n");
 
 	return 0;
 }
 
-/**
- * ipa_nati_parse_ipv4_rule_hdl() - prase rule handle
- * @tbl_hdl:	[in] nat table rule
- * @rule_hdl: [in] nat rule handle
- * @expn_tbl: [out] expansion table or not
- * @tbl_entry: [out] index into table
- *
- * Parse the rule handle to retrieve the nat table
- * type and entry of nat table
- *
- * Returns: None
+static int table_entry_head_insert(
+	void*      entry,
+	void*      user_data,
+	uint16_t*  dma_command_data)
+{
+	int  ret;
+
+	IPADBG("In\n");
+
+	IPADBG("entry(%p) user_data(%p) dma_command_data(%p)\n",
+		   entry,
+		   user_data,
+		   dma_command_data);
+
+	ret = table_entry_copy_from_user(entry, user_data);
+
+	if (ret) {
+		IPAERR("unable to copy from user a new entry\n");
+		goto bail;
+	}
+
+	*dma_command_data = 0;
+
+	((ipa_nat_flags*)dma_command_data)->enable = IPA_NAT_FLAG_ENABLE_BIT;
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static int table_entry_tail_insert(
+	void* entry,
+	void* user_data)
+{
+	struct ipa_nat_rule* nat_entry = (struct ipa_nat_rule*) entry;
+
+	int  ret;
+
+	IPADBG("In\n");
+
+	IPADBG("entry(%p) user_data(%p)\n",
+		   entry,
+		   user_data);
+
+	ret = table_entry_copy_from_user(entry, user_data);
+
+	if (ret) {
+		IPAERR("unable to copy from user a new entry\n");
+		goto bail;
+	}
+
+	nat_entry->enable = IPA_NAT_FLAG_ENABLE_BIT;
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static uint16_t table_entry_get_delete_head_dma_command_data(
+	void* head,
+	void* next_entry)
+{
+	UNUSED(head);
+	UNUSED(next_entry);
+
+	IPADBG("In\n");
+
+	IPADBG("Out\n");
+
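+	/*
+	 * Deleting a list head is presumably signaled to HW by DMA'ing an
+	 * invalid protocol value into the head entry; the HELP_DELETE_HEAD
+	 * helper for the NAT table targets the protocol field.
+	 */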
+	return IPA_NAT_INVALID_PROTO_FIELD_VALUE;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Private helpers for manipulating index tables
+ * ----------------------------------------------------------------------------
  */
-void ipa_nati_parse_ipv4_rule_hdl(uint8_t tbl_index,
-				uint16_t rule_hdl, uint8_t *expn_tbl,
-				uint16_t *tbl_entry)
+static int index_table_entry_is_valid(
+	void* entry)
 {
-	struct ipa_nat_ip4_table_cache *tbl_ptr;
-	uint16_t rule_id;
+	struct ipa_nat_indx_tbl_rule* rule =
+		(struct ipa_nat_indx_tbl_rule*) entry;
 
-	*expn_tbl = 0;
-	*tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
-	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_index];
-
-	if (rule_hdl >= (tbl_ptr->table_entries + tbl_ptr->expn_table_entries)) {
-		IPAERR("invalid rule handle\n");
-		return;
-	}
-
-	rule_id = tbl_ptr->rule_id_array[rule_hdl-1];
-
-	/* Retrieve the table type */
-	*expn_tbl = 0;
-	if (rule_id & IPA_NAT_RULE_HDL_TBL_TYPE_MASK) {
-		*expn_tbl = 1;
-	}
-
-	/* Retrieve the table entry */
-	*tbl_entry = (rule_id >> IPA_NAT_RULE_HDL_TBL_TYPE_BITS);
-	return;
-}
-
-uint32_t ipa_nati_get_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
-						nat_table_type tbl_type,
-						uint16_t	tbl_entry)
-{
-	struct ipa_nat_rule *tbl_ptr;
-	uint32_t ret = 0;
-
-	if (IPA_NAT_EXPN_TBL == tbl_type) {
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
-	} else {
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
-	}
-
-	ret = (char *)&tbl_ptr[tbl_entry] - (char *)tbl_ptr;
-	ret += cache_ptr->tbl_addr_offset;
-	return ret;
-}
-
-uint32_t ipa_nati_get_index_entry_offset(struct ipa_nat_ip4_table_cache *cache_ptr,
-								nat_table_type tbl_type,
-								uint16_t indx_tbl_entry)
-{
-	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
-	uint32_t ret = 0;
-
-	if (IPA_NAT_INDEX_EXPN_TBL == tbl_type) {
-		indx_tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
-	} else {
-		indx_tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
-	}
-
-	ret = (char *)&indx_tbl_ptr[indx_tbl_entry] - (char *)indx_tbl_ptr;
-	ret += cache_ptr->tbl_addr_offset;
-	return ret;
-}
-
-/* ------------------------------------------
-		UTILITY FUNCTIONS END
---------------------------------------------*/
-
-/* ------------------------------------------
-	 Main Functions
---------------------------------------------**/
-void ipa_nati_reset_tbl(uint8_t tbl_indx)
-{
-	uint16_t table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
-	uint16_t expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_indx].expn_table_entries;
-
-	/* Base table */
-	IPADBG("memset() base table to 0, %p\n",
-				 ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr);
-
-	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr,
-				 0,
-				 IPA_NAT_TABLE_ENTRY_SIZE * table_entries);
-
-	/* Base expansino table */
-	IPADBG("memset() expn base table to 0, %p\n",
-				 ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr);
-
-	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr,
-				 0,
-				 IPA_NAT_TABLE_ENTRY_SIZE * expn_table_entries);
-
-	/* Index table */
-	IPADBG("memset() index table to 0, %p\n",
-				 ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr);
-
-	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_addr,
-				 0,
-				 IPA_NAT_INDEX_TABLE_ENTRY_SIZE * table_entries);
-
-	/* Index expansion table */
-	IPADBG("memset() index expn table to 0, %p\n",
-				 ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr);
-
-	memset(ipv4_nat_cache.ip4_tbl[tbl_indx].index_table_expn_addr,
-				 0,
-				 IPA_NAT_INDEX_TABLE_ENTRY_SIZE * expn_table_entries);
-
-	IPADBG("returning from ipa_nati_reset_tbl()\n");
-	return;
-}
-
-int ipa_nati_add_ipv4_tbl(uint32_t public_ip_addr,
-				uint16_t number_of_entries,
-				uint32_t *tbl_hdl)
-{
-	struct ipa_ioc_nat_alloc_mem mem;
-	uint8_t tbl_indx = ipv4_nat_cache.table_cnt;
-	uint16_t table_entries, expn_table_entries;
 	int ret;
 
-	*tbl_hdl = 0;
-	/* Allocate table */
-	memset(&mem, 0, sizeof(mem));
-	ret = ipa_nati_alloc_table(number_of_entries,
-														 &mem,
-														 &table_entries,
-														 &expn_table_entries);
-	if (0 != ret) {
-		IPAERR("unable to allocate nat table\n");
-		return -ENOMEM;
-	}
+	IPADBG("In\n");
 
-	/* Update the cache
-		 The (IPA_NAT_UNUSED_BASE_ENTRIES/2) indicates zero entry entries
-		 for both base and expansion table
-	*/
-	ret = ipa_nati_update_cache(&mem,
-															public_ip_addr,
-															table_entries,
-															expn_table_entries);
-	if (0 != ret) {
-		IPAERR("unable to update cache Error: %d\n", ret);
-		return -EINVAL;
-	}
+	ret = (rule->tbl_entry) ? 1 : 0;
 
-	/* Reset the nat table before posting init cmd */
-	ipa_nati_reset_tbl(tbl_indx);
+	IPADBG("enable(%d)\n", ret);
 
-	/* Initialize the ipa hw with nat table dimensions */
-	ret = ipa_nati_post_ipv4_init_cmd(tbl_indx);
-	if (0 != ret) {
-		IPAERR("unable to post nat_init command Error %d\n", ret);
-		return -EINVAL;
-	}
+	IPADBG("Out\n");
 
-	/* store the initial public ip address in the cached pdn table
-		this is backward compatible for pre IPAv4 versions, we will always
-		use this ip as the single PDN address
-	*/
-	pdns[0].public_ip = public_ip_addr;
-
-	/* Return table handle */
-	ipv4_nat_cache.table_cnt++;
-	*tbl_hdl = ipv4_nat_cache.table_cnt;
-
-#ifdef NAT_DUMP
-	ipa_nat_dump_ipv4_table(*tbl_hdl);
-#endif
-	return 0;
-}
-
-int ipa_nati_alloc_table(uint16_t number_of_entries,
-				struct ipa_ioc_nat_alloc_mem *mem,
-				uint16_t *table_entries,
-				uint16_t *expn_table_entries)
-{
-	int fd = 0, ret;
-	uint16_t total_entries;
-
-	/* Copy the table name */
-	strlcpy(mem->dev_name, NAT_DEV_NAME, IPA_RESOURCE_NAME_MAX);
-
-	/* Calculate the size for base table and expansion table */
-	*table_entries = (uint16_t)(number_of_entries * IPA_NAT_BASE_TABLE_PERCENTAGE);
-	if (*table_entries == 0) {
-		*table_entries = 1;
-	}
-	if (GetNearest2Power(*table_entries, table_entries)) {
-		IPAERR("unable to calculate power of 2\n");
-		return -EINVAL;
-	}
-
-	*expn_table_entries = (uint16_t)(number_of_entries * IPA_NAT_EXPANSION_TABLE_PERCENTAGE);
-	GetNearestEven(*expn_table_entries, expn_table_entries);
-
-	total_entries = (*table_entries)+(*expn_table_entries);
-
-	/* Calclate the memory size for both table and index table entries */
-	mem->size = (IPA_NAT_TABLE_ENTRY_SIZE * total_entries);
-	IPADBG("Nat Table size: %zu\n", mem->size);
-	mem->size += (IPA_NAT_INDEX_TABLE_ENTRY_SIZE * total_entries);
-	IPADBG("Nat Base and Index Table size: %zu\n", mem->size);
-
-	if (!ipv4_nat_cache.ipa_fd) {
-		fd = open(IPA_DEV_NAME, O_RDONLY);
-		if (fd < 0) {
-			perror("ipa_nati_alloc_table(): open error value:");
-			IPAERR("unable to open ipa device\n");
-			return -EIO;
-		}
-		ipv4_nat_cache.ipa_fd = fd;
-	}
-
-	if (GetIPAVer()) {
-		IPAERR("unable to get ipa ver\n");
-		return -EIO;
-	}
-
-	ret = CreateNatDevice(mem);
 	return ret;
 }
 
-
-int ipa_nati_update_cache(struct ipa_ioc_nat_alloc_mem *mem,
-				uint32_t public_addr,
-				uint16_t tbl_entries,
-				uint16_t expn_tbl_entries)
+static uint16_t index_table_entry_get_next_index(
+	void* entry)
 {
-	uint32_t index = ipv4_nat_cache.table_cnt;
-	char *ipv4_rules_addr = NULL;
+	uint16_t result;
+	struct ipa_nat_indx_tbl_rule* rule = (struct ipa_nat_indx_tbl_rule*)entry;
 
-	int fd = 0;
-	int flags = MAP_SHARED;
-	int prot = PROT_READ | PROT_WRITE;
-	off_t offset = 0;
+	IPADBG("In\n");
+
+	result = rule->next_index;
+
+	IPADBG("Next entry of %pK is %d\n", entry, result);
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static uint16_t index_table_entry_get_prev_index(
+	void* entry,
+	uint16_t entry_index,
+	void* meta,
+	uint16_t base_table_size)
+{
+	uint16_t result = 0;
+	struct ipa_nat_indx_tbl_meta_info* index_expn_table_meta =
+		(struct ipa_nat_indx_tbl_meta_info*)meta;
+
+	UNUSED(entry);
+
+	IPADBG("In\n");
+
+	if (entry_index >= base_table_size)
+		result = index_expn_table_meta[entry_index - base_table_size].prev_index;
+
+	IPADBG("Previous entry of %d is %d\n", entry_index, result);
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+static void index_table_entry_set_prev_index(
+	void*    entry,
+	uint16_t entry_index,
+	uint16_t prev_index,
+	void*    meta,
+	uint16_t base_table_size)
+{
+	struct ipa_nat_indx_tbl_meta_info* index_expn_table_meta =
+		(struct ipa_nat_indx_tbl_meta_info*) meta;
+
+	UNUSED(entry);
+
+	IPADBG("In\n");
+
+	IPADBG("Previous entry of %u is %u\n", entry_index, prev_index);
+
+	if ( entry_index >= base_table_size )
+	{
+		index_expn_table_meta[entry_index - base_table_size].prev_index = prev_index;
+	}
+	else if ( VALID_INDEX(prev_index) )
+	{
+		IPAERR("Base table entry %u can't has prev entry %u, but only %u",
+			   entry_index, prev_index, IPA_TABLE_INVALID_ENTRY);
+	}
+
+	IPADBG("Out\n");
+}
+
+static int index_table_entry_head_insert(
+	void*      entry,
+	void*      user_data,
+	uint16_t*  dma_command_data)
+{
+	IPADBG("In\n");
+
+	UNUSED(entry);
+
+	IPADBG("entry(%p) user_data(%p) dma_command_data(%p)\n",
+		   entry,
+		   user_data,
+		   dma_command_data);
+
+	*dma_command_data = *((uint16_t*)user_data);
+
+	IPADBG("Out\n");
+
+	return 0;
+}
+
+static int index_table_entry_tail_insert(
+	void* entry,
+	void* user_data)
+{
+	struct ipa_nat_indx_tbl_rule* rule_ptr =
+		(struct ipa_nat_indx_tbl_rule*) entry;
+
+	IPADBG("In\n");
+
+	IPADBG("entry(%p) user_data(%p)\n",
+		   entry,
+		   user_data);
+
+	rule_ptr->tbl_entry = *((uint16_t*)user_data);
+
+	IPADBG("Out\n");
+
+	return 0;
+}
+
+static uint16_t index_table_entry_get_delete_head_dma_command_data(
+	void* head,
+	void* next_entry)
+{
+	uint16_t result;
+	struct ipa_nat_indx_tbl_rule* rule =
+		(struct ipa_nat_indx_tbl_rule*)next_entry;
+
+	UNUSED(head);
+
+	IPADBG("In\n");
+
+	result = rule->tbl_entry;
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Private data and functions used by this file's API
+ * ----------------------------------------------------------------------------
+ */
+static ipa_table_entry_interface entry_interface = {
+	table_entry_is_valid,
+	table_entry_get_next_index,
+	table_entry_get_prev_index,
+	table_entry_set_prev_index,
+	table_entry_head_insert,
+	table_entry_tail_insert,
+	table_entry_get_delete_head_dma_command_data
+};
+
+static ipa_table_entry_interface index_entry_interface = {
+	index_table_entry_is_valid,
+	index_table_entry_get_next_index,
+	index_table_entry_get_prev_index,
+	index_table_entry_set_prev_index,
+	index_table_entry_head_insert,
+	index_table_entry_tail_insert,
+	index_table_entry_get_delete_head_dma_command_data
+};
+
+/**
+ * ipa_nati_create_table_dma_cmd_helpers()
+ *
+ *   Creates dma_cmd_helpers for base and index tables in the received
+ *   NAT table
+ *
+ * @nat_table: [in] NAT table
+ * @table_indx: [in] The index of the NAT table
+ *
+ * A DMA command helper helps to generate the DMA command for one
+ * specific field change. Each table has 3 different types of field
+ * change: update_head, update_entry and delete_head. This function
+ * creates the helpers for base and index tables and updates the
+ * tables correspondingly.
+ */
+static void ipa_nati_create_table_dma_cmd_helpers(
+	struct ipa_nat_ip4_table_cache* nat_table,
+	uint8_t table_indx)
+{
+	IPADBG("In\n");
+
+	/*
+	 * Create helpers for base table
+	 */
+	ipa_table_dma_cmd_helper_init(
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_FLAGS],
+		table_indx,
+		IPA_NAT_BASE_TBL,
+		IPA_NAT_EXPN_TBL,
+		nat_table->mem_desc.addr_offset + IPA_NAT_RULE_FLAG_FIELD_OFFSET);
+
+	ipa_table_dma_cmd_helper_init(
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_NEXT_INDEX],
+		table_indx,
+		IPA_NAT_BASE_TBL,
+		IPA_NAT_EXPN_TBL,
+		nat_table->mem_desc.addr_offset + IPA_NAT_RULE_NEXT_FIELD_OFFSET);
+
+	ipa_table_dma_cmd_helper_init(
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_PROTOCOL],
+		table_indx,
+		IPA_NAT_BASE_TBL,
+		IPA_NAT_EXPN_TBL,
+		nat_table->mem_desc.addr_offset + IPA_NAT_RULE_PROTO_FIELD_OFFSET);
+
+	/*
+	 * Create helpers for index table
+	 */
+	ipa_table_dma_cmd_helper_init(
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_ENTRY],
+		table_indx,
+		IPA_NAT_INDX_TBL,
+		IPA_NAT_INDEX_EXPN_TBL,
+		nat_table->mem_desc.addr_offset + IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET);
+
+	ipa_table_dma_cmd_helper_init(
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_NEXT_INDEX],
+		table_indx,
+		IPA_NAT_INDX_TBL,
+		IPA_NAT_INDEX_EXPN_TBL,
+		nat_table->mem_desc.addr_offset + IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET);
+
+	/*
+	 * Init helpers for base table
+	 */
+	nat_table->table.dma_help[HELP_UPDATE_HEAD] =
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_FLAGS];
+
+	nat_table->table.dma_help[HELP_UPDATE_ENTRY] =
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_NEXT_INDEX];
+
+	nat_table->table.dma_help[HELP_DELETE_HEAD] =
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_TABLE_PROTOCOL];
+
+	/*
+	 * Init helpers for index table
+	 */
+	nat_table->index_table.dma_help[HELP_UPDATE_HEAD] =
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_ENTRY];
+
+	nat_table->index_table.dma_help[HELP_UPDATE_ENTRY] =
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_NEXT_INDEX];
+
+	nat_table->index_table.dma_help[HELP_DELETE_HEAD] =
+		&nat_table->table_dma_cmd_helpers[IPA_NAT_INDEX_TABLE_ENTRY];
+
+	IPADBG("Out\n");
+}
+
+/**
+ * ipa_nati_create_table() - Creates a new IPv4 NAT table
+ * @nat_table: [in] IPv4 NAT table
+ * @public_ip_addr: [in] public IPv4 address
+ * @number_of_entries: [in] number of NAT entries
+ * @table_index: [in] the index of the IPv4 NAT table
+ *
+ * This function creates new IPv4 NAT table:
+ * - Initializes table, index table, memory descriptor and
+ *   table_dma_cmd_helpers structures
+ * - Allocates the index expansion table meta data
+ * - Allocates, maps and clears the memory for table and index table
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+static int ipa_nati_create_table(
+	struct ipa_nat_cache*           nat_cache_ptr,
+	struct ipa_nat_ip4_table_cache* nat_table,
+	uint32_t                        public_ip_addr,
+	uint16_t                        number_of_entries,
+	uint8_t                         table_index)
+{
+	int ret, size;
+	void* base_addr;
+
 #ifdef IPA_ON_R3PC
-	int ret = 0;
 	uint32_t nat_mem_offset = 0;
 #endif
 
-	ipv4_nat_cache.ip4_tbl[index].valid = IPA_NAT_TABLE_VALID;
-	ipv4_nat_cache.ip4_tbl[index].public_addr = public_addr;
-	ipv4_nat_cache.ip4_tbl[index].size = mem->size;
-	ipv4_nat_cache.ip4_tbl[index].tbl_addr_offset = mem->offset;
+	IPADBG("In\n");
 
-	ipv4_nat_cache.ip4_tbl[index].table_entries = tbl_entries;
-	ipv4_nat_cache.ip4_tbl[index].expn_table_entries = expn_tbl_entries;
+	nat_table->public_addr = public_ip_addr;
 
-	IPADBG("num of ipv4 rules:%d\n", tbl_entries);
-	IPADBG("num of ipv4 expn rules:%d\n", expn_tbl_entries);
+	ipa_table_init(
+		&nat_table->table,
+		IPA_NAT_TABLE_NAME,
+		nat_cache_ptr->nmi,
+		sizeof(struct ipa_nat_rule),
+		NULL,
+		0,
+		&entry_interface);
 
-	/* allocate memory for nat index expansion table */
-	if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
-		ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta =
-			 malloc(sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);
+	ret = ipa_table_calculate_entries_num(
+		&nat_table->table,
+		number_of_entries,
+		nat_cache_ptr->nmi);
 
-		if (NULL == ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta) {
-			IPAERR("Fail to allocate ipv4 index expansion table meta\n");
-			return 0;
-		}
-
-		memset(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta,
-					 0,
-					 sizeof(struct ipa_nat_indx_tbl_meta_info) * expn_tbl_entries);
+	if (ret) {
+		IPAERR(
+			"unable to calculate number of entries in "
+			"nat table %d, while required by user %d\n",
+			table_index, number_of_entries);
+		goto done;
 	}
 
-	/* Allocate memory for rule_id_array */
-	if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
-		ipv4_nat_cache.ip4_tbl[index].rule_id_array =
-			 malloc(sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));
+	/*
+	 * Allocate memory for NAT index expansion table meta data
+	 */
+	nat_table->index_expn_table_meta = (struct ipa_nat_indx_tbl_meta_info*)
+		calloc(nat_table->table.expn_table_entries,
+			   sizeof(struct ipa_nat_indx_tbl_meta_info));
 
-		if (NULL == ipv4_nat_cache.ip4_tbl[index].rule_id_array) {
-			IPAERR("Fail to allocate rule id array\n");
-			return 0;
-		}
-
-		memset(ipv4_nat_cache.ip4_tbl[index].rule_id_array,
-					 0,
-					 sizeof(uint16_t) * (tbl_entries + expn_tbl_entries));
+	if (nat_table->index_expn_table_meta == NULL) {
+		size = nat_table->table.expn_table_entries *
+			sizeof(struct ipa_nat_indx_tbl_meta_info);
+		IPAERR(
+			"Fail to allocate ipv4 index expansion table meta with size %d\n",
+			size);
+		ret = -ENOMEM;
+		goto done;
 	}
 
+	ipa_table_init(
+		&nat_table->index_table,
+		IPA_NAT_INDEX_TABLE_NAME,
+		nat_cache_ptr->nmi,
+		sizeof(struct ipa_nat_indx_tbl_rule),
+		nat_table->index_expn_table_meta,
+		sizeof(struct ipa_nat_indx_tbl_meta_info),
+		&index_entry_interface);
 
-	/* open the nat table */
-	strlcpy(mem->dev_name, NAT_DEV_FULL_NAME, IPA_RESOURCE_NAME_MAX);
-	fd = open(mem->dev_name, O_RDWR);
-	if (fd < 0) {
-		perror("ipa_nati_update_cache(): open error value:");
-		IPAERR("unable to open nat device. Error:%d\n", fd);
-		return -EIO;
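+	/* The index table mirrors the base table's entry counts. */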
+	nat_table->index_table.table_entries =
+		nat_table->table.table_entries;
+
+	nat_table->index_table.expn_table_entries =
+		nat_table->table.expn_table_entries;
+
+	nat_table->index_table.tot_tbl_ents =
+		nat_table->table.tot_tbl_ents;
+
+	size  = ipa_table_calculate_size(&nat_table->table);
+	size += ipa_table_calculate_size(&nat_table->index_table);
+
+	IPADBG("Nat Base and Index Table size: %d\n", size);
+
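+	/*
+	 * A single memory descriptor backs the base and index tables
+	 * (including their expansion tables), hence the summed size.
+	 */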
+	ipa_mem_descriptor_init(
+		&nat_table->mem_desc,
+		IPA_NAT_DEV_NAME,
+		size,
+		table_index,
+		IPA_IOC_ALLOC_NAT_TABLE,
+		IPA_IOC_DEL_NAT_TABLE,
+		true);  /* true here means do consider using sram */
+
+	ret = ipa_mem_descriptor_allocate_memory(
+		&nat_table->mem_desc,
+		nat_cache_ptr->ipa_desc->fd);
+
+	if (ret) {
+		IPAERR("unable to allocate nat memory descriptor Error: %d\n", ret);
+		goto bail_meta;
 	}
 
-	/* copy the nat table name */
-	strlcpy(ipv4_nat_cache.ip4_tbl[index].table_name,
-					mem->dev_name,
-					IPA_RESOURCE_NAME_MAX);
-	ipv4_nat_cache.ip4_tbl[index].nat_fd = fd;
-
-	/* open the nat device Table */
-#ifndef IPA_ON_R3PC
-	ipv4_rules_addr = (void *)mmap(NULL, mem->size,
-																 prot, flags,
-																 fd, offset);
-#else
-	IPADBG("user space r3pc\n");
-	ipv4_rules_addr = (void *)mmap((caddr_t)0, NAT_MMAP_MEM_SIZE,
-																 prot, flags,
-																 fd, offset);
-#endif
-	if (MAP_FAILED  == ipv4_rules_addr) {
-		perror("unable to mmap the memory\n");
-		return -EINVAL;
-	}
+	base_addr = nat_table->mem_desc.base_addr;
 
 #ifdef IPA_ON_R3PC
-	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_GET_NAT_OFFSET, &nat_mem_offset);
-	if (ret != 0) {
-		perror("ipa_nati_post_ipv4_init_cmd(): ioctl error value");
-		IPAERR("unable to post ant offset cmd Error: %d\n", ret);
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		return -EIO;
+	ret = ioctl(nat_cache_ptr->ipa_desc->fd,
+				IPA_IOC_GET_NAT_OFFSET,
+				&nat_mem_offset);
+	if (ret) {
+		IPAERR("unable to post ant offset cmd Error: %d IPA fd %d\n",
+			   ret, nat_cache_ptr->ipa_desc->fd);
+		goto bail_mem_desc;
 	}
-	ipv4_rules_addr += nat_mem_offset;
-	ipv4_nat_cache.ip4_tbl[index].mmap_offset = nat_mem_offset;
+	base_addr += nat_mem_offset;
 #endif
 
-	IPADBG("mmap return value 0x%lx\n", (long unsigned int)ipv4_rules_addr);
+	base_addr =
+		ipa_table_calculate_addresses(&nat_table->table, base_addr);
+	ipa_table_calculate_addresses(&nat_table->index_table, base_addr);
 
-	ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr = ipv4_rules_addr;
+	ipa_table_reset(&nat_table->table);
+	ipa_table_reset(&nat_table->index_table);
 
-	ipv4_nat_cache.ip4_tbl[index].ipv4_expn_rules_addr =
-	ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * tbl_entries);
+	ipa_nati_create_table_dma_cmd_helpers(nat_table, table_index);
 
-	ipv4_nat_cache.ip4_tbl[index].index_table_addr =
-	ipv4_rules_addr + (IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries));
+	goto done;
 
-	ipv4_nat_cache.ip4_tbl[index].index_table_expn_addr =
-	ipv4_rules_addr +
-	(IPA_NAT_TABLE_ENTRY_SIZE * (tbl_entries + expn_tbl_entries))+
-	(IPA_NAT_INDEX_TABLE_ENTRY_SIZE * tbl_entries);
+#ifdef IPA_ON_R3PC
+bail_mem_desc:
+	ipa_mem_descriptor_delete(&nat_table->mem_desc, nat_cache_ptr->ipa_desc->fd);
+#endif
 
-	return 0;
+bail_meta:
+	free(nat_table->index_expn_table_meta);
+	memset(nat_table, 0, sizeof(*nat_table));
+
+done:
+	IPADBG("Out\n");
+
+	return ret;
 }
 
-/* comment: check the implementation once
-	 offset should be in terms of byes */
-int ipa_nati_post_ipv4_init_cmd(uint8_t tbl_index)
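+/*
+ * ipa_nati_destroy_table() - counterpart of ipa_nati_create_table():
+ * releases the table's memory descriptor and index expansion table
+ * meta data, then clears the cached table entry.
+ */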
+static int ipa_nati_destroy_table(
+	struct ipa_nat_cache*           nat_cache_ptr,
+	struct ipa_nat_ip4_table_cache* nat_table)
+{
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_mem_descriptor_delete(
+		&nat_table->mem_desc, nat_cache_ptr->ipa_desc->fd);
+
+	if (ret)
+		IPAERR("unable to delete NAT descriptor\n");
+
+	free(nat_table->index_expn_table_meta);
+
+	memset(nat_table, 0, sizeof(*nat_table));
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
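+/*
+ * ipa_nati_post_ipv4_init_cmd() - posts IPA_IOC_V4_INIT_NAT to the IPA
+ * driver with the table's memory type, offsets and entry counts.
+ */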
+static int ipa_nati_post_ipv4_init_cmd(
+	struct ipa_nat_cache*           nat_cache_ptr,
+	struct ipa_nat_ip4_table_cache* nat_table,
+	uint8_t                         tbl_index,
+	bool                            focus_change )
 {
 	struct ipa_ioc_v4_nat_init cmd;
-	uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_index].tbl_addr_offset;
-	int ret;
 
-	cmd.tbl_index = tbl_index;
+	char buf[1024];
+	int  ret;
 
-	cmd.ipv4_rules_offset = offset;
-	cmd.expn_rules_offset = cmd.ipv4_rules_offset +
-	(ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_TABLE_ENTRY_SIZE);
+	IPADBG("In\n");
 
-	cmd.index_offset = cmd.expn_rules_offset +
-	(ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries * IPA_NAT_TABLE_ENTRY_SIZE);
+	IPADBG("nat_cache_ptr(%p) nat_table(%p) tbl_index(%u) focus_change(%u)\n",
+		   nat_cache_ptr, nat_table, tbl_index, focus_change);
 
-	cmd.index_expn_offset = cmd.index_offset +
-	(ipv4_nat_cache.ip4_tbl[tbl_index].table_entries * IPA_NAT_INDEX_TABLE_ENTRY_SIZE);
+	memset(&cmd, 0, sizeof(cmd));
 
-	cmd.table_entries  = ipv4_nat_cache.ip4_tbl[tbl_index].table_entries - 1;
-	cmd.expn_table_entries = ipv4_nat_cache.ip4_tbl[tbl_index].expn_table_entries;
+	cmd.tbl_index    = tbl_index;
+	cmd.focus_change = focus_change;
 
-	cmd.ip_addr = ipv4_nat_cache.ip4_tbl[tbl_index].public_addr;
+	cmd.mem_type = nat_cache_ptr->nmi;
 
-	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_INIT_NAT, &cmd);
-	if (ret != 0) {
-		perror("ipa_nati_post_ipv4_init_cmd(): ioctl error value");
-		IPAERR("unable to post init cmd Error: %d\n", ret);
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		return -EINVAL;
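+	/*
+	 * The regions are laid out back to back in the memory descriptor:
+	 * base table, base expansion table, index table and index
+	 * expansion table; hence the cumulative offsets below.
+	 */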
+	cmd.ipv4_rules_offset =
+		nat_table->mem_desc.addr_offset;
+
+	cmd.expn_rules_offset =
+		cmd.ipv4_rules_offset +
+		(nat_table->table.table_entries * sizeof(struct ipa_nat_rule));
+
+	cmd.index_offset =
+		cmd.expn_rules_offset +
+		(nat_table->table.expn_table_entries * sizeof(struct ipa_nat_rule));
+
+	cmd.index_expn_offset =
+		cmd.index_offset +
+		(nat_table->index_table.table_entries * sizeof(struct ipa_nat_indx_tbl_rule));
+
+	/*
+	 * The driver/HW expects the base table size to be a power of 2
+	 * minus 1, due to the H/W hash calculation
+	 */
+	cmd.table_entries =
+		nat_table->table.table_entries - 1;
+	cmd.expn_table_entries =
+		nat_table->table.expn_table_entries;
+
+	cmd.ip_addr = nat_table->public_addr;
+
+	*buf = '\0';
+	IPADBG("%s\n", ipa_ioc_v4_nat_init_as_str(&cmd, buf, sizeof(buf)));
+
+	ret = ioctl(nat_cache_ptr->ipa_desc->fd, IPA_IOC_V4_INIT_NAT, &cmd);
+
+	if (ret) {
+		IPAERR("unable to post init cmd Error: %d IPA fd %d\n",
+			   ret, nat_cache_ptr->ipa_desc->fd);
+		goto bail;
 	}
+
 	IPADBG("Posted IPA_IOC_V4_INIT_NAT to kernel successfully\n");
 
-	return 0;
-}
+bail:
+	IPADBG("Out\n");
 
-int ipa_nati_del_ipv4_table(uint32_t tbl_hdl)
-{
-	uint8_t index = (uint8_t)(tbl_hdl - 1);
-	void *addr = (void *)ipv4_nat_cache.ip4_tbl[index].ipv4_rules_addr;
-	struct ipa_ioc_v4_nat_del del_cmd;
-	int ret;
-
-	if (!ipv4_nat_cache.ip4_tbl[index].valid) {
-		IPAERR("invalid table handle passed\n");
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	if (pthread_mutex_lock(&nat_mutex) != 0) {
-		ret = -1;
-		goto lock_mutex_fail;
-	}
-
-	/* unmap the device memory from user space */
-#ifndef IPA_ON_R3PC
-	munmap(addr, ipv4_nat_cache.ip4_tbl[index].size);
-#else
-	addr = (char *)addr - ipv4_nat_cache.ip4_tbl[index].mmap_offset;
-	munmap(addr, NAT_MMAP_MEM_SIZE);
-#endif
-
-	/* close the file descriptor of nat device */
-	if (close(ipv4_nat_cache.ip4_tbl[index].nat_fd)) {
-		IPAERR("unable to close the file descriptor\n");
-		ret = -EINVAL;
-		if (pthread_mutex_unlock(&nat_mutex) != 0)
-			goto unlock_mutex_fail;
-		goto fail;
-	}
-
-	del_cmd.table_index = index;
-	del_cmd.public_ip_addr = ipv4_nat_cache.ip4_tbl[index].public_addr;
-	ret = ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_V4_DEL_NAT, &del_cmd);
-	if (ret != 0) {
-		perror("ipa_nati_del_ipv4_table(): ioctl error value");
-		IPAERR("unable to post nat del command init Error: %d\n", ret);
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		ret = -EINVAL;
-		if (pthread_mutex_unlock(&nat_mutex) != 0)
-			goto unlock_mutex_fail;
-		goto fail;
-	}
-	IPAERR("posted IPA_IOC_V4_DEL_NAT to kernel successfully\n");
-
-	free(ipv4_nat_cache.ip4_tbl[index].index_expn_table_meta);
-	free(ipv4_nat_cache.ip4_tbl[index].rule_id_array);
-
-	memset(&ipv4_nat_cache.ip4_tbl[index],
-				 0,
-				 sizeof(ipv4_nat_cache.ip4_tbl[index]));
-
-	/* Decrease the table count by 1*/
-	ipv4_nat_cache.table_cnt--;
-
-	if (pthread_mutex_unlock(&nat_mutex) != 0) {
-		ret = -1;
-		goto unlock_mutex_fail;
-	}
-
-	return 0;
-
-lock_mutex_fail:
-	IPAERR("unable to lock the nat mutex\n");
-	return ret;
-
-unlock_mutex_fail:
-	IPAERR("unable to unlock the nat mutex\n");
-
-fail:
 	return ret;
 }
 
-int ipa_nati_query_timestamp(uint32_t  tbl_hdl,
-				uint32_t  rule_hdl,
-				uint32_t  *time_stamp)
+static void ipa_nati_copy_second_index_entry_to_head(
+	struct ipa_nat_ip4_table_cache* nat_table,
+	ipa_table_iterator* index_table_iterator,
+	struct ipa_ioc_nat_dma_cmd* cmd)
 {
-	uint8_t tbl_index = (uint8_t)(tbl_hdl - 1);
-	uint8_t expn_tbl = 0;
-	uint16_t tbl_entry = 0;
-	struct ipa_nat_rule *tbl_ptr = NULL;
+	uint16_t index;
+	struct ipa_nat_rule* table;
+	struct ipa_nat_indx_tbl_rule* index_table_rule =
+		(struct ipa_nat_indx_tbl_rule*)index_table_iterator->next_entry;
 
-	if (!ipv4_nat_cache.ip4_tbl[tbl_index].valid) {
-		IPAERR("invalid table handle\n");
-		return -EINVAL;
+	IPADBG("In\n");
+
+	/*
+	 * The DMA command for the tbl_entry field was already added by the
+	 * index_table's ipa_table_create_delete_command()
+	 */
+	ipa_table_add_dma_cmd(
+		&nat_table->index_table,
+		HELP_UPDATE_ENTRY,
+		index_table_iterator->curr_entry,
+		index_table_iterator->curr_index,
+		index_table_rule->next_index,
+		cmd);
+
+	/* Change the indx_tbl_entry field in the related table rule */
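+	/* Entries at index >= table_entries live in the expansion table. */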
+	if (index_table_rule->tbl_entry < nat_table->table.table_entries) {
+		index = index_table_rule->tbl_entry;
+		table = (struct ipa_nat_rule*)nat_table->table.table_addr;
+	} else {
+		index = index_table_rule->tbl_entry - nat_table->table.table_entries;
+		table = (struct ipa_nat_rule*)nat_table->table.expn_table_addr;
 	}
 
-	if (pthread_mutex_lock(&nat_mutex) != 0) {
-		IPAERR("unable to lock the nat mutex\n");
-		return -1;
-	}
+	table[index].indx_tbl_entry = index_table_iterator->curr_index;
 
-	ipa_nati_parse_ipv4_rule_hdl(tbl_index, (uint16_t)rule_hdl,
-															 &expn_tbl, &tbl_entry);
-
-	tbl_ptr =
-	(struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_rules_addr;
-	if (expn_tbl) {
-		tbl_ptr =
-			 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_index].ipv4_expn_rules_addr;
-	}
-
-	if (tbl_ptr)
-		*time_stamp = Read32BitFieldValue(tbl_ptr[tbl_entry].ts_proto,
-					TIME_STAMP_FIELD);
-
-	if (pthread_mutex_unlock(&nat_mutex) != 0) {
-		IPAERR("unable to unlock the nat mutex\n");
-		return -1;
-	}
-
-	return 0;
+	IPADBG("Out\n");
 }
 
-int ipa_nati_modify_pdn(struct ipa_ioc_nat_pdn_entry *entry)
+/**
+ * dst_hash() - Find the index into ipv4 base table
+ * @nat_cache_ptr: [in] NAT cache (used to check the IPA HW version)
+ * @public_ip: [in] public_ip
+ * @trgt_ip: [in] Target IP address
+ * @trgt_port: [in]  Target port
+ * @public_port: [in]  Public port
+ * @proto: [in] Protocol (TCP/IP)
+ * @size: [in] size of the ipv4 base Table
+ *
+ * This hash method is used to find the hash index of a new nat
+ * entry in the ipv4 base table. If the computed index is zero, the
+ * new entry is stored at index N-1, where N is the size of the
+ * ipv4 base table
+ *
+ * Returns: >0 index into ipv4 base table, negative on failure
+ */
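+/*
+ * Worked example (illustrative values): for trgt_ip 0xC0A80001,
+ * trgt_port 0x0050, public_port 0x1F90 and proto 0x06 on pre-IPAv4.0
+ * hardware:
+ *   hash = 0x0001 ^ 0xC0A8 ^ 0x0050 ^ 0x1F90 ^ 0x06 = 0xDF6F
+ * With a 1024-entry base table the caller passes size = 0x03FF, so
+ * the final index is 0xDF6F & 0x03FF = 0x036F; a masked result of 0
+ * would be replaced with 0x03FF.
+ */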
+static uint16_t dst_hash(
+	struct ipa_nat_cache* nat_cache_ptr,
+	uint32_t public_ip,
+	uint32_t trgt_ip,
+	uint16_t trgt_port,
+	uint16_t public_port,
+	uint8_t  proto,
+	uint16_t size)
 {
+	uint16_t hash =
+		((uint16_t)(trgt_ip))       ^
+		((uint16_t)(trgt_ip >> 16)) ^
+		(trgt_port)                 ^
+		(public_port)               ^
+		(proto);
+
+	IPADBG("In\n");
+
+	IPADBG("public_ip: 0x%08X public_port: 0x%04X\n", public_ip, public_port);
+	IPADBG("target_ip: 0x%08X target_port: 0x%04X\n", trgt_ip, trgt_port);
+	IPADBG("proto: 0x%02X size: 0x%04X\n", proto, size);
+
+	if (nat_cache_ptr->ipa_desc->ver >= IPA_HW_v4_0)
+		hash ^=
+			((uint16_t)(public_ip)) ^
+			((uint16_t)(public_ip >> 16));
+
+	/*
+	 * The size passed to the hash function is expected to be a power
+	 * of 2 minus 1, while the actual size is a power of 2:
+	 * actual_size = size + 1
+	 */
+	hash = (hash & size);
+
+	/*
+	 * If the hash came out as zero, set it to the maximum value, since
+	 * entry zero is unused in the nat tables
+	 */
+	if (hash == 0) {
+		hash = size;
+	}
+
+	IPADBG("dst_hash returning value: %d\n", hash);
+
+	IPADBG("Out\n");
+
+	return hash;
+}
+
+/**
+ * src_hash() - Find the index into ipv4 index base table
+ * @priv_ip: [in] Private IP address
+ * @priv_port: [in]  Private port
+ * @trgt_ip: [in]  Target IP address
+ * @trgt_port: [in] Target Port
+ * @proto: [in]  Protocol (TCP/IP)
+ * @size: [in] size of the ipv4 index base Table
+ *
+ * This hash method is used to find the hash index of a new nat
+ * entry in the ipv4 index base table. If the computed index is zero,
+ * the new entry is stored at index N-1, where N is the size of the
+ * ipv4 index base table
+ *
+ * Returns: >0 index into ipv4 index base table, negative on failure
+ */
+static uint16_t src_hash(
+	uint32_t priv_ip,
+	uint16_t priv_port,
+	uint32_t trgt_ip,
+	uint16_t trgt_port,
+	uint8_t  proto,
+	uint16_t size)
+{
+	uint16_t hash =
+		((uint16_t)(priv_ip))       ^
+		((uint16_t)(priv_ip >> 16)) ^
+		(priv_port)                 ^
+		((uint16_t)(trgt_ip))       ^
+		((uint16_t)(trgt_ip >> 16)) ^
+		(trgt_port)                 ^
+		(proto);
+
+	IPADBG("In\n");
+
+	IPADBG("private_ip: 0x%08X private_port: 0x%04X\n", priv_ip, priv_port);
+	IPADBG(" target_ip: 0x%08X  target_port: 0x%04X\n", trgt_ip, trgt_port);
+	IPADBG("proto: 0x%02X size: 0x%04X\n", proto, size);
+
+	/*
+	 * The size passed to the hash function is expected to be a power
+	 * of 2 minus 1, while the actual size is a power of 2:
+	 * actual_size = size + 1
+	 */
+	hash = (hash & size);
+
+	/*
+	 * If the hash came out as zero, set it to the maximum value, since
+	 * entry zero is unused in the nat tables
+	 */
+	if (hash == 0) {
+		hash = size;
+	}
+
+	IPADBG("src_hash returning value: %d\n", hash);
+
+	IPADBG("Out\n");
+
+	return hash;
+}
+
+static int ipa_nati_post_ipv4_dma_cmd(
+	struct ipa_nat_cache*       nat_cache_ptr,
+	struct ipa_ioc_nat_dma_cmd* cmd)
+{
+	char buf[4096];
+	int  ret = 0;
+
+	IPADBG("In\n");
+
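+	/* Tag the DMA command with this cache's memory type (DDR or SRAM). */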
+	cmd->mem_type = nat_cache_ptr->nmi;
+
+	*buf = '\0';
+	IPADBG("%s\n", prep_ioc_nat_dma_cmd_4print(cmd, buf, sizeof(buf)));
+
+	if (ioctl(nat_cache_ptr->ipa_desc->fd, IPA_IOC_TABLE_DMA_CMD, cmd)) {
+		IPAERR("ioctl (IPA_IOC_TABLE_DMA_CMD) on fd %d has failed\n",
+			   nat_cache_ptr->ipa_desc->fd);
+		ret = -EIO;
+		goto bail;
+	}
+
+	IPADBG("Posted IPA_IOC_TABLE_DMA_CMD to kernel successfully\n");
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * API functions exposed to the upper layers
+ * ----------------------------------------------------------------------------
+ */
+int ipa_nati_modify_pdn(
+	struct ipa_ioc_nat_pdn_entry *entry)
+{
+	struct ipa_nat_cache* nat_cache_ptr;
+	int ret = 0;
+
+	IPADBG("In\n");
+
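+	/*
+	 * Prefer the DDR cache if its descriptor is already open;
+	 * otherwise fall back to the SRAM cache.
+	 */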
+	nat_cache_ptr =
+		(ipv4_nat_cache[IPA_NAT_MEM_IN_DDR].ipa_desc) ?
+		&ipv4_nat_cache[IPA_NAT_MEM_IN_DDR]           :
+		&ipv4_nat_cache[IPA_NAT_MEM_IN_SRAM];
+
+	if ( nat_cache_ptr->ipa_desc == NULL )
+	{
+		IPAERR("Uninitialized cache file descriptor\n");
+		ret = -EIO;
+		goto done;
+	}
+
 	if (entry->public_ip == 0)
 		IPADBG("PDN %d public ip will be set  to 0\n", entry->pdn_index);
 
-	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_MODIFY_PDN, entry)) {
-		perror("ipa_nati_modify_pdn(): ioctl error value");
-		IPAERR("unable to call modify pdn icotl\n");
-		IPAERR("index %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X\n",
-			entry->pdn_index, entry->public_ip, entry->src_metadata, entry->dst_metadata);
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		return -EIO;
+	ret = ioctl(nat_cache_ptr->ipa_desc->fd, IPA_IOC_NAT_MODIFY_PDN, entry);
+
+	if ( ret ) {
+		IPAERR("unable to call modify pdn icotl\nindex %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X IPA fd %d\n",
+			   entry->pdn_index,
+			   entry->public_ip,
+			   entry->src_metadata,
+			   entry->dst_metadata,
+			   nat_cache_ptr->ipa_desc->fd);
+		goto done;
 	}
 
-	pdns[entry->pdn_index].public_ip = entry->public_ip;
+	pdns[entry->pdn_index].public_ip    = entry->public_ip;
 	pdns[entry->pdn_index].dst_metadata = entry->dst_metadata;
 	pdns[entry->pdn_index].src_metadata = entry->src_metadata;
 
 	IPADBG("posted IPA_IOC_NAT_MODIFY_PDN to kernel successfully and stored in cache\n index %d, ip 0x%X, src_metdata 0x%X, dst_metadata 0x%X\n",
-		entry->pdn_index, entry->public_ip, entry->src_metadata, entry->dst_metadata);
+		   entry->pdn_index,
+		   entry->public_ip,
+		   entry->src_metadata,
+		   entry->dst_metadata);
+done:
+	IPADBG("Out\n");
 
-	return 0;
+	return ret;
 }
 
-int ipa_nati_add_ipv4_rule(uint32_t tbl_hdl,
-				const ipa_nat_ipv4_rule *clnt_rule,
-				uint32_t *rule_hdl)
+int ipa_nati_get_pdn_index(
+	uint32_t public_ip,
+	uint8_t *pdn_index)
 {
-	struct ipa_nat_ip4_table_cache *tbl_ptr;
-	struct ipa_nat_sw_rule sw_rule;
-	struct ipa_nat_indx_tbl_sw_rule index_sw_rule;
-	uint16_t new_entry, new_index_tbl_entry;
+	int i = 0;
 
-	/* verify that the rule's PDN is valid */
-	if (clnt_rule->pdn_index >= IPA_MAX_PDN_NUM ||
-		pdns[clnt_rule->pdn_index].public_ip == 0) {
-		IPAERR("invalid parameters, pdn index %d, public ip = 0x%X\n",
-			clnt_rule->pdn_index, pdns[clnt_rule->pdn_index].public_ip);
-		return -EINVAL;
+	for(i = 0; i < (IPA_MAX_PDN_NUM - 1); i++) {
+		if(pdns[i].public_ip == public_ip) {
+			IPADBG("ip 0x%X matches PDN index %d\n", public_ip, i);
+			*pdn_index = i;
+			return 0;
+		}
 	}
 
-	memset(&sw_rule, 0, sizeof(sw_rule));
-	memset(&index_sw_rule, 0, sizeof(index_sw_rule));
+	IPAERR("ip 0x%X does not match any PDN\n", public_ip);
 
-	/* Generate rule from client input */
-	if (ipa_nati_generate_rule(tbl_hdl, clnt_rule,
-					&sw_rule, &index_sw_rule,
-					&new_entry, &new_index_tbl_entry)) {
-		IPAERR("unable to generate rule\n");
-		return -EINVAL;
-	}
+	return -EIO;
+}
 
-	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
-	ipa_nati_copy_ipv4_rule_to_hw(tbl_ptr, &sw_rule, new_entry, (uint8_t)(tbl_hdl-1));
-	ipa_nati_copy_ipv4_index_rule_to_hw(tbl_ptr,
-																			&index_sw_rule,
-																			new_index_tbl_entry,
-																			(uint8_t)(tbl_hdl-1));
+int ipa_nati_alloc_pdn(
+	ipa_nat_pdn_entry *pdn_info,
+	uint8_t *pdn_index)
+{
+	ipa_nat_pdn_entry zero_test;
+	struct ipa_ioc_nat_pdn_entry pdn_data;
+	int i, ret;
 
-	IPADBG("new entry:%d, new index entry: %d\n", new_entry, new_index_tbl_entry);
-	if (ipa_nati_post_ipv4_dma_cmd((uint8_t)(tbl_hdl - 1), new_entry)) {
-		IPAERR("unable to post dma command\n");
+	IPADBG("alloc PDN  for ip 0x%x\n", pdn_info->public_ip);
+
+	memset(&zero_test, 0, sizeof(zero_test));
+
+	if(num_pdns >= (IPA_MAX_PDN_NUM - 1)) {
+		IPAERR("exceeded max num of PDNs, num_pdns %d\n", num_pdns);
 		return -EIO;
 	}
 
-	/* Generate rule handle */
-	*rule_hdl  = ipa_nati_make_rule_hdl((uint16_t)tbl_hdl, new_entry);
-	if (!(*rule_hdl)) {
-		IPAERR("unable to generate rule handle\n");
-		return -EINVAL;
-	}
-
-#ifdef NAT_DUMP
-	ipa_nat_dump_ipv4_table(tbl_hdl);
-#endif
-
-	return 0;
-}
-
-int ipa_nati_generate_rule(uint32_t tbl_hdl,
-				const ipa_nat_ipv4_rule *clnt_rule,
-				struct ipa_nat_sw_rule *rule,
-				struct ipa_nat_indx_tbl_sw_rule *index_sw_rule,
-				uint16_t *tbl_entry,
-				uint16_t *indx_tbl_entry)
-{
-	struct ipa_nat_ip4_table_cache *tbl_ptr;
-	uint16_t tmp;
-
-	if (NULL == clnt_rule || NULL == index_sw_rule ||
-			NULL == rule || NULL == tbl_entry  ||
-			NULL == indx_tbl_entry) {
-		IPAERR("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_hdl-1];
-
-	*tbl_entry = ipa_nati_generate_tbl_rule(clnt_rule,
-																					rule,
-																					tbl_ptr);
-	if (IPA_NAT_INVALID_NAT_ENTRY == *tbl_entry) {
-		IPAERR("unable to generate table entry\n");
-		return -EINVAL;
-	}
-
-	index_sw_rule->tbl_entry = *tbl_entry;
-	*indx_tbl_entry = ipa_nati_generate_index_rule(clnt_rule,
-																								 index_sw_rule,
-																								 tbl_ptr);
-	if (IPA_NAT_INVALID_NAT_ENTRY == *indx_tbl_entry) {
-		IPAERR("unable to generate index table entry\n");
-		return -EINVAL;
-	}
-
-	rule->indx_tbl_entry = *indx_tbl_entry;
-	if (*indx_tbl_entry >= tbl_ptr->table_entries) {
-		tmp = *indx_tbl_entry - tbl_ptr->table_entries;
-		tbl_ptr->index_expn_table_meta[tmp].prev_index = index_sw_rule->prev_index;
-	}
-
-	return 0;
-}
-
-uint16_t ipa_nati_generate_tbl_rule(const ipa_nat_ipv4_rule *clnt_rule,
-						struct ipa_nat_sw_rule *sw_rule,
-						struct ipa_nat_ip4_table_cache *tbl_ptr)
-{
-	uint32_t pub_ip_addr;
-	uint16_t prev = 0, nxt_indx = 0, new_entry;
-	struct ipa_nat_rule *tbl = NULL, *expn_tbl = NULL;
-
-	pub_ip_addr = pdns[clnt_rule->pdn_index].public_ip;
-
-	tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_rules_addr;
-	expn_tbl = (struct ipa_nat_rule *)tbl_ptr->ipv4_expn_rules_addr;
-
-	/* copy the values from client rule to sw rule */
-	sw_rule->private_ip = clnt_rule->private_ip;
-	sw_rule->private_port = clnt_rule->private_port;
-	sw_rule->protocol = clnt_rule->protocol;
-	sw_rule->public_port = clnt_rule->public_port;
-	sw_rule->target_ip = clnt_rule->target_ip;
-	sw_rule->target_port = clnt_rule->target_port;
-	sw_rule->pdn_index = clnt_rule->pdn_index;
-
-	/* consider only public and private ip fields */
-	sw_rule->ip_chksum = ipa_nati_calc_ip_cksum(pub_ip_addr,
-																							clnt_rule->private_ip);
-
-	if (IPPROTO_TCP == sw_rule->protocol ||
-			IPPROTO_UDP == sw_rule->protocol) {
-		/* consider public and private ip & port fields */
-		sw_rule->tcp_udp_chksum = ipa_nati_calc_tcp_udp_cksum(
-			 pub_ip_addr,
-			 clnt_rule->public_port,
-			 clnt_rule->private_ip,
-			 clnt_rule->private_port);
-	}
-
-	sw_rule->rsvd1 = 0;
-	sw_rule->enable = IPA_NAT_FLAG_DISABLE_BIT;
-	sw_rule->next_index = 0;
-
-	/*
-		SW sets this timer to 0.
-		The assumption is that 0 is an invalid clock value and no clock
-		wraparounds are expected
-	*/
-	sw_rule->time_stamp = 0;
-	sw_rule->rsvd2 = 0;
-	sw_rule->rsvd3 = 0;
-	sw_rule->prev_index = 0;
-	sw_rule->indx_tbl_entry = 0;
-
-	new_entry = dst_hash(pub_ip_addr, clnt_rule->target_ip,
-											 clnt_rule->target_port,
-											 clnt_rule->public_port,
-											 clnt_rule->protocol,
-											 tbl_ptr->table_entries-1);
-
-	/* check whether there is any collision
-		 if no collision return */
-	if (!Read16BitFieldValue(tbl[new_entry].ip_cksm_enbl,
-													 ENABLE_FIELD)) {
-		sw_rule->prev_index = 0;
-		IPADBG("Destination Nat New Entry Index %d\n", new_entry);
-		return new_entry;
-	}
-
-	/* First collision */
-	if (Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
-													NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
-		sw_rule->prev_index = new_entry;
-	} else { /* check for more than one collision	*/
-		/* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
-		nxt_indx = Read16BitFieldValue(tbl[new_entry].nxt_indx_pub_port,
-																	 NEXT_INDEX_FIELD);
-
-		while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
-			prev = nxt_indx;
-
-			nxt_indx -= tbl_ptr->table_entries;
-			nxt_indx = Read16BitFieldValue(expn_tbl[nxt_indx].nxt_indx_pub_port,
-																		 NEXT_INDEX_FIELD);
-
-			/* Handling error case */
-			if (prev == nxt_indx) {
-				IPAERR("Error: Prev index:%d and next:%d index should not be same\n", prev, nxt_indx);
-				return IPA_NAT_INVALID_NAT_ENTRY;
+	for(i = 0; i < (IPA_MAX_PDN_NUM - 1); i++) {
+		if(pdns[i].public_ip == pdn_info->public_ip)
+		{
+			IPADBG("found the same pdn in index %d\n", i);
+			*pdn_index = i;
+			if((pdns[i].src_metadata != pdn_info->src_metadata) ||
+			   (pdns[i].dst_metadata != pdn_info->dst_metadata))
+			{
+				IPAERR("WARNING: metadata values don't match! [%d, %d], [%d, %d]\n\n",
+					   pdns[i].src_metadata, pdn_info->src_metadata,
+					   pdns[i].dst_metadata, pdn_info->dst_metadata);
 			}
+			return 0;
 		}
 
-		sw_rule->prev_index = prev;
-	}
-
-	/* On collision check for the free entry in expansion table */
-	new_entry = ipa_nati_expn_tbl_free_entry(expn_tbl,
-					tbl_ptr->expn_table_entries);
-
-	if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
-		/* Expansion table is full return*/
-		IPAERR("Expansion table is full\n");
-		IPAERR("Current Table: %d & Expn Entries: %d\n",
-			   tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
-		return IPA_NAT_INVALID_NAT_ENTRY;
-	}
-	new_entry += tbl_ptr->table_entries;
-
-	IPADBG("new entry index %d\n", new_entry);
-	return new_entry;
-}
-
-/* returns expn table entry index */
-uint16_t ipa_nati_expn_tbl_free_entry(struct ipa_nat_rule *expn_tbl,
-						uint16_t size)
-{
-	int cnt;
-
-	for (cnt = 1; cnt < size; cnt++) {
-		if (!Read16BitFieldValue(expn_tbl[cnt].ip_cksm_enbl,
-														 ENABLE_FIELD)) {
-			IPADBG("new expansion table entry index %d\n", cnt);
-			return cnt;
-		}
-	}
-
-	IPAERR("nat expansion table is full\n");
-	return 0;
-}
-
-uint16_t ipa_nati_generate_index_rule(const ipa_nat_ipv4_rule *clnt_rule,
-						struct ipa_nat_indx_tbl_sw_rule *sw_rule,
-						struct ipa_nat_ip4_table_cache *tbl_ptr)
-{
-	struct ipa_nat_indx_tbl_rule *indx_tbl, *indx_expn_tbl;
-	uint16_t prev = 0, nxt_indx = 0, new_entry;
-
-	indx_tbl =
-	(struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_addr;
-	indx_expn_tbl =
-	(struct ipa_nat_indx_tbl_rule *)tbl_ptr->index_table_expn_addr;
-
-	new_entry = src_hash(clnt_rule->private_ip,
-											 clnt_rule->private_port,
-											 clnt_rule->target_ip,
-											 clnt_rule->target_port,
-											 clnt_rule->protocol,
-											 tbl_ptr->table_entries-1);
-
-	/* check whether there is any collision
-		 if no collision return */
-	if (!Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
-													 INDX_TBL_TBL_ENTRY_FIELD)) {
-		sw_rule->prev_index = 0;
-		IPADBG("Source Nat Index Table Entry %d\n", new_entry);
-		return new_entry;
-	}
-
-	/* check for more than one collision	*/
-	if (Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
-													INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
-		sw_rule->prev_index = new_entry;
-		IPADBG("First collosion. Entry %d\n", new_entry);
-	} else {
-		/* Find the IPA_NAT_DEL_TYPE_LAST entry in list */
-		nxt_indx = Read16BitFieldValue(indx_tbl[new_entry].tbl_entry_nxt_indx,
-																	 INDX_TBL_NEXT_INDEX_FILED);
-
-		while (nxt_indx != IPA_NAT_INVALID_NAT_ENTRY) {
-			prev = nxt_indx;
-
-			nxt_indx -= tbl_ptr->table_entries;
-			nxt_indx = Read16BitFieldValue(indx_expn_tbl[nxt_indx].tbl_entry_nxt_indx,
-																		 INDX_TBL_NEXT_INDEX_FILED);
-
-			/* Handling error case */
-			if (prev == nxt_indx) {
-				IPAERR("Error: Prev:%d and next:%d index should not be same\n", prev, nxt_indx);
-				return IPA_NAT_INVALID_NAT_ENTRY;
-			}
-		}
-
-		sw_rule->prev_index = prev;
-	}
-
-	/* On collision check for the free entry in expansion table */
-	new_entry = ipa_nati_index_expn_get_free_entry(indx_expn_tbl,
-					tbl_ptr->expn_table_entries);
-
-	if (IPA_NAT_INVALID_NAT_ENTRY == new_entry) {
-		/* Expansion table is full return*/
-		IPAERR("Index expansion table is full\n");
-		IPAERR("Current Table: %d & Expn Entries: %d\n",
-			   tbl_ptr->cur_tbl_cnt, tbl_ptr->cur_expn_tbl_cnt);
-		return IPA_NAT_INVALID_NAT_ENTRY;
-	}
-	new_entry += tbl_ptr->table_entries;
-
-
-	if (sw_rule->prev_index == new_entry) {
-		IPAERR("Error: prev_entry:%d ", sw_rule->prev_index);
-		IPAERR("and new_entry:%d should not be same ", new_entry);
-		IPAERR("infinite loop detected\n");
-		return IPA_NAT_INVALID_NAT_ENTRY;
-	}
-
-	IPADBG("index table entry %d\n", new_entry);
-	return new_entry;
-}
-
-/* returns index expn table entry index */
-uint16_t ipa_nati_index_expn_get_free_entry(
-						struct ipa_nat_indx_tbl_rule *indx_tbl,
-						uint16_t size)
-{
-	int cnt;
-	for (cnt = 1; cnt < size; cnt++) {
-		if (!Read16BitFieldValue(indx_tbl[cnt].tbl_entry_nxt_indx,
-														 INDX_TBL_TBL_ENTRY_FIELD)) {
-			return cnt;
-		}
-	}
-
-	IPAERR("nat index expansion table is full\n");
-	return 0;
-}
-
-void ipa_nati_write_next_index(uint8_t tbl_indx,
-				nat_table_type tbl_type,
-				uint16_t value,
-				uint32_t offset)
-{
-	struct ipa_ioc_nat_dma_cmd *cmd;
-
-	IPADBG("Updating next index field of table %d on collosion using dma\n", tbl_type);
-	IPADBG("table index: %d, value: %d offset;%d\n", tbl_indx, value, offset);
-
-	cmd = (struct ipa_ioc_nat_dma_cmd *)
-	malloc(sizeof(struct ipa_ioc_nat_dma_cmd)+
-				 sizeof(struct ipa_ioc_nat_dma_one));
-	if (NULL == cmd) {
-		IPAERR("unable to allocate memory\n");
-		return;
-	}
-
-	cmd->dma[0].table_index = tbl_indx;
-	cmd->dma[0].base_addr = tbl_type;
-	cmd->dma[0].data = value;
-	cmd->dma[0].offset = offset;
-
-	cmd->entries = 1;
-	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
-		perror("ipa_nati_post_ipv4_dma_cmd(): ioctl error value");
-		IPAERR("unable to call dma icotl to update next index\n");
-		IPAERR("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		goto fail;
-	}
-
-fail:
-	free(cmd);
-
-	return;
-}
-
-void ipa_nati_copy_ipv4_rule_to_hw(
-				struct ipa_nat_ip4_table_cache *ipv4_cache,
-				struct ipa_nat_sw_rule *rule,
-				uint16_t entry, uint8_t tbl_index)
-{
-	struct ipa_nat_rule *tbl_ptr;
-	uint16_t prev_entry = rule->prev_index;
-	nat_table_type tbl_type;
-	uint32_t offset = 0;
-
-	if (entry < ipv4_cache->table_entries) {
-		tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;
-
-		memcpy(&tbl_ptr[entry],
-					 rule,
-					 sizeof(struct ipa_nat_rule));
-	} else {
-		tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_expn_rules_addr;
-		memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
-					 rule,
-					 sizeof(struct ipa_nat_rule));
-	}
-
-	/* Update the previos entry next_index */
-	if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {
-
-		if (prev_entry < ipv4_cache->table_entries) {
-			tbl_type = IPA_NAT_BASE_TBL;
-			tbl_ptr = (struct ipa_nat_rule *)ipv4_cache->ipv4_rules_addr;
-		} else {
-			tbl_type = IPA_NAT_EXPN_TBL;
-			/* tbp_ptr is already pointing to expansion table
-				 no need to initialize it */
-			prev_entry = prev_entry - ipv4_cache->table_entries;
-		}
-
-		offset = ipa_nati_get_entry_offset(ipv4_cache, tbl_type, prev_entry);
-		offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
-
-		ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
-	}
-
-	return;
-}
-
-void ipa_nati_copy_ipv4_index_rule_to_hw(
-				struct ipa_nat_ip4_table_cache *ipv4_cache,
-				struct ipa_nat_indx_tbl_sw_rule *indx_sw_rule,
-				uint16_t entry,
-				uint8_t tbl_index)
-{
-	struct ipa_nat_indx_tbl_rule *tbl_ptr;
-	struct ipa_nat_sw_indx_tbl_rule sw_rule;
-	uint16_t prev_entry = indx_sw_rule->prev_index;
-	nat_table_type tbl_type;
-	uint16_t offset = 0;
-
-	sw_rule.next_index = indx_sw_rule->next_index;
-	sw_rule.tbl_entry = indx_sw_rule->tbl_entry;
-
-	if (entry < ipv4_cache->table_entries) {
-		tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;
-
-		memcpy(&tbl_ptr[entry],
-					 &sw_rule,
-					 sizeof(struct ipa_nat_indx_tbl_rule));
-	} else {
-		tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_expn_addr;
-
-		memcpy(&tbl_ptr[entry - ipv4_cache->table_entries],
-					 &sw_rule,
-					 sizeof(struct ipa_nat_indx_tbl_rule));
-	}
-
-	/* Update the next field of previous entry on collosion */
-	if (IPA_NAT_INVALID_NAT_ENTRY != prev_entry) {
-		if (prev_entry < ipv4_cache->table_entries) {
-			tbl_type = IPA_NAT_INDX_TBL;
-			tbl_ptr = (struct ipa_nat_indx_tbl_rule *)ipv4_cache->index_table_addr;
-		} else {
-			tbl_type = IPA_NAT_INDEX_EXPN_TBL;
-			/* tbp_ptr is already pointing to expansion table
-			 no need to initialize it */
-			prev_entry = prev_entry - ipv4_cache->table_entries;
-		}
-
-		offset = ipa_nati_get_index_entry_offset(ipv4_cache, tbl_type, prev_entry);
-		offset += IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
-
-		IPADBG("Updating next index field of index table on collosion using dma()\n");
-		ipa_nati_write_next_index(tbl_index, tbl_type, entry, offset);
-	}
-
-	return;
-}
-
-int ipa_nati_post_ipv4_dma_cmd(uint8_t tbl_indx,
-				uint16_t entry)
-{
-	struct ipa_ioc_nat_dma_cmd *cmd;
-	struct ipa_nat_rule *tbl_ptr;
-	uint32_t offset = ipv4_nat_cache.ip4_tbl[tbl_indx].tbl_addr_offset;
-	int ret = 0;
-
-	cmd = (struct ipa_ioc_nat_dma_cmd *)
-	malloc(sizeof(struct ipa_ioc_nat_dma_cmd)+
-				 sizeof(struct ipa_ioc_nat_dma_one));
-	if (NULL == cmd) {
-		IPAERR("unable to allocate memory\n");
-		return -ENOMEM;
-	}
-
-	if (entry < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries) {
-		tbl_ptr =
-			 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;
-
-		cmd->dma[0].table_index = tbl_indx;
-		cmd->dma[0].base_addr = IPA_NAT_BASE_TBL;
-		cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;
-
-		cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
-		cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
-	} else {
-		tbl_ptr =
-			 (struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_expn_rules_addr;
-		entry = entry - ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
-
-		cmd->dma[0].table_index = tbl_indx;
-		cmd->dma[0].base_addr = IPA_NAT_EXPN_TBL;
-		cmd->dma[0].data = IPA_NAT_FLAG_ENABLE_BIT_MASK;
-
-		cmd->dma[0].offset = (char *)&tbl_ptr[entry] - (char *)tbl_ptr;
-		cmd->dma[0].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
-		cmd->dma[0].offset += offset;
-	}
-
-	cmd->entries = 1;
-	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
-		perror("ipa_nati_post_ipv4_dma_cmd(): ioctl error value");
-		IPAERR("unable to call dma icotl\n");
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		ret = -EIO;
-		goto fail;
-	}
-	IPADBG("posted IPA_IOC_NAT_DMA to kernel successfully during add operation\n");
-
-
-fail:
-	free(cmd);
-
-	return ret;
-}
-
-
-int ipa_nati_del_ipv4_rule(uint32_t tbl_hdl,
-				uint32_t rule_hdl)
-{
-	uint8_t expn_tbl;
-	uint16_t tbl_entry;
-	struct ipa_nat_ip4_table_cache *tbl_ptr;
-	del_type rule_pos;
-	uint8_t tbl_indx = (uint8_t)(tbl_hdl - 1);
-	int ret;
-
-	/* Parse the rule handle */
-	ipa_nati_parse_ipv4_rule_hdl(tbl_indx, (uint16_t)rule_hdl,
-															 &expn_tbl, &tbl_entry);
-	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_entry) {
-		IPAERR("Invalid Rule Entry\n");
-		ret = -EINVAL;
-		goto fail;
-	}
-
-	if (pthread_mutex_lock(&nat_mutex) != 0) {
-		ret = -1;
-		goto mutex_lock_error;
-	}
-
-	IPADBG("Delete below rule\n");
-	IPADBG("tbl_entry:%d expn_tbl:%d\n", tbl_entry, expn_tbl);
-
-	tbl_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
-	if (!tbl_ptr->valid) {
-		IPAERR("invalid table handle\n");
-		ret = -EINVAL;
-		if (pthread_mutex_unlock(&nat_mutex) != 0)
-			goto mutex_unlock_error;
-		goto fail;
-	}
-
-	ipa_nati_find_rule_pos(tbl_ptr, expn_tbl,
-												 tbl_entry, &rule_pos);
-	IPADBG("rule_pos:%d\n", rule_pos);
-
-	if (ipa_nati_post_del_dma_cmd(tbl_indx, tbl_entry,
-					expn_tbl, rule_pos)) {
-		ret = -EINVAL;
-		if (pthread_mutex_unlock(&nat_mutex) != 0)
-			goto mutex_unlock_error;
-		goto fail;
-	}
-
-	ipa_nati_del_dead_ipv4_head_nodes(tbl_indx);
-
-	/* Reset rule_id_array entry */
-	ipv4_nat_cache.ip4_tbl[tbl_indx].rule_id_array[rule_hdl-1] =
-	IPA_NAT_INVALID_NAT_ENTRY;
-
-#ifdef NAT_DUMP
-	IPADBG("Dumping Table after deleting rule\n");
-	ipa_nat_dump_ipv4_table(tbl_hdl);
-#endif
-
-	if (pthread_mutex_unlock(&nat_mutex) != 0) {
-		ret = -1;
-		goto mutex_unlock_error;
-	}
-
-	return 0;
-
-mutex_lock_error:
-	IPAERR("unable to lock the nat mutex\n");
-	return ret;
-
-mutex_unlock_error:
-	IPAERR("unable to unlock the nat mutex\n");
-
-fail:
-	return ret;
-}
-
-void ReorderCmds(struct ipa_ioc_nat_dma_cmd *cmd, int size)
-{
-	int indx_tbl_start = 0, cnt, cnt1;
-	struct ipa_ioc_nat_dma_cmd *tmp;
-
-	IPADBG("called ReorderCmds() with entries :%d\n", cmd->entries);
-
-	for (cnt = 0; cnt < cmd->entries; cnt++) {
-		if (cmd->dma[cnt].base_addr == IPA_NAT_INDX_TBL ||
-				cmd->dma[cnt].base_addr == IPA_NAT_INDEX_EXPN_TBL) {
-			indx_tbl_start = cnt;
+		if(!memcmp((pdns + i), &zero_test, sizeof(ipa_nat_pdn_entry)))
+		{
+			IPADBG("found an empty pdn in index %d\n", i);
 			break;
 		}
 	}
 
-	if (indx_tbl_start == 0) {
-		IPADBG("Reorder not needed\n");
-		return;
+	if(i >= (IPA_MAX_PDN_NUM - 1))
+	{
+		IPAERR("couldn't find an empty entry while num is %d\n",
+			   num_pdns);
+		return -EIO;
 	}
 
-	tmp = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
-	if (tmp == NULL) {
-		IPAERR("unable to allocate memory\n");
-		return;
+	pdn_data.pdn_index    = i;
+	pdn_data.public_ip    = pdn_info->public_ip;
+	pdn_data.src_metadata = pdn_info->src_metadata;
+	pdn_data.dst_metadata = pdn_info->dst_metadata;
+
+	ret = ipa_nati_modify_pdn(&pdn_data);
+	if(!ret)
+	{
+		num_pdns++;
+		*pdn_index = i;
+		IPADBG("modify num_pdns (%d)\n", num_pdns);
 	}
 
-	cnt1 = 0;
-	tmp->entries = cmd->entries;
-	for (cnt = indx_tbl_start; cnt < cmd->entries; cnt++) {
-		tmp->dma[cnt1] = cmd->dma[cnt];
-		cnt1++;
-	}
-
-	for (cnt = 0; cnt < indx_tbl_start; cnt++) {
-		tmp->dma[cnt1] = cmd->dma[cnt];
-		cnt1++;
-	}
-
-	memset(cmd, 0, size);
-	memcpy(cmd, tmp, size);
-	free(tmp);
-
-	return;
+	return ret;
 }
 
-int ipa_nati_post_del_dma_cmd(uint8_t tbl_indx,
-				uint16_t cur_tbl_entry,
-				uint8_t expn_tbl,
-				del_type rule_pos)
+int ipa_nati_get_pdn_cnt(void)
 {
+	return num_pdns;
+}
 
-#define MAX_DMA_ENTRIES_FOR_DEL 3
+int ipa_nati_dealloc_pdn(
+	uint8_t pdn_index)
+{
+	ipa_nat_pdn_entry zero_test;
+	struct ipa_ioc_nat_pdn_entry pdn_data;
+	int ret;
 
-	struct ipa_nat_ip4_table_cache *cache_ptr;
-	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
-	struct ipa_nat_rule *tbl_ptr;
-	int ret = 0, size = 0;
+	IPADBG(" trying to deallocate PDN index %d\n", pdn_index);
 
-	uint16_t indx_tbl_entry = IPA_NAT_INVALID_NAT_ENTRY;
-	del_type indx_rule_pos;
-
-	struct ipa_ioc_nat_dma_cmd *cmd;
-	uint8_t no_of_cmds = 0;
-
-	uint16_t prev_entry = IPA_NAT_INVALID_NAT_ENTRY;
-	uint16_t next_entry = IPA_NAT_INVALID_NAT_ENTRY;
-	uint16_t indx_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
-	uint16_t indx_next_next_entry = IPA_NAT_INVALID_NAT_ENTRY;
-	uint16_t table_entry;
-
-	size = sizeof(struct ipa_ioc_nat_dma_cmd)+
-	(MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one));
-
-	cmd = (struct ipa_ioc_nat_dma_cmd *)malloc(size);
-	if (NULL == cmd) {
-		IPAERR("unable to allocate memory\n");
-		return -ENOMEM;
+	if(!num_pdns)
+	{
+		IPAERR("pdn table is already empty\n");
+		return -EIO;
 	}
 
-	cache_ptr = &ipv4_nat_cache.ip4_tbl[tbl_indx];
-	if (!expn_tbl) {
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
-	} else {
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
+	memset(&zero_test, 0, sizeof(zero_test));
+
+	if(!memcmp((pdns + pdn_index), &zero_test, sizeof(ipa_nat_pdn_entry)))
+	{
+		IPAERR("pdn entry is a zero entry\n");
+		return -EIO;
 	}
 
+	IPADBG("PDN in index %d has ip 0x%X\n", pdn_index, pdns[pdn_index].public_ip);
 
-	if (!Read16BitFieldValue(tbl_ptr[cur_tbl_entry].ip_cksm_enbl,
-													 ENABLE_FIELD)) {
-		IPAERR("Deleting invalid(not enabled) rule\n");
+	pdn_data.pdn_index    = pdn_index;
+	pdn_data.src_metadata = 0;
+	pdn_data.dst_metadata = 0;
+	pdn_data.public_ip    = 0;
+
+	ret = ipa_nati_modify_pdn(&pdn_data);
+	if(ret)
+	{
+		IPAERR("failed modifying PDN\n");
+		return -EIO;
+	}
+
+	memset((pdns + pdn_index), 0, sizeof(ipa_nat_pdn_entry));
+
+	num_pdns--;
+
+	IPADBG("successfully removed pdn from index %d num_pdns %d\n", pdn_index, num_pdns);
+
+	return 0;
+}
+
+/*
+ * ----------------------------------------------------------------------------
+ * Previously public API functions, but have been hijacked (in
+ * ipa_nat_statemach.c).  The new definitions that replaced these now
+ * call the functions below.
+ * ----------------------------------------------------------------------------
+ */
+int ipa_NATI_post_ipv4_init_cmd(
+	uint32_t tbl_hdl )
+{
+	enum ipa3_nat_mem_in            nmi;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	int ret;
+
+	IPADBG("In\n");
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
 		ret = -EINVAL;
-		goto fail;
+		goto bail;
 	}
 
-	indx_tbl_entry =
-		Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
-		SW_SPEC_PARAM_INDX_TBL_ENTRY_FIELD);
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
 
-	/* ================================================
-	 Base Table rule Deletion
-	 ================================================*/
-	/* Just delete the current rule by disabling the flag field */
-	if (IPA_NAT_DEL_TYPE_ONLY_ONE == rule_pos) {
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
-		cmd->dma[no_of_cmds].data = IPA_NAT_FLAG_DISABLE_BIT_MASK;
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
 
-		cmd->dma[no_of_cmds].offset =
-			 ipa_nati_get_entry_offset(cache_ptr,
-					cmd->dma[no_of_cmds].base_addr,
-					cur_tbl_entry);
-		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_FLAG_FIELD_OFFSET;
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
 	}
 
-	/* Just update the protocol field to invalid */
-	else if (IPA_NAT_DEL_TYPE_HEAD == rule_pos) {
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
-		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_PROTO_FIELD_VALUE;
-
-		cmd->dma[no_of_cmds].offset =
-			 ipa_nati_get_entry_offset(cache_ptr,
-					cmd->dma[no_of_cmds].base_addr,
-					cur_tbl_entry);
-		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_PROTO_FIELD_OFFSET;
-
-		IPADBG("writing invalid proto: 0x%x\n", cmd->dma[no_of_cmds].data);
+	if ( ! nat_cache_ptr->table_cnt ) {
+		IPAERR("No initialized table in NAT cache\n");
+		ret = -EINVAL;
+		goto unlock;
 	}
 
-	/*
-			 Update the previous entry of next_index field value
-			 with current entry next_index field value
-	*/
-	else if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
-		prev_entry =
-			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
-				SW_SPEC_PARAM_PREV_INDEX_FIELD);
+	nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1];
 
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].data =
-			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
-					NEXT_INDEX_FIELD);
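+	/*
+	 * focus_change is true here: this path re-initializes a table
+	 * that was already created via ipa_NATI_add_ipv4_tbl().
+	 */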
+	ret = ipa_nati_post_ipv4_init_cmd(
+		nat_cache_ptr,
+		nat_table,
+		tbl_hdl - 1,
+		true);
 
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
-		if (prev_entry >= cache_ptr->table_entries) {
-			cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
-			prev_entry -= cache_ptr->table_entries;
-		}
-
-		cmd->dma[no_of_cmds].offset =
-			ipa_nati_get_entry_offset(cache_ptr,
-				cmd->dma[no_of_cmds].base_addr, prev_entry);
-
-		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
+	if (ret) {
+		IPAERR("unable to post nat_init command Error %d\n", ret);
+		goto unlock;
 	}
 
-	/*
-			 Reset the previous entry of next_index field with 0
-	*/
-	else if (IPA_NAT_DEL_TYPE_LAST == rule_pos) {
-		prev_entry =
-			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
-				SW_SPEC_PARAM_PREV_INDEX_FIELD);
+	active_nat_cache_ptr = nat_cache_ptr;
 
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
-
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_BASE_TBL;
-		if (prev_entry >= cache_ptr->table_entries) {
-			cmd->dma[no_of_cmds].base_addr = IPA_NAT_EXPN_TBL;
-			prev_entry -= cache_ptr->table_entries;
-		}
-
-		cmd->dma[no_of_cmds].offset =
-			ipa_nati_get_entry_offset(cache_ptr,
-				cmd->dma[no_of_cmds].base_addr, prev_entry);
-
-		cmd->dma[no_of_cmds].offset += IPA_NAT_RULE_NEXT_FIELD_OFFSET;
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
 	}
 
-	/* ================================================
-	 Base Table rule Deletion End
-	 ================================================*/
-
-	/* ================================================
-	 Index Table rule Deletion
-	 ================================================*/
-	ipa_nati_find_index_rule_pos(cache_ptr,
-															 indx_tbl_entry,
-															 &indx_rule_pos);
-	IPADBG("Index table entry: 0x%x\n", indx_tbl_entry);
-	IPADBG("and position: %d\n", indx_rule_pos);
-	if (indx_tbl_entry >= cache_ptr->table_entries) {
-		indx_tbl_entry -= cache_ptr->table_entries;
-		indx_tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
-	} else {
-		indx_tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
-	}
-
-	/* Just delete the current rule by resetting nat_table_index field to 0 */
-	if (IPA_NAT_DEL_TYPE_ONLY_ONE == indx_rule_pos) {
-		no_of_cmds++;
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
-
-		cmd->dma[no_of_cmds].offset =
-			ipa_nati_get_index_entry_offset(cache_ptr,
-			cmd->dma[no_of_cmds].base_addr,
-			indx_tbl_entry);
-
-		cmd->dma[no_of_cmds].offset +=
-			IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
-	}
-
-	/* copy the next entry values to current entry */
-	else if (IPA_NAT_DEL_TYPE_HEAD == indx_rule_pos) {
-		next_entry =
-			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
-				INDX_TBL_NEXT_INDEX_FILED);
-
-		next_entry -= cache_ptr->table_entries;
-
-		no_of_cmds++;
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-
-		/* Copy the nat_table_index field value of next entry */
-		indx_tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
-		cmd->dma[no_of_cmds].data =
-			Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
-				INDX_TBL_TBL_ENTRY_FIELD);
-
-		cmd->dma[no_of_cmds].offset =
-			ipa_nati_get_index_entry_offset(cache_ptr,
-					cmd->dma[no_of_cmds].base_addr,
-					indx_tbl_entry);
-
-		cmd->dma[no_of_cmds].offset +=
-			IPA_NAT_INDEX_RULE_NAT_INDEX_FIELD_OFFSET;
-
-		/* Copy the next_index field value of next entry */
-		no_of_cmds++;
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].data =
-			Read16BitFieldValue(indx_tbl_ptr[next_entry].tbl_entry_nxt_indx,
-				INDX_TBL_NEXT_INDEX_FILED);
-
-		cmd->dma[no_of_cmds].offset =
-			ipa_nati_get_index_entry_offset(cache_ptr,
-				cmd->dma[no_of_cmds].base_addr, indx_tbl_entry);
-
-		cmd->dma[no_of_cmds].offset +=
-			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
-		indx_next_entry = next_entry;
-	}
-
-	/*
-			 Update the previous entry of next_index field value
-			 with current entry next_index field value
-	*/
-	else if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
-		prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
-
-		no_of_cmds++;
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].data =
-			Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
-				INDX_TBL_NEXT_INDEX_FILED);
-
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
-		if (prev_entry >= cache_ptr->table_entries) {
-			cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
-			prev_entry -= cache_ptr->table_entries;
-		}
-
-		IPADBG("prev_entry: %d update with cur next_index: %d\n",
-				prev_entry, cmd->dma[no_of_cmds].data);
-		IPADBG("prev_entry: %d exist in table_type:%d\n",
-				prev_entry, cmd->dma[no_of_cmds].base_addr);
-
-		cmd->dma[no_of_cmds].offset =
-			ipa_nati_get_index_entry_offset(cache_ptr,
-				cmd->dma[no_of_cmds].base_addr, prev_entry);
-
-		cmd->dma[no_of_cmds].offset +=
-			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
-	}
-
-	/* Reset the previous entry next_index field with 0 */
-	else if (IPA_NAT_DEL_TYPE_LAST == indx_rule_pos) {
-		prev_entry = cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
-
-		no_of_cmds++;
-		cmd->dma[no_of_cmds].table_index = tbl_indx;
-		cmd->dma[no_of_cmds].data = IPA_NAT_INVALID_NAT_ENTRY;
-
-		cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDX_TBL;
-		if (prev_entry >= cache_ptr->table_entries) {
-			cmd->dma[no_of_cmds].base_addr = IPA_NAT_INDEX_EXPN_TBL;
-			prev_entry -= cache_ptr->table_entries;
-		}
-
-		IPADBG("Reseting prev_entry: %d next_index\n", prev_entry);
-		IPADBG("prev_entry: %d exist in table_type:%d\n",
-			prev_entry, cmd->dma[no_of_cmds].base_addr);
-
-		cmd->dma[no_of_cmds].offset =
-			 ipa_nati_get_index_entry_offset(cache_ptr,
-					cmd->dma[no_of_cmds].base_addr, prev_entry);
-
-		cmd->dma[no_of_cmds].offset +=
-			IPA_NAT_INDEX_RULE_NEXT_FIELD_OFFSET;
-	}
-
-	/* ================================================
-	 Index Table rule Deletion End
-	 ================================================*/
-	cmd->entries = no_of_cmds + 1;
-
-	if (cmd->entries > 1) {
-		ReorderCmds(cmd, size);
-	}
-	if (ioctl(ipv4_nat_cache.ipa_fd, IPA_IOC_NAT_DMA, cmd)) {
-		perror("ipa_nati_post_del_dma_cmd(): ioctl error value");
-		IPAERR("unable to post cmd\n");
-		IPADBG("ipa fd %d\n", ipv4_nat_cache.ipa_fd);
-		ret = -EIO;
-		goto fail;
-	}
-
-	/* if entry exist in IPA_NAT_DEL_TYPE_MIDDLE of list
-			 Update the previous entry in sw specific parameters
-	*/
-	if (IPA_NAT_DEL_TYPE_MIDDLE == rule_pos) {
-		/* Retrieve the current entry prev_entry value */
-		prev_entry =
-			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].sw_spec_params,
-				SW_SPEC_PARAM_PREV_INDEX_FIELD);
-
-		/* Retrieve the next entry */
-		next_entry =
-			Read16BitFieldValue(tbl_ptr[cur_tbl_entry].nxt_indx_pub_port,
-				NEXT_INDEX_FIELD);
-
-		next_entry -= cache_ptr->table_entries;
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
-
-		/* copy the current entry prev_entry value to next entry*/
-		UpdateSwSpecParams(&tbl_ptr[next_entry],
-											 IPA_NAT_SW_PARAM_PREV_INDX_BYTE,
-											 prev_entry);
-	}
-
-	/* Reset the other field values of current delete entry
-			 In case of IPA_NAT_DEL_TYPE_HEAD, don't reset */
-	if (IPA_NAT_DEL_TYPE_HEAD != rule_pos) {
-		memset(&tbl_ptr[cur_tbl_entry], 0, sizeof(struct ipa_nat_rule));
-	}
-
-	if (indx_rule_pos == IPA_NAT_DEL_TYPE_HEAD) {
-
-    /* Update next next entry previous value to current
-       entry as we moved the next entry values
-       to current entry */
-		indx_next_next_entry =
-			Read16BitFieldValue(indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx,
-				INDX_TBL_NEXT_INDEX_FILED);
-
-		if (indx_next_next_entry != 0 &&
-			indx_next_next_entry >= cache_ptr->table_entries) {
-
-			IPADBG("Next Next entry: %d\n", indx_next_next_entry);
-			indx_next_next_entry -= cache_ptr->table_entries;
-
-			IPADBG("Updating entry: %d prev index to: %d\n",
-				indx_next_next_entry, indx_tbl_entry);
-			cache_ptr->index_expn_table_meta[indx_next_next_entry].prev_index =
-				 indx_tbl_entry;
-		}
-
-    /* Now reset the next entry as we copied
-				the next entry to current entry */
-		IPADBG("Resetting, index table entry(Proper): %d\n",
-			(cache_ptr->table_entries + indx_next_entry));
-
-    /* This resets both table entry and next index values */
-		indx_tbl_ptr[indx_next_entry].tbl_entry_nxt_indx = 0;
-
-		/*
-				 In case of IPA_NAT_DEL_TYPE_HEAD, update the sw specific parameters
-				 (index table entry) of base table entry
-		*/
-		indx_tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
-		table_entry =
-				Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
-						INDX_TBL_TBL_ENTRY_FIELD);
-
-		if (table_entry >= cache_ptr->table_entries) {
-			tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
-			table_entry -= cache_ptr->table_entries;
-		} else {
-			tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
-		}
-
-		UpdateSwSpecParams(&tbl_ptr[table_entry],
-				IPA_NAT_SW_PARAM_INDX_TBL_ENTRY_BYTE,
-				indx_tbl_entry);
-	} else {
-		/* Update the prev_entry value (in index_expn_table_meta)
-				 for the next_entry in list with current entry prev_entry value
-		*/
-		if (IPA_NAT_DEL_TYPE_MIDDLE == indx_rule_pos) {
-			next_entry =
-				Read16BitFieldValue(indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx,
-					INDX_TBL_NEXT_INDEX_FILED);
-
-			if (next_entry >= cache_ptr->table_entries) {
-				next_entry -= cache_ptr->table_entries;
-			}
-
-			cache_ptr->index_expn_table_meta[next_entry].prev_index =
-				 cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index;
-
-			cache_ptr->index_expn_table_meta[indx_tbl_entry].prev_index =
-				 IPA_NAT_INVALID_NAT_ENTRY;
-		}
-
-		IPADBG("At, indx_tbl_entry value: %d\n", indx_tbl_entry);
-		IPADBG("At, indx_tbl_entry member address: %p\n",
-					 &indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx);
-
-		indx_tbl_ptr[indx_tbl_entry].tbl_entry_nxt_indx = 0;
-
-	}
-
-fail:
-	free(cmd);
+bail:
+	IPADBG("Out\n");
 
 	return ret;
 }
 
-void ipa_nati_find_index_rule_pos(
-				struct ipa_nat_ip4_table_cache *cache_ptr,
-				uint16_t tbl_entry,
-				del_type *rule_pos)
+/**
+ * ipa_NATI_add_ipv4_tbl() - Adds a new IPv4 NAT table
+ * @nmi: [in] the desired memory type (cache) to use
+ * @public_ip_addr: [in] public IPv4 address
+ * @number_of_entries: [in] number of NAT entries
+ * @tbl_hdl: [out] handle of the new IPv4 NAT table
+ *
+ * This function creates a new IPv4 NAT table and posts the IPv4 NAT init command to HW
+ *
+ * Returns:	0  On Success, negative on failure
+ */
+int ipa_NATI_add_ipv4_tbl(
+	enum ipa3_nat_mem_in nmi,
+	uint32_t             public_ip_addr,
+	uint16_t             number_of_entries,
+	uint32_t*            tbl_hdl )
 {
-	struct ipa_nat_indx_tbl_rule *tbl_ptr;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	int ret = 0;
 
-	if (tbl_entry >= cache_ptr->table_entries) {
-		tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_expn_addr;
+	IPADBG("In\n");
 
-		tbl_entry -= cache_ptr->table_entries;
-		if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
-					INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
-			*rule_pos = IPA_NAT_DEL_TYPE_LAST;
-		} else {
-			*rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
-		}
-	} else {
-		tbl_ptr =
-			 (struct ipa_nat_indx_tbl_rule *)cache_ptr->index_table_addr;
+	*tbl_hdl = 0;
 
-		if (Read16BitFieldValue(tbl_ptr[tbl_entry].tbl_entry_nxt_indx,
-					INDX_TBL_NEXT_INDEX_FILED) == IPA_NAT_INVALID_NAT_ENTRY) {
-			*rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
-		} else {
-			*rule_pos = IPA_NAT_DEL_TYPE_HEAD;
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	nat_cache_ptr->nmi = nmi;
+
+	if (nat_cache_ptr->table_cnt >= IPA_NAT_MAX_IP4_TBLS) {
+		IPAERR(
+			"Can't add an additional NAT table. Maximum %d tables allowed\n",
+			IPA_NAT_MAX_IP4_TBLS);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if ( ! nat_cache_ptr->ipa_desc ) {
+		nat_cache_ptr->ipa_desc = ipa_descriptor_open();
+		if ( nat_cache_ptr->ipa_desc == NULL ) {
+			IPAERR("failed to open IPA driver file descriptor\n");
+			ret = -EIO;
+			goto unlock;
 		}
 	}
+
+	nat_table = &nat_cache_ptr->ip4_tbl[nat_cache_ptr->table_cnt];
+
+	ret = ipa_nati_create_table(
+		nat_cache_ptr,
+		nat_table,
+		public_ip_addr,
+		number_of_entries,
+		nat_cache_ptr->table_cnt);
+
+	if (ret) {
+		IPAERR("unable to create nat table Error: %d\n", ret);
+		goto failed_create_table;
+	}
+
+	/*
+	 * Initialize the ipa hw with nat table dimensions
+	 */
+	ret = ipa_nati_post_ipv4_init_cmd(
+		nat_cache_ptr,
+		nat_table,
+		nat_cache_ptr->table_cnt,
+		false);
+
+	if (ret) {
+		IPAERR("unable to post nat_init command Error %d\n", ret);
+		goto failed_post_init_cmd;
+	}
+
+	active_nat_cache_ptr = nat_cache_ptr;
+
+	/*
+	 * Store the initial public IP address in the cached PDN table.
+	 * This is backward compatible for pre-IPAv4 versions; we will
+	 * always use this IP as the single PDN address.
+	 */
+	pdns[0].public_ip = public_ip_addr;
+	num_pdns = 1;
+
+	nat_cache_ptr->table_cnt++;
+
+	/*
+	 * Return table handle
+	 */
+	*tbl_hdl = MAKE_TBL_HDL(nat_cache_ptr->table_cnt, nmi);
+
+	IPADBG("tbl_hdl value(0x%08X) num_pdns (%d)\n", *tbl_hdl, num_pdns);
+
+	goto unlock;
+
+failed_post_init_cmd:
+	ipa_nati_destroy_table(nat_cache_ptr, nat_table);
+
+failed_create_table:
+	if (!nat_cache_ptr->table_cnt) {
+		ipa_descriptor_close(nat_cache_ptr->ipa_desc);
+		nat_cache_ptr->ipa_desc = NULL;
+	}
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
 }
 
-void ipa_nati_find_rule_pos(struct ipa_nat_ip4_table_cache *cache_ptr,
-														uint8_t expn_tbl,
-														uint16_t tbl_entry,
-														del_type *rule_pos)
+int ipa_NATI_del_ipv4_table(
+	uint32_t tbl_hdl )
 {
-	struct ipa_nat_rule *tbl_ptr;
+	enum ipa3_nat_mem_in            nmi;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
 
-	if (expn_tbl) {
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_expn_rules_addr;
-		if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
-														NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
-			*rule_pos = IPA_NAT_DEL_TYPE_LAST;
-		} else {
-			*rule_pos = IPA_NAT_DEL_TYPE_MIDDLE;
-		}
-	} else {
-		tbl_ptr = (struct ipa_nat_rule *)cache_ptr->ipv4_rules_addr;
-		if (Read16BitFieldValue(tbl_ptr[tbl_entry].nxt_indx_pub_port,
-					NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
-			*rule_pos = IPA_NAT_DEL_TYPE_ONLY_ONE;
-		} else {
-			*rule_pos = IPA_NAT_DEL_TYPE_HEAD;
+	int ret;
+
+	IPADBG("In\n");
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1];
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (! nat_table->mem_desc.valid) {
+		IPAERR("invalid table handle %d\n", tbl_hdl);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = ipa_nati_destroy_table(nat_cache_ptr, nat_table);
+	if (ret) {
+		IPAERR("unable to delete NAT table with handle %d\n", tbl_hdl);
+		goto unlock;
+	}
+
+	if (! --nat_cache_ptr->table_cnt) {
+		ipa_descriptor_close(nat_cache_ptr->ipa_desc);
+		nat_cache_ptr->ipa_desc = NULL;
+	}
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_NATI_query_timestamp(
+	uint32_t  tbl_hdl,
+	uint32_t  rule_hdl,
+	uint32_t* time_stamp )
+{
+	enum ipa3_nat_mem_in            nmi;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	struct ipa_nat_rule*            rule_ptr;
+
+	char buf[1024];
+	int  ret;
+
+	IPADBG("In\n");
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1];
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if ( ! nat_table->mem_desc.valid ) {
+		IPAERR("invalid table handle %d\n", tbl_hdl);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = ipa_table_get_entry(
+		&nat_table->table,
+		rule_hdl,
+		(void**) &rule_ptr,
+		NULL);
+
+	if (ret) {
+		IPAERR("Unable to retrieve the entry with "
+			   "handle=%u in NAT table with handle=0x%08X\n",
+			   rule_hdl, tbl_hdl);
+		goto unlock;
+	}
+
+	*buf = '\0';
+	IPADBG("rule_hdl(0x%08X) -> %s\n",
+		   rule_hdl,
+		   prep_nat_rule_4print(rule_ptr, buf, sizeof(buf)));
+
+	*time_stamp = rule_ptr->time_stamp;
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_NATI_add_ipv4_rule(
+	uint32_t                 tbl_hdl,
+	const ipa_nat_ipv4_rule* clnt_rule,
+	uint32_t*                rule_hdl)
+{
+	uint32_t cmd_sz =
+		sizeof(struct ipa_ioc_nat_dma_cmd) +
+		(MAX_DMA_ENTRIES_FOR_ADD * sizeof(struct ipa_ioc_nat_dma_one));
+	char cmd_buf[cmd_sz];
+	struct ipa_ioc_nat_dma_cmd* cmd =
+		(struct ipa_ioc_nat_dma_cmd*) cmd_buf;
+
+	enum ipa3_nat_mem_in            nmi;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	struct ipa_nat_rule*            rule;
+
+	uint16_t new_entry_index;
+	uint16_t new_index_tbl_entry_index;
+	uint32_t new_entry_handle;
+	char     buf[1024];
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	memset(cmd_buf, 0, sizeof(cmd_buf));
+
+	if ( ! VALID_TBL_HDL(tbl_hdl) ||
+		 ! clnt_rule ||
+		 ! rule_hdl )
+	{
+		IPAERR("Bad arg: tbl_hdl(0x%08X) and/or clnt_rule(%p) and/or rule_hdl(%p)\n",
+			   tbl_hdl, clnt_rule, rule_hdl);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	*rule_hdl = 0;
+
+	IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl);
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	*buf = '\0';
+	IPADBG("tbl_hdl(0x%08X) nmi(%s) %s\n",
+		   tbl_hdl,
+		   ipa3_nat_mem_in_as_str(nmi),
+		   prep_nat_ipv4_rule_4print(clnt_rule, buf, sizeof(buf)));
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1];
+
+	if (clnt_rule->protocol == IPAHAL_NAT_INVALID_PROTOCOL) {
+		IPAERR("invalid parameter protocol=%d\n", clnt_rule->protocol);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Verify that the rule's PDN is valid
+	 */
+	if (clnt_rule->pdn_index >= IPA_MAX_PDN_NUM ||
+		pdns[clnt_rule->pdn_index].public_ip == 0) {
+		IPAERR("invalid parameters, pdn index %d, public ip = 0x%X\n",
+			   clnt_rule->pdn_index, pdns[clnt_rule->pdn_index].public_ip);
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (! nat_table->mem_desc.valid) {
+		IPAERR("invalid table handle %d\n", tbl_hdl);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	new_entry_index = dst_hash(
+		nat_cache_ptr,
+		pdns[clnt_rule->pdn_index].public_ip,
+		clnt_rule->target_ip,
+		clnt_rule->target_port,
+		clnt_rule->public_port,
+		clnt_rule->protocol,
+		nat_table->table.table_entries - 1);
+
+	ret = ipa_table_add_entry(
+		&nat_table->table,
+		(void*) clnt_rule,
+		&new_entry_index,
+		&new_entry_handle,
+		cmd);
+
+	if (ret) {
+		IPAERR("Failed to add a new NAT entry\n");
+		goto unlock;
+	}
+
+	new_index_tbl_entry_index =
+		src_hash(clnt_rule->private_ip,
+				 clnt_rule->private_port,
+				 clnt_rule->target_ip,
+				 clnt_rule->target_port,
+				 clnt_rule->protocol,
+				 nat_table->table.table_entries - 1);
+
+	ret = ipa_table_add_entry(
+		&nat_table->index_table,
+		(void*) &new_entry_index,
+		&new_index_tbl_entry_index,
+		NULL,
+		cmd);
+
+	if (ret) {
+		IPAERR("failed to add a new NAT index entry\n");
+		goto fail_add_index_entry;
+	}
+
+	rule = ipa_table_get_entry_by_index(
+		&nat_table->table,
+		new_entry_index);
+
+	if (rule == NULL) {
+		IPAERR("Failed to retrieve the entry in index %d for NAT table with handle=%d\n",
+			   new_entry_index, tbl_hdl);
+		ret = -EPERM;
+		goto bail;
+	}
+
+	rule->indx_tbl_entry = new_index_tbl_entry_index;
+
+	rule->redirect   = clnt_rule->redirect;
+	rule->enable     = clnt_rule->enable;
+	rule->time_stamp = clnt_rule->time_stamp;
+
+	IPADBG("new entry:%d, new index entry: %d\n",
+		   new_entry_index, new_index_tbl_entry_index);
+
+	IPADBG("rule_hdl(0x%08X) -> %s\n",
+		   new_entry_handle,
+		   prep_nat_rule_4print(rule, buf, sizeof(buf)));
+
+	ret = ipa_nati_post_ipv4_dma_cmd(nat_cache_ptr, cmd);
+
+	if (ret) {
+		IPAERR("unable to post dma command\n");
+		goto bail;
+	}
+
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = -EPERM;
+		goto done;
+	}
+
+	*rule_hdl = new_entry_handle;
+
+	IPADBG("rule_hdl value(%u)\n", *rule_hdl);
+
+	goto done;
+
+bail:
+	ipa_table_erase_entry(&nat_table->index_table, new_index_tbl_entry_index);
+
+fail_add_index_entry:
+	ipa_table_erase_entry(&nat_table->table, new_entry_index);
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex))
+		IPAERR("unable to unlock the nat mutex\n");
+done:
+	IPADBG("Out\n");
+
+	return ret;
+}
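+
+/*
+ * Rule-level usage sketch (illustrative; field values are elided
+ * placeholders): add a TCP rule against an existing table handle, then
+ * remove it again via the returned rule handle:
+ *
+ *   ipa_nat_ipv4_rule rule = {
+ *       .target_ip    = ...,  .target_port  = ...,
+ *       .private_ip   = ...,  .private_port = ...,
+ *       .public_port  = ...,
+ *       .protocol     = IPPROTO_TCP,
+ *       .pdn_index    = 0,
+ *   };
+ *   uint32_t rule_hdl = 0;
+ *
+ *   if (ipa_NATI_add_ipv4_rule(tbl_hdl, &rule, &rule_hdl) == 0)
+ *       ipa_NATI_del_ipv4_rule(tbl_hdl, rule_hdl);
+ */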
+
+int ipa_NATI_del_ipv4_rule(
+	uint32_t tbl_hdl,
+	uint32_t rule_hdl )
+{
+	uint32_t cmd_sz =
+		sizeof(struct ipa_ioc_nat_dma_cmd) +
+		(MAX_DMA_ENTRIES_FOR_DEL * sizeof(struct ipa_ioc_nat_dma_one));
+	char cmd_buf[cmd_sz];
+	struct ipa_ioc_nat_dma_cmd* cmd =
+		(struct ipa_ioc_nat_dma_cmd*) cmd_buf;
+
+	enum ipa3_nat_mem_in            nmi;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	struct ipa_nat_rule*            table_rule;
+	struct ipa_nat_indx_tbl_rule*   index_table_rule;
+
+	ipa_table_iterator table_iterator;
+	ipa_table_iterator index_table_iterator;
+
+	uint16_t index;
+	char     buf[1024];
+	int      ret = 0;
+
+	IPADBG("In\n");
+
+	memset(cmd_buf, 0, sizeof(cmd_buf));
+
+	IPADBG("tbl_hdl(0x%08X) rule_hdl(%u)\n", tbl_hdl, rule_hdl);
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1];
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("Unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (! nat_table->mem_desc.valid) {
+		IPAERR("Invalid table handle 0x%08X\n", tbl_hdl);
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	ret = ipa_table_get_entry(
+		&nat_table->table,
+		rule_hdl,
+		(void**) &table_rule,
+		&index);
+
+	if (ret) {
+		IPAERR("Unable to retrieve the entry with rule_hdl=%u\n", rule_hdl);
+		goto unlock;
+	}
+
+	*buf = '\0';
+	IPADBG("rule_hdl(0x%08X) -> %s\n",
+		   rule_hdl,
+		   prep_nat_rule_4print(table_rule, buf, sizeof(buf)));
+
+	ret = ipa_table_iterator_init(
+		&table_iterator,
+		&nat_table->table,
+		table_rule,
+		index);
+
+	if (ret) {
+		IPAERR("Unable to create iterator which points to the "
+			   "entry %u in NAT table with handle=0x%08X\n",
+			   index, tbl_hdl);
+		goto unlock;
+	}
+
+	index = table_rule->indx_tbl_entry;
+
+	index_table_rule = (struct ipa_nat_indx_tbl_rule*)
+		ipa_table_get_entry_by_index(&nat_table->index_table, index);
+
+	if (index_table_rule == NULL) {
+		IPAERR("Unable to retrieve the entry in index %u "
+			   "in NAT index table with handle=0x%08X\n",
+			   index, tbl_hdl);
+		ret = -EPERM;
+		goto unlock;
+	}
+
+	ret = ipa_table_iterator_init(
+		&index_table_iterator,
+		&nat_table->index_table,
+		index_table_rule,
+		index);
+
+	if (ret) {
+		IPAERR("Unable to create iterator which points to the "
+			   "entry %u in NAT index table with handle=0x%08X\n",
+			   index, tbl_hdl);
+		goto unlock;
+	}
+
+	ipa_table_create_delete_command(
+		&nat_table->index_table,
+		cmd,
+		&index_table_iterator);
+
+	if (ipa_table_iterator_is_head_with_tail(&index_table_iterator)) {
+
+		ipa_nati_copy_second_index_entry_to_head(
+			nat_table, &index_table_iterator, cmd);
+		/*
+		 * Iterate to the next entry which should be deleted
+		 */
+		ret = ipa_table_iterator_next(
+			&index_table_iterator, &nat_table->index_table);
+
+		if (ret) {
+			IPAERR("Unable to move the iterator to the next entry "
+				   "(points to the entry %u in NAT index table)\n",
+				   index);
+			goto unlock;
 		}
 	}
+
+	ipa_table_create_delete_command(
+		&nat_table->table,
+		cmd,
+		&table_iterator);
+
+	ret = ipa_nati_post_ipv4_dma_cmd(nat_cache_ptr, cmd);
+
+	if (ret) {
+		IPAERR("Unable to post dma command\n");
+		goto unlock;
+	}
+
+	if (! ipa_table_iterator_is_head_with_tail(&table_iterator)) {
+		/* The entry can be deleted */
+		uint8_t is_prev_empty =
+			(table_iterator.prev_entry != NULL &&
+			 ((struct ipa_nat_rule*)table_iterator.prev_entry)->protocol ==
+			 IPAHAL_NAT_INVALID_PROTOCOL);
+
+		ipa_table_delete_entry(
+			&nat_table->table, &table_iterator, is_prev_empty);
+	}
+
+	ipa_table_delete_entry(
+		&nat_table->index_table,
+		&index_table_iterator,
+		FALSE);
+
+	if (index_table_iterator.curr_index >= nat_table->index_table.table_entries)
+		nat_table->index_expn_table_meta[
+			index_table_iterator.curr_index - nat_table->index_table.table_entries].
+			prev_index = IPA_TABLE_INVALID_ENTRY;
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("Unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+done:
+	IPADBG("Out\n");
+
+	return ret;
 }
 
-void ipa_nati_del_dead_ipv4_head_nodes(uint8_t tbl_indx)
+/*
+ * ----------------------------------------------------------------------------
+ * New function to get SRAM size.
+ * ----------------------------------------------------------------------------
+ */
+int ipa_nati_get_sram_size(
+	uint32_t* size_ptr)
 {
-	struct ipa_nat_rule *tbl_ptr;
-	uint16_t cnt;
+	struct ipa_nat_cache* nat_cache_ptr =
+		&ipv4_nat_cache[IPA_NAT_MEM_IN_SRAM];
+	struct ipa_nat_in_sram_info nat_sram_info;
+	int ret;
 
-	tbl_ptr =
-	(struct ipa_nat_rule *)ipv4_nat_cache.ip4_tbl[tbl_indx].ipv4_rules_addr;
+	IPADBG("In\n");
 
-	for (cnt = 0;
-			 cnt < ipv4_nat_cache.ip4_tbl[tbl_indx].table_entries;
-			 cnt++) {
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
 
-		if (Read8BitFieldValue(tbl_ptr[cnt].ts_proto,
-					PROTOCOL_FIELD) == IPAHAL_NAT_INVALID_PROTOCOL
-				&&
-				Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
-					NEXT_INDEX_FIELD) == IPA_NAT_INVALID_NAT_ENTRY) {
-			/* Delete the IPA_NAT_DEL_TYPE_HEAD node */
-			IPADBG("deleting the dead node 0x%x\n", cnt);
-			memset(&tbl_ptr[cnt], 0, sizeof(struct ipa_nat_rule));
+	if ( ! nat_cache_ptr->ipa_desc ) {
+		nat_cache_ptr->ipa_desc = ipa_descriptor_open();
+		if ( nat_cache_ptr->ipa_desc == NULL ) {
+			IPAERR("failed to open IPA driver file descriptor\n");
+			ret = -EIO;
+			goto unlock;
 		}
-	} /* end of for loop */
+	}
 
-	return;
+	memset(&nat_sram_info, 0, sizeof(nat_sram_info));
+
+	ret = ioctl(nat_cache_ptr->ipa_desc->fd,
+				IPA_IOC_GET_NAT_IN_SRAM_INFO,
+				&nat_sram_info);
+
+	if (ret) {
+		IPAERR("NAT_IN_SRAM_INFO ioctl failure %d on IPA fd %d\n",
+			   ret, nat_cache_ptr->ipa_desc->fd);
+		goto unlock;
+	}
+
+	if ( (*size_ptr = nat_sram_info.sram_mem_available_for_nat) == 0 )
+	{
+		IPAERR("sram_mem_available_for_nat is zero\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
 }
 
-
-/* ========================================================
-						Debug functions
-	 ========================================================*/
-#ifdef NAT_DUMP
-void ipa_nat_dump_ipv4_table(uint32_t tbl_hdl)
+/*
+ * ----------------------------------------------------------------------------
+ * Utility functions
+ * ----------------------------------------------------------------------------
+ */
+static int print_nat_rule(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
 {
-	struct ipa_nat_rule *tbl_ptr;
-	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
-	int cnt;
-	uint8_t atl_one = 0;
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
 
-	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-			tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
-		IPAERR("invalid table handle passed\n");
+	char buf[1024];
+
+	struct ipa_nat_rule* rule_ptr =
+		(struct ipa_nat_rule*) record_ptr;
+
+	UNUSED(meta_record_ptr);
+	UNUSED(meta_record_index);
+
+	if ( rule_ptr->protocol == IPA_NAT_INVALID_PROTO_FIELD_VALUE_IN_RULE )
+	{
+		goto bail;
+	}
+
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
+
+	nmi++; /* stop compiler usage warning */
+
+	printf("  %s %s (0x%04X) (0x%08X) -> %s\n",
+		   (table_ptr->nmi == IPA_NAT_MEM_IN_DDR) ? "DDR" : "SRAM",
+		   (is_expn_tbl) ? "EXP " : "BASE",
+		   record_index,
+		   rule_hdl,
+		   prep_nat_rule_4print(rule_ptr, buf, sizeof(buf)));
+
+	fflush(stdout);
+
+	*((bool*) arb_data_ptr) = false;
+
+bail:
+	return 0;
+}
+
+static int print_meta_data(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
+{
+	struct ipa_nat_indx_tbl_rule* index_entry =
+		(struct ipa_nat_indx_tbl_rule *) record_ptr;
+
+	struct ipa_nat_indx_tbl_meta_info* mi_ptr =
+		(struct ipa_nat_indx_tbl_meta_info*) meta_record_ptr;
+
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
+
+	UNUSED(meta_record_index);
+
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
+
+	nmi++; /* stop compiler usage warning */
+
+	if ( mi_ptr )
+	{
+		printf("  %s %s Entry_Index=0x%04X Table_Entry=0x%04X -> "
+			   "Prev_Index=0x%04X Next_Index=0x%04X\n",
+			   (table_ptr->nmi == IPA_NAT_MEM_IN_DDR) ? "DDR" : "SRAM",
+			   (is_expn_tbl) ? "EXP " : "BASE",
+			   record_index,
+			   index_entry->tbl_entry,
+			   mi_ptr->prev_index,
+			   index_entry->next_index);
+	}
+	else
+	{
+		printf("  %s %s Entry_Index=0x%04X Table_Entry=0x%04X -> "
+			   "Prev_Index=0xXXXX Next_Index=0x%04X\n",
+			   (table_ptr->nmi == IPA_NAT_MEM_IN_DDR) ? "DDR" : "SRAM",
+			   (is_expn_tbl) ? "EXP " : "BASE",
+			   record_index,
+			   index_entry->tbl_entry,
+			   index_entry->next_index);
+	}
+
+	fflush(stdout);
+
+	*((bool*) arb_data_ptr) = false;
+
+	return 0;
+}
+
+void ipa_nat_dump_ipv4_table(
+	uint32_t tbl_hdl )
+{
+	bool empty;
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
 		return;
 	}
 
-	/* Print ipv4 rules */
-	IPADBG("Dumping ipv4 active rules:\n");
-	tbl_ptr = (struct ipa_nat_rule *)
-	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_rules_addr;
-	for (cnt = 0;
-			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-					ENABLE_FIELD)) {
-			atl_one = 1;
-			ipa_nati_print_rule(&tbl_ptr[cnt], cnt);
-		}
-	}
-	if (!atl_one) {
-		IPADBG("No active base rules, total: %d\n",
-					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
-	}
-	atl_one = 0;
+	printf("\nIPv4 active rules:\n");
 
-	/* Print ipv4 expansion rules */
-	IPADBG("Dumping ipv4 active expansion rules:\n");
-	tbl_ptr = (struct ipa_nat_rule *)
-	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_expn_rules_addr;
-	for (cnt = 0;
-			 cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-					ENABLE_FIELD)) {
-			atl_one = 1;
-			ipa_nati_print_rule(&tbl_ptr[cnt],
-				(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries));
-		}
-	}
-	if (!atl_one) {
-		IPADBG("No active base expansion rules, total: %d\n",
-					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
-	}
-	atl_one = 0;
+	empty = true;
 
-	/* Print ipv4 index rules */
-	IPADBG("Dumping ipv4 index active rules:\n");
-	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_addr;
-	for (cnt = 0;
-			 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-					INDX_TBL_TBL_ENTRY_FIELD)) {
-			atl_one = 1;
-			ipa_nati_print_index_rule(&indx_tbl_ptr[cnt], cnt, 0);
-		}
-	}
-	if (!atl_one) {
-		IPADBG("No active index table rules, total:%d\n",
-					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries);
-	}
-	atl_one = 0;
+	ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_NAT_TABLE, print_nat_rule, &empty);
 
-
-	/* Print ipv4 index expansion rules */
-	IPADBG("Dumping ipv4 index expansion active rules:\n");
-	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_expn_addr;
-	for (cnt = 0;
-			 cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-					INDX_TBL_TBL_ENTRY_FIELD)) {
-			atl_one = 1;
-			ipa_nati_print_index_rule(&indx_tbl_ptr[cnt],
-				(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries),
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_expn_table_meta[cnt].prev_index);
-		}
+	if ( empty )
+	{
+		printf("  Empty\n");
 	}
-	if (!atl_one) {
-		IPADBG("No active index expansion rules, total:%d\n",
-					 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries);
-	}
-	atl_one = 0;
 
+	printf("\nExpansion Index Table Meta Data:\n");
+
+	empty = true;
+
+	ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_INDEX_TABLE, print_meta_data, &empty);
+
+	if ( empty )
+	{
+		printf("  Empty\n");
+	}
+
+	printf("\n");
+
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+	}
 }
 
-void ipa_nati_print_rule(
-		struct ipa_nat_rule *param,
-		uint32_t rule_id)
+int ipa_NATI_clear_ipv4_tbl(
+	uint32_t tbl_hdl )
 {
-	struct ipa_nat_sw_rule sw_rule;
-	memcpy(&sw_rule, param, sizeof(sw_rule));
-	uint32_t ip_addr;
+	enum ipa3_nat_mem_in            nmi;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	int ret = 0;
 
-	IPADUMP("rule-id:%d  ", rule_id);
-	ip_addr = sw_rule.target_ip;
-	IPADUMP("Trgt-IP:%d.%d.%d.%d	",
-				((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
-			((ip_addr & 0x0000FF00) >> 8), ((ip_addr & 0x000000FF)));
+	IPADBG("In\n");
 
-	IPADUMP("Trgt-Port:%d  Priv-Port:%d  ", sw_rule.target_port, sw_rule.private_port);
+	BREAK_TBL_HDL(tbl_hdl, nmi, tbl_hdl);
 
-	ip_addr = sw_rule.private_ip;
-	IPADUMP("Priv-IP:%d.%d.%d.%d ",
-							((ip_addr & 0xFF000000) >> 24), ((ip_addr & 0x00FF0000) >> 16),
-							((ip_addr & 0x0000FF00) >> 8), ((ip_addr & 0x000000FF)));
-
-	IPADUMP("Pub-Port:%d	Nxt-indx:%d  ", sw_rule.public_port, sw_rule.next_index);
-	IPADUMP("IP-cksm-delta:0x%x  En-bit:0x%x	", sw_rule.ip_chksum, sw_rule.enable);
-	IPADUMP("TS:0x%x	Proto:0x%x	", sw_rule.time_stamp, sw_rule.protocol);
-	IPADUMP("Prv-indx:%d	indx_tbl_entry:%d	", sw_rule.prev_index, sw_rule.indx_tbl_entry);
-	IPADUMP("Tcp-udp-cksum-delta:0x%x", sw_rule.tcp_udp_chksum);
-	IPADUMP("\n");
-	return;
-}
-
-void ipa_nati_print_index_rule(
-		struct ipa_nat_indx_tbl_rule *param,
-		uint32_t rule_id, uint16_t prev_indx)
-{
-	struct ipa_nat_sw_indx_tbl_rule sw_rule;
-	memcpy(&sw_rule, param, sizeof(sw_rule));
-
-	IPADUMP("rule-id:%d  Table_entry:%d  Next_index:%d, prev_indx:%d",
-					  rule_id, sw_rule.tbl_entry, sw_rule.next_index, prev_indx);
-	IPADUMP("\n");
-	return;
-}
-
-int ipa_nati_query_nat_rules(
-		uint32_t tbl_hdl,
-		nat_table_type tbl_type)
-{
-	struct ipa_nat_rule *tbl_ptr;
-	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
-	int cnt = 0, ret = 0;
-
-	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-			tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
-		IPAERR("invalid table handle passed\n");
-		return ret;
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto bail;
 	}
 
-	/* Print ipv4 rules */
-	if (tbl_type == IPA_NAT_BASE_TBL) {
-		IPADBG("Counting ipv4 active rules:\n");
-		tbl_ptr = (struct ipa_nat_rule *)
-			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_rules_addr;
-		for (cnt = 0;
-				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-				 cnt++) {
-			if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-						ENABLE_FIELD)) {
-				ret++;
-			}
-		}
-		if (!ret) {
-			IPADBG("No active base rules\n");
-		}
+	IPADBG("nmi(%s)\n", ipa3_nat_mem_in_as_str(nmi));
 
-		IPADBG("Number of active base rules: %d\n", ret);
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	if (pthread_mutex_lock(&nat_mutex)) {
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
 	}
 
-	/* Print ipv4 expansion rules */
-	if (tbl_type == IPA_NAT_EXPN_TBL) {
-		IPADBG("Counting ipv4 active expansion rules:\n");
-		tbl_ptr = (struct ipa_nat_rule *)
-			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].ipv4_expn_rules_addr;
-		for (cnt = 0;
-				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-				 cnt++) {
-			if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-						ENABLE_FIELD)) {
-				ret++;
-			}
-		}
-		if (!ret) {
-			IPADBG("No active base expansion rules\n");
-		}
-
-		IPADBG("Number of active base expansion rules: %d\n", ret);
+	if ( ! nat_cache_ptr->table_cnt ) {
+		IPAERR("No initialized table in NAT cache\n");
+		ret = -EINVAL;
+		goto unlock;
 	}
 
-	/* Print ipv4 index rules */
-	if (tbl_type == IPA_NAT_INDX_TBL) {
-		IPADBG("Counting ipv4 index active rules:\n");
-		indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_addr;
-		for (cnt = 0;
-				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-				 cnt++) {
-			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-						INDX_TBL_TBL_ENTRY_FIELD)) {
-				ret++;
-			}
-		}
-		if (!ret) {
-			IPADBG("No active index table rules\n");
-		}
+	nat_table = &nat_cache_ptr->ip4_tbl[tbl_hdl - 1];
 
-		IPADBG("Number of active index table rules: %d\n", ret);
+	ipa_table_reset(&nat_table->table);
+	nat_table->table.cur_tbl_cnt =
+		nat_table->table.cur_expn_tbl_cnt = 0;
+
+	ipa_table_reset(&nat_table->index_table);
+	nat_table->index_table.cur_tbl_cnt =
+		nat_table->index_table.cur_expn_tbl_cnt = 0;
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex)) {
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
 	}
 
-	/* Print ipv4 index expansion rules */
-	if (tbl_type == IPA_NAT_INDEX_EXPN_TBL) {
-		IPADBG("Counting ipv4 index expansion active rules:\n");
-		indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-			 ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_table_expn_addr;
-		for (cnt = 0;
-				 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-				 cnt++) {
-			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-						INDX_TBL_TBL_ENTRY_FIELD)) {
-						ret++;
-			}
-		}
-
-		if (!ret)
-			IPADBG("No active index expansion rules\n");
-
-		IPADBG("Number of active index expansion rules: %d\n", ret);
-	}
+bail:
+	IPADBG("Out\n");
 
 	return ret;
 }
-#endif
+
+int ipa_nati_copy_ipv4_tbl(
+	uint32_t          src_tbl_hdl,
+	uint32_t          dst_tbl_hdl,
+	ipa_table_walk_cb copy_cb )
+{
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! copy_cb )
+	{
+		IPAERR("copy_cb is null\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (pthread_mutex_lock(&nat_mutex))
+	{
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Clear the destination table...
+	 */
+	ret = ipa_NATI_clear_ipv4_tbl(dst_tbl_hdl);
+
+	if ( ret == 0 )
+	{
+		uintptr_t dth = dst_tbl_hdl;
+		/*
+		 * Now walk the source table and pass the valid records to the
+		 * user's copy callback...
+		 */
+		ret = ipa_NATI_walk_ipv4_tbl(
+			src_tbl_hdl, USE_NAT_TABLE, copy_cb, (void*) dth);
+
+		if ( ret != 0 )
+		{
+			IPAERR("ipa_table_walk returned non-zero (%d)\n", ret);
+			goto unlock;
+		}
+	}
+
+unlock:
+	if (pthread_mutex_unlock(&nat_mutex))
+	{
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_NATI_walk_ipv4_tbl(
+	uint32_t          tbl_hdl,
+	WhichTbl2Use      which,
+	ipa_table_walk_cb walk_cb,
+	void*             arb_data_ptr )
+{
+	enum ipa3_nat_mem_in            nmi;
+	uint32_t                        broken_tbl_hdl;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	ipa_table*                      ipa_tbl_ptr;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_TBL_HDL(tbl_hdl) ||
+		 ! VALID_WHICHTBL2USE(which) ||
+		 ! walk_cb )
+	{
+		IPAERR("Bad arg: tbl_hdl(0x%08X) and/or WhichTbl2Use(%u) and/or walk_cb(%p)\n",
+			   tbl_hdl, which, walk_cb);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if ( pthread_mutex_lock(&nat_mutex) )
+	{
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Now walk the table and pass the valid records to the user's
+	 * walk callback...
+	 */
+	BREAK_TBL_HDL(tbl_hdl, nmi, broken_tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) )
+	{
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	if ( ! nat_cache_ptr->table_cnt )
+	{
+		IPAERR("No initialized table in NAT cache\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	nat_table = &nat_cache_ptr->ip4_tbl[broken_tbl_hdl - 1];
+
+	ipa_tbl_ptr =
+		(which == USE_NAT_TABLE) ?
+		&nat_table->table     :
+		&nat_table->index_table;
+
+	ret = ipa_table_walk(ipa_tbl_ptr, 0, WHEN_SLOT_FILLED, walk_cb, arb_data_ptr);
+
+	if ( ret != 0 )
+	{
+		IPAERR("ipa_table_walk returned non-zero (%d)\n", ret);
+		goto unlock;
+	}
+
+unlock:
+	if ( pthread_mutex_unlock(&nat_mutex) )
+	{
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+typedef struct
+{
+	WhichTbl2Use        which;
+	uint32_t            tot_for_avg;
+	ipa_nati_tbl_stats* stats_ptr;
+} chain_stat_help;
+
+static int gen_chain_stats(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
+{
+	chain_stat_help* csh_ptr = (chain_stat_help*) arb_data_ptr;
+
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
+
+	uint32_t             chain_len = 0;
+
+	UNUSED(record_index);
+	UNUSED(meta_record_ptr);
+	UNUSED(meta_record_index);
+
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
+
+	if ( is_expn_tbl )
+	{
+		nmi++; /* stop compiler usage warning */
+		return 1;
+	}
+
+	if ( csh_ptr->which == USE_NAT_TABLE )
+	{
+		struct ipa_nat_rule* list_elem_ptr =
+			(struct ipa_nat_rule*) record_ptr;
+
+		if ( list_elem_ptr->next_index )
+		{
+			chain_len = 1;
+
+			while ( list_elem_ptr->next_index )
+			{
+				chain_len++;
+
+				list_elem_ptr = (struct ipa_nat_rule*)
+					GOTO_REC(table_ptr, list_elem_ptr->next_index);
+			}
+		}
+	}
+	else
+	{
+		struct ipa_nat_indx_tbl_rule* list_elem_ptr =
+			(struct ipa_nat_indx_tbl_rule*) record_ptr;
+
+		if ( list_elem_ptr->next_index )
+		{
+			chain_len = 1;
+
+			while ( list_elem_ptr->next_index )
+			{
+				chain_len++;
+
+				list_elem_ptr = (struct ipa_nat_indx_tbl_rule*)
+					GOTO_REC(table_ptr, list_elem_ptr->next_index);
+			}
+		}
+	}
+
+	if ( chain_len )
+	{
+		csh_ptr->stats_ptr->tot_chains += 1;
+
+		csh_ptr->tot_for_avg += chain_len;
+
+		if ( csh_ptr->stats_ptr->min_chain_len == 0 )
+		{
+			csh_ptr->stats_ptr->min_chain_len = chain_len;
+		}
+		else
+		{
+			csh_ptr->stats_ptr->min_chain_len =
+				min(csh_ptr->stats_ptr->min_chain_len, chain_len);
+		}
+
+		csh_ptr->stats_ptr->max_chain_len =
+			max(csh_ptr->stats_ptr->max_chain_len, chain_len);
+	}
+
+	return 0;
+}
+
+int ipa_NATI_ipv4_tbl_stats(
+	uint32_t            tbl_hdl,
+	ipa_nati_tbl_stats* nat_stats_ptr,
+	ipa_nati_tbl_stats* idx_stats_ptr )
+{
+	enum ipa3_nat_mem_in            nmi;
+	uint32_t                        broken_tbl_hdl;
+	struct ipa_nat_cache*           nat_cache_ptr;
+	struct ipa_nat_ip4_table_cache* nat_table;
+	ipa_table*                      ipa_tbl_ptr;
+
+	chain_stat_help                 csh;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_TBL_HDL(tbl_hdl) ||
+		 ! nat_stats_ptr ||
+		 ! idx_stats_ptr )
+	{
+		IPAERR("Bad arg: "
+			   "tbl_hdl(0x%08X) and/or "
+			   "nat_stats_ptr(%p) and/or "
+			   "idx_stats_ptr(%p)\n",
+			   tbl_hdl,
+			   nat_stats_ptr,
+			   idx_stats_ptr );
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if ( pthread_mutex_lock(&nat_mutex) )
+	{
+		IPAERR("unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	memset(nat_stats_ptr, 0, sizeof(ipa_nati_tbl_stats));
+	memset(idx_stats_ptr, 0, sizeof(ipa_nati_tbl_stats));
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, broken_tbl_hdl);
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) )
+	{
+		IPAERR("Bad cache type argument passed\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	nat_cache_ptr = &ipv4_nat_cache[nmi];
+
+	if ( ! nat_cache_ptr->table_cnt )
+	{
+		IPAERR("No initialized table in NAT cache\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	nat_table = &nat_cache_ptr->ip4_tbl[broken_tbl_hdl - 1];
+
+	/*
+	 * Gather NAT table stats...
+	 */
+	ipa_tbl_ptr = &nat_table->table;
+
+	nat_stats_ptr->nmi                  = nmi;
+
+	nat_stats_ptr->tot_base_ents        = ipa_tbl_ptr->table_entries;
+	nat_stats_ptr->tot_expn_ents        = ipa_tbl_ptr->expn_table_entries;
+	nat_stats_ptr->tot_ents             =
+		nat_stats_ptr->tot_base_ents + nat_stats_ptr->tot_expn_ents;
+
+	nat_stats_ptr->tot_base_ents_filled = ipa_tbl_ptr->cur_tbl_cnt;
+	nat_stats_ptr->tot_expn_ents_filled = ipa_tbl_ptr->cur_expn_tbl_cnt;
+
+	memset(&csh, 0, sizeof(chain_stat_help));
+
+	csh.which     = USE_NAT_TABLE;
+	csh.stats_ptr = nat_stats_ptr;
+
+	ret = ipa_table_walk(
+		ipa_tbl_ptr, 0, WHEN_SLOT_FILLED, gen_chain_stats, &csh);
+
+	if ( ret < 0 )
+	{
+		IPAERR("Error gathering chain stats\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if ( csh.tot_for_avg && nat_stats_ptr->tot_chains )
+	{
+		nat_stats_ptr->avg_chain_len =
+			(float) csh.tot_for_avg / (float) nat_stats_ptr->tot_chains;
+	}
+
+	/*
+	 * Now let's gather index table stats...
+	 */
+	ipa_tbl_ptr = &nat_table->index_table;
+
+	idx_stats_ptr->nmi                  = nmi;
+
+	idx_stats_ptr->tot_base_ents        = ipa_tbl_ptr->table_entries;
+	idx_stats_ptr->tot_expn_ents        = ipa_tbl_ptr->expn_table_entries;
+	idx_stats_ptr->tot_ents             =
+		idx_stats_ptr->tot_base_ents + idx_stats_ptr->tot_expn_ents;
+
+	idx_stats_ptr->tot_base_ents_filled = ipa_tbl_ptr->cur_tbl_cnt;
+	idx_stats_ptr->tot_expn_ents_filled = ipa_tbl_ptr->cur_expn_tbl_cnt;
+
+	memset(&csh, 0, sizeof(chain_stat_help));
+
+	csh.which     = USE_INDEX_TABLE;
+	csh.stats_ptr = idx_stats_ptr;
+
+	ret = ipa_table_walk(
+		ipa_tbl_ptr, 0, WHEN_SLOT_FILLED, gen_chain_stats, &csh);
+
+	if ( ret < 0 )
+	{
+		IPAERR("Error gathering chain stats\n");
+		ret = -EINVAL;
+		goto unlock;
+	}
+
+	if ( csh.tot_for_avg && idx_stats_ptr->tot_chains )
+	{
+		idx_stats_ptr->avg_chain_len =
+			(float) csh.tot_for_avg / (float) idx_stats_ptr->tot_chains;
+	}
+
+	ret = 0;
+
+unlock:
+	if ( pthread_mutex_unlock(&nat_mutex) )
+	{
+		IPAERR("unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
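+
+/*
+ * Usage sketch (illustrative; tbl_hdl is assumed to be a handle
+ * returned earlier by the add-table API): querying fill level and
+ * chain statistics for a table:
+ *
+ *   ipa_nati_tbl_stats nat_stats, idx_stats;
+ *
+ *   if (ipa_NATI_ipv4_tbl_stats(tbl_hdl, &nat_stats, &idx_stats) == 0)
+ *       printf("base %u/%u filled, longest chain %u, avg %.2f\n",
+ *              nat_stats.tot_base_ents_filled,
+ *              nat_stats.tot_base_ents,
+ *              nat_stats.max_chain_len,
+ *              nat_stats.avg_chain_len);
+ */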
+
+int ipa_nati_vote_clock(
+	enum ipa_app_clock_vote_type vote_type )
+{
+	struct ipa_nat_cache* nat_cache_ptr =
+		&ipv4_nat_cache[IPA_NAT_MEM_IN_SRAM];
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! nat_cache_ptr->ipa_desc ) {
+		nat_cache_ptr->ipa_desc = ipa_descriptor_open();
+		if ( nat_cache_ptr->ipa_desc == NULL ) {
+			IPAERR("failed to open IPA driver file descriptor\n");
+			ret = -EIO;
+			goto bail;
+		}
+	}
+
+	ret = ioctl(nat_cache_ptr->ipa_desc->fd,
+				IPA_IOC_APP_CLOCK_VOTE,
+				vote_type);
+
+	if (ret) {
+		IPAERR("APP_CLOCK_VOTE ioctl failure %d on IPA fd %d\n",
+			   ret, nat_cache_ptr->ipa_desc->fd);
+		goto bail;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
diff --git a/ipanat/src/ipa_nat_logi.c b/ipanat/src/ipa_nat_logi.c
deleted file mode 100644
index b829b78..0000000
--- a/ipanat/src/ipa_nat_logi.c
+++ /dev/null
@@ -1,49 +0,0 @@
-/* 
-Copyright (c) 2013, The Linux Foundation. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-		* Redistributions of source code must retain the above copyright
-			notice, this list of conditions and the following disclaimer.
-		* Redistributions in binary form must reproduce the above
-			copyright notice, this list of conditions and the following
-			disclaimer in the documentation and/or other materials provided
-			with the distribution.
-		* Neither the name of The Linux Foundation nor the names of its
-			contributors may be used to endorse or promote products derived
-			from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
-WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
-ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
-BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
-OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
-IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-/*!
-	@file
-	IPACM_log.cpp
-
-	@brief
-	This file implements the IPAM log functionality.
-
-	@Author
-	Skylar Chang
-
-*/
-#include "ipa_nat_logi.h"
-#include <stdlib.h>
-#include <unistd.h>
-
-void log_nat_message(char *msg)
-{
-	 return;
-}
-
-
diff --git a/ipanat/src/ipa_nat_map.cpp b/ipanat/src/ipa_nat_map.cpp
new file mode 100644
index 0000000..d2bcf54
--- /dev/null
+++ b/ipanat/src/ipa_nat_map.cpp
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <map>
+#include <iterator>
+
+#include "ipa_nat_utils.h"
+
+#include "ipa_nat_map.h"
+
+static std::map<uint32_t, uint32_t> map_array[MAP_NUM_MAX];
+
+/******************************************************************************/
+
+int ipa_nat_map_add(
+	ipa_which_map which,
+	uint32_t      key,
+	uint32_t      val )
+{
+	int ret_val = 0;
+
+	std::pair<std::map<uint32_t, uint32_t>::iterator, bool> ret;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_IPA_USE_MAP(which) )
+	{
+		IPAERR("Bad arg which(%u)\n", which);
+		ret_val = -1;
+		goto bail;
+	}
+
+	IPADBG("[%s] key(%u) -> val(%u)\n",
+		   ipa_which_map_as_str(which), key, val);
+
+	ret = map_array[which].insert(std::pair<uint32_t, uint32_t>(key, val));
+
+	if ( ret.second == false )
+	{
+		IPAERR("[%s] key(%u) already exists in map\n",
+			   ipa_which_map_as_str(which),
+			   key);
+		ret_val = -1;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret_val;
+}
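+
+/*
+ * Usage sketch (illustrative; orig_hdl/new_hdl are placeholder
+ * variables): the state machine keeps handle maps in pairs, so a
+ * typical caller records both directions together:
+ *
+ *   ipa_nat_map_add(MAP_NUM_00, orig_hdl, new_hdl);
+ *   ipa_nat_map_add(MAP_NUM_01, new_hdl, orig_hdl);
+ *
+ *   uint32_t looked_up;
+ *   ipa_nat_map_find(MAP_NUM_00, orig_hdl, &looked_up);
+ */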
+
+/******************************************************************************/
+
+int ipa_nat_map_find(
+	ipa_which_map which,
+	uint32_t      key,
+	uint32_t*     val_ptr )
+{
+	int ret_val = 0;
+
+	std::map<uint32_t, uint32_t>::iterator it;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_IPA_USE_MAP(which) )
+	{
+		IPAERR("Bad arg which(%u)\n", which);
+		ret_val = -1;
+		goto bail;
+	}
+
+	IPADBG("[%s] key(%u)\n",
+		   ipa_which_map_as_str(which), key);
+
+	it = map_array[which].find(key);
+
+	if ( it == map_array[which].end() )
+	{
+		IPAERR("[%s] key(%u) not found in map\n",
+			   ipa_which_map_as_str(which),
+			   key);
+		ret_val = -1;
+	}
+	else
+	{
+		if ( val_ptr )
+		{
+			*val_ptr = it->second;
+			IPADBG("[%s] key(%u) -> val(%u)\n",
+				   ipa_which_map_as_str(which),
+				   key, *val_ptr);
+		}
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret_val;
+}
+
+/******************************************************************************/
+
+int ipa_nat_map_del(
+	ipa_which_map which,
+	uint32_t      key,
+	uint32_t*     val_ptr )
+{
+	int ret_val = 0;
+
+	std::map<uint32_t, uint32_t>::iterator it;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_IPA_USE_MAP(which) )
+	{
+		IPAERR("Bad arg which(%u)\n", which);
+		ret_val = -1;
+		goto bail;
+	}
+
+	IPADBG("[%s] key(%u)\n",
+		   ipa_which_map_as_str(which), key);
+
+	it = map_array[which].find(key);
+
+	if ( it == map_array[which].end() )
+	{
+		IPAERR("[%s] key(%u) not found in map\n",
+			   ipa_which_map_as_str(which),
+			   key);
+		ret_val = -1;
+	}
+	else
+	{
+		if ( val_ptr )
+		{
+			*val_ptr = it->second;
+			IPADBG("[%s] key(%u) -> val(%u)\n",
+				   ipa_which_map_as_str(which),
+				   key, *val_ptr);
+		}
+		map_array[which].erase(it);
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret_val;
+}
+
+int ipa_nat_map_clear(
+	ipa_which_map which )
+{
+	int ret_val = 0;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_IPA_USE_MAP(which) )
+	{
+		IPAERR("Bad arg which(%u)\n", which);
+		ret_val = -1;
+		goto bail;
+	}
+
+	map_array[which].clear();
+
+bail:
+	IPADBG("Out\n");
+
+	return ret_val;
+}
+
+int ipa_nat_map_dump(
+	ipa_which_map which )
+{
+	std::map<uint32_t, uint32_t>::iterator it;
+
+	int ret_val = 0;
+
+	IPADBG("In\n");
+
+	if ( ! VALID_IPA_USE_MAP(which) )
+	{
+		IPAERR("Bad arg which(%u)\n", which);
+		ret_val = -1;
+		goto bail;
+	}
+
+	printf("Dumping: %s\n", ipa_which_map_as_str(which));
+
+	for ( it  = map_array[which].begin();
+		  it != map_array[which].end();
+		  it++ )
+	{
+		printf("  Key[%u|0x%08X] -> Value[%u|0x%08X]\n",
+			   it->first,
+			   it->first,
+			   it->second,
+			   it->second);
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret_val;
+}
diff --git a/ipanat/src/ipa_nat_statemach.c b/ipanat/src/ipa_nat_statemach.c
new file mode 100644
index 0000000..c65f88a
--- /dev/null
+++ b/ipanat/src/ipa_nat_statemach.c
@@ -0,0 +1,2477 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <errno.h>
+#include <pthread.h>
+
+#include "ipa_nat_drv.h"
+#include "ipa_nat_drvi.h"
+
+#include "ipa_nat_map.h"
+
+#include "ipa_nat_statemach.h"
+
+#undef PRCNT_OF
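+/* Evaluates to 25% of v */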
+#define PRCNT_OF(v) \
+	((.25) * (v))
+
+#undef  CHOOSE_MEM_SUB
+#define CHOOSE_MEM_SUB() \
+	(nati_obj.curr_state == NATI_STATE_HYBRID) ? \
+	SRAM_SUB : \
+	DDR_SUB
+
+#undef  CHOOSE_MAPS
+#define CHOOSE_MAPS(o2n, n2o) \
+	do { \
+		uint32_t sub = CHOOSE_MEM_SUB(); \
+		o2n = nati_obj.map_pairs[sub].orig2new_map; \
+		n2o = nati_obj.map_pairs[sub].new2orig_map; \
+	} while (0)
+
+#undef  CHOOSE_CNTR
+#define CHOOSE_CNTR() \
+	&(nati_obj.tot_rules_in_table[CHOOSE_MEM_SUB()])
+
+#undef  CHOOSE_SW_STATS
+#define CHOOSE_SW_STATS() \
+	&(nati_obj.sw_stats[CHOOSE_MEM_SUB()])
+
+/*
+ * BACKGROUND INFORMATION
+ *
+ * As it relates to why this file exists...
+ *
+ * In the past, a NAT table API was presented to upper layer
+ * applications.  Said API managed low-level details of NAT table
+ * creation, manipulation, and destruction.  The API
+ * managed/manipulated NAT tables that lived exclusively in DDR. DDR
+ * based tables are fine, but lead to unneeded bus accesses to/from DDR
+ * by the IPA while doing its NAT duties. These accesses cause NAT to
+ * take longer than necessary.
+ *
+ * If the DDR bus accesses could be eliminated by storing the table in
+ * the IPA's internal memory (i.e. SRAM), the IPA's IPv4 NAT could be
+ * sped up. This leads us to the following description of this file's
+ * intent.
+ *
+ * The purpose and intent of this file is to hijack the API described
+ * above, but in a way that allows the tables to live in both SRAM and
+ * DDR.  The details of whether SRAM or DDR is being used are hidden
+ * from the application.  More specifically, the API will allow the
+ * following to occur completely transparently to the application using
+ * the API.
+ *
+ *   (1) NAT tables can live exclusively in DDR (traditional and
+ *       historically like before)
+ *
+ *   (2) NAT tables can live simultaneously in SRAM and DDR.  SRAM
+ *       initially being used by the IPA, but both being kept in sync.
+ *       When SRAM becomes too full, a switch to DDR will occur.
+ *
+ *   (3) The same as (2) above, but after the switch to DDR occurs,
+ *       we'll have the ability to switch back to SRAM if/when DDR
+ *       table entry deletions take us to a small enough entry
+ *       count; that is, an entry count that, when met, allows us to
+ *       switch back to using SRAM again.
+ *
+ * As above, all of these details will just magically happen unknown
+ * to the application using the API.  The implementation is done via a
+ * state machine.
+ */
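+
+/*
+ * Sketch (illustrative pseudo-code; the real transitions live inside
+ * ipa_nati_statemach()) of the hybrid behavior described above:
+ *
+ *   if (curr_state == NATI_STATE_HYBRID && sram_table_too_full)
+ *       curr_state = NATI_STATE_HYBRID_DDR;   // fall over to DDR
+ *   else if (curr_state == NATI_STATE_HYBRID_DDR &&
+ *            ddr_rule_count <= back_to_sram_thresh)
+ *       curr_state = NATI_STATE_HYBRID;       // return to SRAM
+ */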
+
+/*
+ * The following is used to keep state machine state during and
+ * between API calls...
+ */
+static ipa_nati_obj nati_obj = {
+	.prev_state          = NATI_STATE_NULL,
+	.curr_state          = NATI_STATE_DDR_ONLY,
+	.ddr_tbl_hdl         = 0,
+	.sram_tbl_hdl        = 0,
+	.tot_slots_in_sram   = 0,
+	.back_to_sram_thresh = 0,
+	/*
+	 * Remember:
+	 *   tot_rules_in_table[0] for ddr, and
+	 *   tot_rules_in_table[1] for sram
+	 */
+	.tot_rules_in_table  = { 0, 0 },
+	/*
+	 * Remember:
+	 *   map_pairs[0] for ddr, and
+	 *   map_pairs[1] for sram
+	 */
+	.map_pairs = { {MAP_NUM_00, MAP_NUM_01}, {MAP_NUM_02, MAP_NUM_03} },
+	/*
+	 * Remember:
+	 *   sw_stats[0] for ddr, and
+	 *   sw_stats[1] for sram
+	 */
+	.sw_stats = { {0, 0}, {0, 0} },
+};
+
+/*
+ * The following is needed to protect nati_obj above, as well as a number
+ * of data structures within the file ipa_nat_drvi.c
+ */
+pthread_mutex_t nat_mutex;
+static bool     nat_mutex_init = false;
+
+static inline int mutex_init(void)
+{
+	static pthread_mutexattr_t nat_mutex_attr;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	ret = pthread_mutexattr_init(&nat_mutex_attr);
+
+	if ( ret != 0 )
+	{
+		IPAERR("pthread_mutexattr_init() failed: ret(%d)\n", ret );
+		goto bail;
+	}
+
+	ret = pthread_mutexattr_settype(
+		&nat_mutex_attr, PTHREAD_MUTEX_RECURSIVE);
+
+	if ( ret != 0 )
+	{
+		IPAERR("pthread_mutexattr_settype() failed: ret(%d)\n",
+			   ret );
+		goto bail;
+	}
+
+	ret = pthread_mutex_init(&nat_mutex, &nat_mutex_attr);
+
+	if ( ret != 0 )
+	{
+		IPAERR("pthread_mutex_init() failed: ret(%d)\n",
+			   ret );
+		goto bail;
+	}
+
+	nat_mutex_init = true;
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/*
+ * ****************************************************************************
+ *
+ * HIJACKED API FUNCTIONS START HERE
+ *
+ * ****************************************************************************
+ */
+int ipa_nati_add_ipv4_tbl(
+	uint32_t    public_ip_addr,
+	const char* mem_type_ptr,
+	uint16_t    number_of_entries,
+	uint32_t*   tbl_hdl)
+{
+	table_add_args args = {
+		.public_ip_addr    = public_ip_addr,
+		.number_of_entries = number_of_entries,
+		.tbl_hdl           = tbl_hdl,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	/*
+	 * If first time in here, then let XML drive initial state...
+	 */
+	if (nati_obj.prev_state == NATI_STATE_NULL)
+	{
+		SET_NATIOBJ_STATE(
+			&nati_obj,
+			mem_type_str_to_ipa_nati_state(mem_type_ptr));
+	}
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_ADD_TABLE, (void*) &args);
+
+	if ( ret == 0 )
+	{
+		IPADBG("tbl_hdl val(0x%08X)\n", *tbl_hdl);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
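+
+/*
+ * Caller-side sketch (illustrative; the "DDR" string, pub_ip, and the
+ * entry count of 500 are example values handed down from the config
+ * layer):
+ *
+ *   uint32_t tbl_hdl = 0;
+ *
+ *   ipa_nati_add_ipv4_tbl(pub_ip, "DDR", 500, &tbl_hdl);
+ */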
+
+int ipa_nati_del_ipv4_table(
+	uint32_t tbl_hdl)
+{
+	table_del_args args = {
+		.tbl_hdl = tbl_hdl,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_DEL_TABLE, (void*) &args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nati_clear_ipv4_tbl(
+	uint32_t tbl_hdl )
+{
+	table_clear_args args = {
+		.tbl_hdl = tbl_hdl,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_CLR_TABLE, (void*) &args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nati_walk_ipv4_tbl(
+	uint32_t          tbl_hdl,
+	WhichTbl2Use      which,
+	ipa_table_walk_cb walk_cb,
+	void*             arb_data_ptr )
+{
+	table_walk_args args = {
+		.tbl_hdl      = tbl_hdl,
+		.which        = which,
+		.walk_cb      = walk_cb,
+		.arb_data_ptr = arb_data_ptr,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_WLK_TABLE, (void*) &args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nati_ipv4_tbl_stats(
+	uint32_t            tbl_hdl,
+	ipa_nati_tbl_stats* nat_stats_ptr,
+	ipa_nati_tbl_stats* idx_stats_ptr )
+{
+	table_stats_args args = {
+		.tbl_hdl       = tbl_hdl,
+		.nat_stats_ptr = nat_stats_ptr,
+		.idx_stats_ptr = idx_stats_ptr,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_TBL_STATS, (void*) &args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nati_add_ipv4_rule(
+	uint32_t                 tbl_hdl,
+	const ipa_nat_ipv4_rule* clnt_rule,
+	uint32_t*                rule_hdl )
+{
+	rule_add_args args = {
+		.tbl_hdl   = tbl_hdl,
+		.clnt_rule = clnt_rule,
+		.rule_hdl  = rule_hdl,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_ADD_RULE, (void*) &args);
+
+	if ( ret == 0 )
+	{
+		IPADBG("rule_hdl val(%u)\n", *rule_hdl);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nati_del_ipv4_rule(
+	uint32_t tbl_hdl,
+	uint32_t rule_hdl )
+{
+	rule_del_args args = {
+		.tbl_hdl  = tbl_hdl,
+		.rule_hdl = rule_hdl,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_DEL_RULE, (void*) &args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nati_query_timestamp(
+	uint32_t  tbl_hdl,
+	uint32_t  rule_hdl,
+	uint32_t* time_stamp)
+{
+	timestap_query_args args = {
+		.tbl_hdl    = tbl_hdl,
+		.rule_hdl   = rule_hdl,
+		.time_stamp = time_stamp,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_GET_TSTAMP, (void*) &args);
+
+	if ( ret == 0 )
+	{
+		IPADBG("time_stamp val(0x%08X)\n", *time_stamp);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_nat_switch_to(
+	enum ipa3_nat_mem_in nmi )
+{
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) || ! IN_HYBRID_STATE() )
+	{
+		IPAERR("Bad nmi(%s) and/or not in hybrid state\n",
+			   ipa3_nat_mem_in_as_str(nmi));
+		ret = -1;
+		goto bail;
+	}
+
+	if ( (nmi == IPA_NAT_MEM_IN_SRAM && nati_obj.curr_state == NATI_STATE_HYBRID_DDR)
+		 ||
+		 (nmi == IPA_NAT_MEM_IN_DDR  && nati_obj.curr_state == NATI_STATE_HYBRID) )
+	{
+		ret = ipa_nati_statemach(&nati_obj, NATI_TRIG_TBL_SWITCH, 0);
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+bool ipa_nat_is_sram_supported(void)
+{
+	return VALID_TBL_HDL(nati_obj.sram_tbl_hdl);
+}
+
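+/*
+ * Sketch (illustrative): in hybrid mode an external trigger could force
+ * a move between memories; the call returns an error outside hybrid
+ * mode and does nothing when already on the requested side:
+ *
+ *   if (ipa_nat_is_sram_supported())
+ *       ipa_nat_switch_to(IPA_NAT_MEM_IN_SRAM);
+ */
+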
+/******************************************************************************/
+/*
+ * FUNCTION: migrate_rule
+ *
+ * PARAMS:
+ *
+ *   table_ptr         (IN) The table being walked
+ *
+ *   tbl_rule_hdl      (IN) The nat rule's handle from the source table
+ *
+ *   record_ptr        (IN) The nat rule record from the source table
+ *
+ *   record_index      (IN) The record above's index in the table being walked
+ *
+ *   meta_record_ptr   (IN) If meta data in table, this will be it
+ *
+ *   meta_record_index (IN) The record above's index in the table being walked
+ *
+ *   arb_data_ptr      (IN) The destination table handle
+ *
+ * DESCRIPTION:
+ *
+ *   This routine is intended to copy records from a source table to a
+ *   destination table.
+ *
+ *   It is used in conjunction with the ipa_nati_copy_ipv4_tbl() API call
+ *   below.
+ *
+ *   It is compatible with the ipa_table_walk() API.
+ *
+ *   In the context of the ipa_nati_copy_ipv4_tbl(), the arguments
+ *   passed in are as enumerated above.
+ *
+ * AN IMPORTANT NOTE ON RULE HANDLES WHEN IN HYBRID MODE
+ *
+ *   The rule_hdl is used to find a rule in the nat table.  It is, in
+ *   effect, an index into the table.  The application above us retains
+ *   it for future manipulation of the rule in the table.
+ *
+ *   In hybrid mode, a rule can and will move between SRAM and DDR.
+ *   Because of this, its handle will change.  The application has
+ *   only the original handle and doesn't know of the new handle.  A
+ *   mapping, used in hybrid mode, will maintain a relationship
+ *   between the original handle and the rule's current real handle...
+ *
+ *   To help you get a mindset of how this is done:
+ *
+ *     The original handle will map (point) to the new handle, and the
+ *     new handle will map (point) back to the original.
+ *
+ * NOTE WELL: There are two sets of maps.  One for each memory type...
+ *
+ * RETURNS:
+ *
+ *   Returns 0 on success, non-zero on failure
+ */
+static int migrate_rule(
+	ipa_table*      table_ptr,
+	uint32_t        tbl_rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
+{
+	struct ipa_nat_rule* nat_rule_ptr = (struct ipa_nat_rule*) record_ptr;
+	uint32_t             dst_tbl_hdl  = (uint32_t) arb_data_ptr;
+
+	ipa_nat_ipv4_rule    v4_rule;
+
+	uint32_t             orig_rule_hdl;
+	uint32_t             new_rule_hdl;
+
+	uint32_t             src_orig2new_map, src_new2orig_map;
+	uint32_t             dst_orig2new_map, dst_new2orig_map;
+	uint32_t*            cnt_ptr;
+
+	const char*          mig_dir_ptr;
+
+	char                 buf[1024];
+	int                  ret;
+
+	UNUSED(buf);
+	UNUSED(record_index);
+	UNUSED(meta_record_ptr);
+	UNUSED(meta_record_index);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_mem_type(%s) tbl_rule_hdl(%u) -> %s\n",
+		   ipa3_nat_mem_in_as_str(table_ptr->nmi),
+		   tbl_rule_hdl,
+		   prep_nat_rule_4print(nat_rule_ptr, buf, sizeof(buf)));
+
+	IPADBG("dst_tbl_hdl(0x%08X)\n", dst_tbl_hdl);
+
+	/*
+	 * What is the type of the source table?
+	 */
+	if ( table_ptr->nmi == IPA_NAT_MEM_IN_SRAM )
+	{
+		mig_dir_ptr = "SRAM -> DDR";
+
+		src_orig2new_map = nati_obj.map_pairs[SRAM_SUB].orig2new_map;
+		src_new2orig_map = nati_obj.map_pairs[SRAM_SUB].new2orig_map;
+
+		dst_orig2new_map = nati_obj.map_pairs[DDR_SUB].orig2new_map;
+		dst_new2orig_map = nati_obj.map_pairs[DDR_SUB].new2orig_map;
+
+		cnt_ptr          = &(nati_obj.tot_rules_in_table[DDR_SUB]);
+	}
+	else
+	{
+		mig_dir_ptr = "DDR -> SRAM";
+
+		src_orig2new_map = nati_obj.map_pairs[DDR_SUB].orig2new_map;
+		src_new2orig_map = nati_obj.map_pairs[DDR_SUB].new2orig_map;
+
+		dst_orig2new_map = nati_obj.map_pairs[SRAM_SUB].orig2new_map;
+		dst_new2orig_map = nati_obj.map_pairs[SRAM_SUB].new2orig_map;
+
+		cnt_ptr          = &(nati_obj.tot_rules_in_table[SRAM_SUB]);
+	}
+
+	src_orig2new_map++; /* to avoid compiler usage warning */
+
+	if ( nat_rule_ptr->protocol == IPA_NAT_INVALID_PROTO_FIELD_VALUE_IN_RULE )
+	{
+		IPADBG("%s: Special \"first rule in list\" case. "
+			   "Rule's enabled bit on, but protocol implies deleted\n",
+			   mig_dir_ptr);
+		ret = 0;
+		goto bail;
+	}
+
+	ret = ipa_nat_map_find(src_new2orig_map, tbl_rule_hdl, &orig_rule_hdl);
+
+	if ( ret != 0 )
+	{
+		IPAERR("%s: ipa_nat_map_find(src_new2orig_map) fail\n", mig_dir_ptr);
+		goto bail;
+	}
+
+	memset(&v4_rule, 0, sizeof(v4_rule));
+
+	v4_rule.private_ip   = nat_rule_ptr->private_ip;
+	v4_rule.private_port = nat_rule_ptr->private_port;
+	v4_rule.protocol     = nat_rule_ptr->protocol;
+	v4_rule.public_port  = nat_rule_ptr->public_port;
+	v4_rule.target_ip    = nat_rule_ptr->target_ip;
+	v4_rule.target_port  = nat_rule_ptr->target_port;
+	v4_rule.pdn_index    = nat_rule_ptr->pdn_index;
+	v4_rule.redirect     = nat_rule_ptr->redirect;
+	v4_rule.enable       = nat_rule_ptr->enable;
+	v4_rule.time_stamp   = nat_rule_ptr->time_stamp;
+
+	ret = ipa_NATI_add_ipv4_rule(dst_tbl_hdl, &v4_rule, &new_rule_hdl);
+
+	if ( ret != 0 )
+	{
+		IPAERR("%s: ipa_NATI_add_ipv4_rule() fail\n", mig_dir_ptr);
+		goto bail;
+	}
+
+	(*cnt_ptr)++;
+
+	/*
+	 * The following is needed to maintain the original handle and
+	 * have it point to the new handle.
+	 *
+	 * Remember, original handle points to new and the new handle
+	 * points back to original.
+	 */
+	ret = ipa_nat_map_add(dst_orig2new_map, orig_rule_hdl, new_rule_hdl);
+
+	if ( ret != 0 )
+	{
+		IPAERR("%s: ipa_nat_map_add(dst_orig2new_map) fail\n", mig_dir_ptr);
+		goto bail;
+	}
+
+	ret = ipa_nat_map_add(dst_new2orig_map, new_rule_hdl, orig_rule_hdl);
+
+	if ( ret != 0 )
+	{
+		IPAERR("%s: ipa_nat_map_add(dst_new2orig_map) fail\n", mig_dir_ptr);
+		goto bail;
+	}
+
+	IPADBG("orig_rule_hdl(0x%08X) new_rule_hdl(0x%08X)\n",
+		   orig_rule_hdl, new_rule_hdl);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
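
To summarize the bookkeeping above, here is a minimal sketch of the two map insertions migrate_rule() performs once a rule has landed in the destination table (illustrative only; the map handle parameters are placeholders for nati_obj.map_pairs[SRAM_SUB/DDR_SUB]):

/* Illustrative sketch of the orig <-> new handle mapping invariant. */
static int remember_migrated_handle(
	uint32_t dst_orig2new_map,
	uint32_t dst_new2orig_map,
	uint32_t orig_rule_hdl,  /* handle the application still holds */
	uint32_t new_rule_hdl )  /* rule's handle in the destination table */
{
	/* original -> current, so API calls using the old handle can be redirected */
	int ret = ipa_nat_map_add(dst_orig2new_map, orig_rule_hdl, new_rule_hdl);

	if ( ret == 0 )
	{
		/* current -> original, so the next migration can recover the
		 * handle the application knows about */
		ret = ipa_nat_map_add(dst_new2orig_map, new_rule_hdl, orig_rule_hdl);
	}

	return ret;
}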
+
+/*
+ * ****************************************************************************
+ *
+ * STATE MACHINE CODE BEGINS HERE
+ *
+ * ****************************************************************************
+ */
+static int _smUndef(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr ); /* forward declaration */
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smDelTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the destruction of the NAT table
+ *   referenced by the passed table handle.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smDelTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_del_args* args = (table_del_args*) arb_data_ptr;
+
+	uint32_t tbl_hdl = args->tbl_hdl;
+
+	int ret;
+
+	UNUSED(nati_obj_ptr);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl);
+
+	ret = ipa_NATI_del_ipv4_table(tbl_hdl);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smAddDdrTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the creation of a NAT table in DDR.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smAddDdrTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_add_args* args = (table_add_args*) arb_data_ptr;
+
+	uint32_t  public_ip_addr    = args->public_ip_addr;
+	uint16_t  number_of_entries = args->number_of_entries;
+	uint32_t* tbl_hdl_ptr       = args->tbl_hdl;
+
+	int ret;
+
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("public_ip_addr(0x%08X) number_of_entries(%u) tbl_hdl_ptr(%p)\n",
+		   public_ip_addr, number_of_entries, tbl_hdl_ptr);
+
+	ret = ipa_NATI_add_ipv4_tbl(
+		IPA_NAT_MEM_IN_DDR,
+		public_ip_addr,
+		number_of_entries,
+		&nati_obj_ptr->ddr_tbl_hdl);
+
+	if ( ret == 0 )
+	{
+		*tbl_hdl_ptr = nati_obj_ptr->ddr_tbl_hdl;
+
+		IPADBG("DDR table creation successful: tbl_hdl(0x%08X)\n",
+			   *tbl_hdl_ptr);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smAddSramTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the creation of a NAT table in SRAM.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smAddSramTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_add_args* args = (table_add_args*) arb_data_ptr;
+
+	uint32_t  public_ip_addr    = args->public_ip_addr;
+	uint16_t  number_of_entries = args->number_of_entries;
+	uint32_t* tbl_hdl_ptr       = args->tbl_hdl;
+
+	uint32_t  sram_size = 0;
+
+	int ret;
+
+	UNUSED(number_of_entries);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("public_ip_addr(0x%08X) tbl_hdl_ptr(%p)\n",
+		   public_ip_addr, tbl_hdl_ptr);
+
+	ret = ipa_nati_get_sram_size(&sram_size);
+
+	if ( ret == 0 )
+	{
+		ret = ipa_calc_num_sram_table_entries(
+			sram_size,
+			sizeof(struct ipa_nat_rule),
+			sizeof(struct ipa_nat_indx_tbl_rule),
+			(uint16_t*) &nati_obj_ptr->tot_slots_in_sram);
+
+		if ( ret == 0 )
+		{
+			nati_obj_ptr->back_to_sram_thresh =
+				PRCNT_OF(nati_obj_ptr->tot_slots_in_sram);
+
+			IPADBG("sram_size(%u or 0x%x) tot_slots_in_sram(%u) back_to_sram_thresh(%u)\n",
+				   sram_size,
+				   sram_size,
+				   nati_obj_ptr->tot_slots_in_sram,
+				   nati_obj_ptr->back_to_sram_thresh);
+
+			IPADBG("Voting clock on for sram table creation\n");
+
+			if ( (ret = ipa_nat_vote_clock(IPA_APP_CLK_VOTE)) != 0 )
+			{
+				IPAERR("Voting clock on failed\n");
+				goto done;
+			}
+
+			ret = ipa_NATI_add_ipv4_tbl(
+				IPA_NAT_MEM_IN_SRAM,
+				public_ip_addr,
+				nati_obj_ptr->tot_slots_in_sram,
+				&nati_obj_ptr->sram_tbl_hdl);
+
+			if ( ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE) != 0 )
+			{
+				IPAWARN("Voting clock off failed\n");
+			}
+
+			if ( ret == 0 )
+			{
+				*tbl_hdl_ptr = nati_obj_ptr->sram_tbl_hdl;
+
+				IPADBG("SRAM table creation successful: tbl_hdl(0x%08X)\n",
+					   *tbl_hdl_ptr);
+			}
+		}
+	}
+
+done:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smAddSramAndDdrTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the creation of NAT tables in both DDR
+ *   and in SRAM.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smAddSramAndDdrTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_add_args* args = (table_add_args*) arb_data_ptr;
+
+	uint32_t  public_ip_addr    = args->public_ip_addr;
+	uint16_t  number_of_entries = args->number_of_entries;
+	uint32_t* tbl_hdl_ptr       = args->tbl_hdl;
+
+	uint32_t tbl_hdl;
+
+	int ret;
+
+	UNUSED(tbl_hdl_ptr);
+
+	IPADBG("In\n");
+
+	nati_obj_ptr->tot_rules_in_table[SRAM_SUB] = 0;
+	nati_obj_ptr->tot_rules_in_table[DDR_SUB]  = 0;
+
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].orig2new_map);
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].new2orig_map);
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].orig2new_map);
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].new2orig_map);
+
+	ret = _smAddSramTbl(nati_obj_ptr, trigger, arb_data_ptr);
+
+	if ( ret == 0 )
+	{
+		if ( nati_obj_ptr->tot_slots_in_sram >= number_of_entries )
+		{
+			/*
+			 * The number of slots in SRAM can accommodate what was
+			 * being requested for DDR, hence no need to use DDR and
+			 * we will continue by using SRAM only...
+			 */
+			SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_SRAM_ONLY);
+		}
+		else
+		{
+			/*
+			 * SRAM not big enough. Let's create secondary DDR based
+			 * table...
+			 */
+			table_add_args new_args = {
+				.public_ip_addr    = public_ip_addr,
+				.number_of_entries = number_of_entries,
+				.tbl_hdl           = &tbl_hdl,  /* to protect app's table handle above */
+			};
+
+			ret = _smAddDdrTbl(nati_obj_ptr, trigger, (void*) &new_args);
+
+			if ( ret == 0 )
+			{
+				/*
+				 * The following will tell the IPA to change focus to
+				 * SRAM...
+				 */
+				ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_GOTO_SRAM, 0);
+			}
+		}
+	}
+	else
+	{
+		/*
+		 * SRAM table creation in HYBRID mode failed.  Can we fall
+		 * back to DDR only?  We need to try and see what happens...
+		 */
+		ret = _smAddDdrTbl(nati_obj_ptr, trigger, arb_data_ptr);
+
+		if ( ret == 0 )
+		{
+			SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_DDR_ONLY);
+		}
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smDelSramAndDdrTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the destruction of the SRAM, then DDR
+ *   based NAT tables.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smDelSramAndDdrTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	int ret;
+
+	IPADBG("In\n");
+
+	nati_obj_ptr->tot_rules_in_table[SRAM_SUB] = 0;
+	nati_obj_ptr->tot_rules_in_table[DDR_SUB]  = 0;
+
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].orig2new_map);
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[SRAM_SUB].new2orig_map);
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].orig2new_map);
+	ipa_nat_map_clear(nati_obj_ptr->map_pairs[DDR_SUB].new2orig_map);
+
+	ret = _smDelTbl(nati_obj_ptr, trigger, arb_data_ptr);
+
+	if ( ret == 0 )
+	{
+		table_del_args new_args = {
+			.tbl_hdl = nati_obj_ptr->ddr_tbl_hdl,
+		};
+
+		ret = _smDelTbl(nati_obj_ptr, trigger, (void*) &new_args);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smClrTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the clearing of a table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smClrTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_clear_args* args = (table_clear_args*) arb_data_ptr;
+
+	uint32_t tbl_hdl = args->tbl_hdl;
+
+	enum ipa3_nat_mem_in nmi;
+	uint32_t             unused_hdl, sub;
+
+	int ret;
+
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl);
+
+	BREAK_TBL_HDL(tbl_hdl, nmi, unused_hdl);
+
+	unused_hdl++; /* to avoid compiler usage warning */
+
+	if ( ! IPA_VALID_NAT_MEM_IN(nmi) ) {
+		IPAERR("Bad cache type\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	sub = (nmi == IPA_NAT_MEM_IN_SRAM) ? SRAM_SUB : DDR_SUB;
+
+	nati_obj_ptr->tot_rules_in_table[sub] = 0;
+
+	ipa_nat_map_clear(nati_obj.map_pairs[sub].orig2new_map);
+	ipa_nat_map_clear(nati_obj.map_pairs[sub].new2orig_map);
+
+	ret = ipa_NATI_clear_ipv4_tbl(tbl_hdl);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smClrTblHybrid
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the clearing of the appropriate hybrid
+ *   table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smClrTblHybrid(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_clear_args* args = (table_clear_args*) arb_data_ptr;
+
+	uint32_t tbl_hdl = args->tbl_hdl;
+
+	table_clear_args new_args = {
+		.tbl_hdl =
+		  (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ?
+		    tbl_hdl :
+		    nati_obj_ptr->ddr_tbl_hdl,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = _smClrTbl(nati_obj_ptr, trigger, (void*) &new_args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smWalkTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the walk of a table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smWalkTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_walk_args* args = (table_walk_args*) arb_data_ptr;
+
+	uint32_t          tbl_hdl = args->tbl_hdl;
+	WhichTbl2Use      which   = args->which;
+	ipa_table_walk_cb walk_cb = args->walk_cb;
+	void*             wadp    = args->arb_data_ptr;
+
+	int ret;
+
+	UNUSED(nati_obj_ptr);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl);
+
+	ret = ipa_NATI_walk_ipv4_tbl(tbl_hdl, which, walk_cb, wadp);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smWalkTblHybrid
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the walk of the appropriate hybrid
+ *   table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smWalkTblHybrid(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_walk_args* args = (table_walk_args*) arb_data_ptr;
+
+	uint32_t          tbl_hdl = args->tbl_hdl;
+	WhichTbl2Use      which   = args->which;
+	ipa_table_walk_cb walk_cb = args->walk_cb;
+	void*             wadp    = args->arb_data_ptr;
+
+	table_walk_args new_args = {
+		.tbl_hdl =
+		  (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ?
+		    tbl_hdl :
+		    nati_obj_ptr->ddr_tbl_hdl,
+		.which        = which,
+		.walk_cb      = walk_cb,
+		.arb_data_ptr = wadp,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = _smWalkTbl(nati_obj_ptr, trigger, (void*) &new_args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smStatTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will get size/usage stats for a table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smStatTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_stats_args* args = (table_stats_args*) arb_data_ptr;
+
+	uint32_t            tbl_hdl       = args->tbl_hdl;
+	ipa_nati_tbl_stats* nat_stats_ptr = args->nat_stats_ptr;
+	ipa_nati_tbl_stats* idx_stats_ptr = args->idx_stats_ptr;
+
+	int ret;
+
+	UNUSED(nati_obj_ptr);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X)\n", tbl_hdl);
+
+	ret = ipa_NATI_ipv4_tbl_stats(tbl_hdl, nat_stats_ptr, idx_stats_ptr);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smStatTblHybrid
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the retrieval of table size/usage stats
+ *   for the appropriate hybrid table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smStatTblHybrid(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	table_stats_args* args = (table_stats_args*) arb_data_ptr;
+
+	uint32_t            tbl_hdl       = args->tbl_hdl;
+	ipa_nati_tbl_stats* nat_stats_ptr = args->nat_stats_ptr;
+	ipa_nati_tbl_stats* idx_stats_ptr = args->idx_stats_ptr;
+
+	table_stats_args new_args = {
+		.tbl_hdl =
+		  (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ?
+		    tbl_hdl :
+		    nati_obj_ptr->ddr_tbl_hdl,
+		.nat_stats_ptr = nat_stats_ptr,
+		.idx_stats_ptr = idx_stats_ptr,
+	};
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = _smStatTbl(nati_obj_ptr, trigger, (void*) &new_args);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smAddRuleToTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the addition of a NAT rule into the
+ *   table referenced by the passed table handle.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smAddRuleToTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	rule_add_args* args = (rule_add_args*) arb_data_ptr;
+
+	uint32_t           tbl_hdl   = args->tbl_hdl;
+	ipa_nat_ipv4_rule* clnt_rule = (ipa_nat_ipv4_rule*) args->clnt_rule;
+	uint32_t*          rule_hdl  = args->rule_hdl;
+
+	char buf[1024];
+
+	int ret;
+
+	UNUSED(buf);
+	UNUSED(nati_obj_ptr);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X) clnt_rule_ptr(%p) rule_hdl_ptr(%p) %s\n",
+		   tbl_hdl, clnt_rule, rule_hdl,
+		   prep_nat_ipv4_rule_4print(clnt_rule, buf, sizeof(buf)));
+
+	clnt_rule->redirect = clnt_rule->enable = clnt_rule->time_stamp = 0;
+
+	ret = ipa_NATI_add_ipv4_rule(tbl_hdl, clnt_rule, rule_hdl);
+
+	if ( ret == 0 )
+	{
+		uint32_t* cnt_ptr = CHOOSE_CNTR();
+
+		(*cnt_ptr)++;
+
+		IPADBG("rule_hdl value(%u or 0x%08X)\n",
+			   *rule_hdl, *rule_hdl);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smDelRuleFromTbl
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the deletion of a NAT rule from the
+ *   table referenced by the passed table handle.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smDelRuleFromTbl(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	rule_del_args* args  = (rule_del_args*) arb_data_ptr;
+
+	uint32_t tbl_hdl  = args->tbl_hdl;
+	uint32_t rule_hdl = args->rule_hdl;
+
+	int ret;
+
+	UNUSED(nati_obj_ptr);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X) rule_hdl(%u)\n", tbl_hdl, rule_hdl);
+
+	ret = ipa_NATI_del_ipv4_rule(tbl_hdl, rule_hdl);
+
+	if ( ret == 0 )
+	{
+		uint32_t* cnt_ptr = CHOOSE_CNTR();
+
+		(*cnt_ptr)--;
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smAddRuleHybrid
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the addition of a NAT rule into either
+ *   the SRAM or DDR based table.
+ *
+ *   *** !!! HOWEVER *** REMEMBER !!! ***
+ *
+ *   We're here because we're in a HYBRID state...with the potential
+ *   of moving between SRAM and DDR.  THIS HAS IMPLICATIONS AS IT
+ *   RELATES TO RULE MAPPING.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smAddRuleHybrid(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	rule_add_args* args = (rule_add_args*) arb_data_ptr;
+
+	uint32_t           tbl_hdl   = args->tbl_hdl;
+	ipa_nat_ipv4_rule* clnt_rule = (ipa_nat_ipv4_rule*) args->clnt_rule;
+	uint32_t*          rule_hdl  = args->rule_hdl;
+
+	rule_add_args new_args = {
+		.tbl_hdl =
+		  (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ?
+		    tbl_hdl :
+		    nati_obj_ptr->ddr_tbl_hdl,
+		.clnt_rule = clnt_rule,
+		.rule_hdl  = rule_hdl,
+	};
+
+	uint32_t orig2new_map, new2orig_map;
+
+	int ret;
+
+	IPADBG("In\n");
+
+	ret = _smAddRuleToTbl(nati_obj_ptr, trigger, (void*) &new_args);
+
+	if ( ret == 0 )
+	{
+		/*
+		 * The rule_hdl is used to find a rule in the nat table.  It
+		 * is, in effect, an index into the table.  The application
+		 * above us retains it for future manipulation of the rule in
+		 * the table.
+		 *
+		 * In hybrid mode, a rule can and will move between SRAM and
+		 * DDR.  Because of this, its handle will change.  The
+		 * application has only the original handle and doesn't know
+		 * of the new handle.  A mapping, used in hybrid mode, will
+		 * maintain a relationship between the original handle and the
+		 * rule's current real handle...
+		 *
+		 * To help you get a mindset of how this is done:
+		 *
+		 *   The original handle will map (point) to the new handle,
+		 *   and the new handle will map (point) back to the original.
+		 *
+		 * NOTE WELL: There are two sets of maps.  One for each memory
+		 *            type...
+		 */
+		CHOOSE_MAPS(orig2new_map, new2orig_map);
+
+		ret = ipa_nat_map_add(orig2new_map, *rule_hdl, *rule_hdl);
+
+		if ( ret == 0 )
+		{
+			ret = ipa_nat_map_add(new2orig_map, *rule_hdl, *rule_hdl);
+		}
+	}
+	else
+	{
+		if ( nati_obj_ptr->curr_state == NATI_STATE_HYBRID )
+		{
+			/*
+			 * In hybrid mode, we always start in SRAM...hence
+			 * NATI_STATE_HYBRID implies SRAM.  The rule addition
+			 * above did not work, meaning the SRAM table is full,
+			 * hence let's jump to DDR...
+			 *
+			 * The following will focus us on DDR and cause the copy
+			 * of data from SRAM to DDR.
+			 */
+			IPAINFO("Add of rule failed...attempting table switch\n");
+
+			ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_TBL_SWITCH, 0);
+
+			if ( ret == 0 )
+			{
+				SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID_DDR);
+
+				/*
+				 * Now add the rule to DDR...
+				 */
+				ret = ipa_nati_statemach(nati_obj_ptr, trigger, arb_data_ptr);
+			}
+		}
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
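
From the application's side, the net effect of the mapping maintained above is that the handle returned at add time remains usable even after the engine has migrated the rule between memories. A minimal caller sketch (illustrative only):

static int add_then_delete(uint32_t tbl_hdl, const ipa_nat_ipv4_rule* rule)
{
	uint32_t rule_hdl = 0;
	int      ret;

	ret = ipa_nati_add_ipv4_rule(tbl_hdl, rule, &rule_hdl);

	if ( ret == 0 )
	{
		/* ...time passes; the engine may have switched SRAM <-> DDR... */
		ret = ipa_nati_del_ipv4_rule(tbl_hdl, rule_hdl);
	}

	return ret;
}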
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smDelRuleHybrid
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the deletion of a NAT rule from either
+ *   the SRAM or DDR based table.
+ *
+ *   *** !!! HOWEVER *** REMEMBER !!! ***
+ *
+ *   We're here because we're in a HYBRID state...with the potential
+ *   of moving between SRAM and DDR.  THIS HAS IMPLICATIONS AS IT
+ *   RELATES TO RULE MAPPING.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smDelRuleHybrid(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	rule_del_args* args = (rule_del_args*) arb_data_ptr;
+
+	uint32_t tbl_hdl       = args->tbl_hdl;
+	uint32_t orig_rule_hdl = args->rule_hdl;
+
+	uint32_t new_rule_hdl;
+
+	uint32_t orig2new_map,  new2orig_map;
+
+	int      ret;
+
+	IPADBG("In\n");
+
+	CHOOSE_MAPS(orig2new_map, new2orig_map);
+
+	/*
+	 * The rule_hdl is used to find a rule in the nat table.  It is,
+	 * in effect, an index into the table.  The application above us
+	 * retains it for future manipulation of the rule in the table.
+	 *
+	 * In hybrid mode, a rule can and will move between SRAM and DDR.
+	 * Because of this, its handle will change.  The application has
+	 * only the original handle and doesn't know of the new handle.  A
+	 * mapping, used in hybrid mode, will maintain a relationship
+	 * between the original handle and the rule's current real
+	 * handle...
+	 *
+	 * To help you get a mindset of how this is done:
+	 *
+	 *   The original handle will map (point) to the new handle, and
+	 *   the new handle will map (point) back to the original.
+	 *
+	 * NOTE WELL: There are two sets of maps.  One for each memory
+	 *            type...
+	 */
+	ret = ipa_nat_map_del(orig2new_map, orig_rule_hdl, &new_rule_hdl);
+
+	if ( ret == 0 )
+	{
+		rule_del_args new_args = {
+			.tbl_hdl =
+			  (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ?
+			    tbl_hdl :
+			    nati_obj_ptr->ddr_tbl_hdl,
+			.rule_hdl = new_rule_hdl,
+		};
+
+		IPADBG("orig_rule_hdl(0x%08X) -> new_rule_hdl(0x%08X)\n",
+			   orig_rule_hdl, new_rule_hdl);
+
+		ipa_nat_map_del(new2orig_map, new_rule_hdl, NULL);
+
+		ret = _smDelRuleFromTbl(nati_obj_ptr, trigger, (void*) &new_args);
+
+		if ( ret == 0 && nati_obj_ptr->curr_state == NATI_STATE_HYBRID_DDR )
+		{
+			/*
+			 * We need to check when/if we can go back to SRAM.
+			 *
+			 * How/why can we go back?
+			 *
+			 *   Given enough deletions, and when we get to a user
+			 *   defined threshold (i.e., a percentage of what SRAM can
+			 *   hold), we can pop back to using SRAM.
+			 */
+			uint32_t* cnt_ptr = CHOOSE_CNTR();
+
+			if ( *cnt_ptr <= nati_obj_ptr->back_to_sram_thresh )
+			{
+				/*
+				 * The following will focus us on SRAM and cause the copy
+				 * of data from DDR to SRAM.
+				 */
+				IPAINFO("Switch back to SRAM threshold has been reached -> "
+						"Total rules in DDR(%u) <= SRAM THRESH(%u)\n",
+						*cnt_ptr,
+						nati_obj_ptr->back_to_sram_thresh);
+
+				ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_TBL_SWITCH, 0);
+
+				if ( ret == 0 )
+				{
+					SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID);
+				}
+				else
+				{
+					/*
+					 * The following will force us to stay in DDR for
+					 * now, but the next delete will trigger the
+					 * switch logic above to run again...perhaps it
+					 * will work then.
+					 */
+					ret = 0;
+				}
+			}
+		}
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
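
A worked example of the switch-back threshold used above (the numbers are hypothetical; the actual percentage is whatever PRCNT_OF() expands to): if SRAM holds 1,000 slots and PRCNT_OF() works out to 80 percent, back_to_sram_thresh is 800. Once deletions bring the DDR rule count down to 800 or fewer, the delete path above fires NATI_TRIG_TBL_SWITCH and the engine copies the remaining rules back to SRAM.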
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smGoToDdr
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the IPA to use the DDR based NAT
+ *   table...
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smGoToDdr(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	int ret;
+
+	UNUSED(trigger);
+	UNUSED(arb_data_ptr);
+
+	IPADBG("In\n");
+
+	ret = ipa_NATI_post_ipv4_init_cmd(nati_obj_ptr->ddr_tbl_hdl);
+
+	if ( ret == 0 )
+	{
+		SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID_DDR);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smGoToSram
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause the IPA to use the SRAM based NAT
+ *   table...
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smGoToSram(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	int ret;
+
+	UNUSED(trigger);
+	UNUSED(arb_data_ptr);
+
+	IPADBG("In\n");
+
+	ret = ipa_NATI_post_ipv4_init_cmd(nati_obj_ptr->sram_tbl_hdl);
+
+	if ( ret == 0 )
+	{
+		SET_NATIOBJ_STATE(nati_obj_ptr, NATI_STATE_HYBRID);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smSwitchFromDdrToSram
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause a copy of the DDR table to SRAM and then
+ *   will make the IPA use the SRAM...
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smSwitchFromDdrToSram(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	nati_switch_stats* sw_stats_ptr = CHOOSE_SW_STATS();
+
+	uint32_t*          cnt_ptr      = CHOOSE_CNTR();
+
+	ipa_nati_tbl_stats nat_stats, idx_stats;
+
+	const char*        mem_type;
+
+	uint64_t           start, stop;
+
+	int stats_ret, ret;
+
+	UNUSED(cnt_ptr);
+	UNUSED(trigger);
+	UNUSED(arb_data_ptr);
+
+	IPADBG("In\n");
+
+	stats_ret = ipa_NATI_ipv4_tbl_stats(
+		nati_obj_ptr->ddr_tbl_hdl, &nat_stats, &idx_stats);
+
+	currTimeAs(TimeAsNanSecs, &start);
+
+	/*
+	 * First, switch focus to SRAM...
+	 */
+	ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_GOTO_SRAM, 0);
+
+	if ( ret == 0 )
+	{
+		/*
+		 * Clear destination counter...
+		 */
+		nati_obj_ptr->tot_rules_in_table[SRAM_SUB] = 0;
+
+		/*
+		 * Clear destination SRAM maps...
+		 */
+		ipa_nat_map_clear(nati_obj.map_pairs[SRAM_SUB].orig2new_map);
+		ipa_nat_map_clear(nati_obj.map_pairs[SRAM_SUB].new2orig_map);
+
+		/*
+		 * Now copy DDR's content to SRAM...
+		 */
+		ret = ipa_nati_copy_ipv4_tbl(
+			nati_obj_ptr->ddr_tbl_hdl,
+			nati_obj_ptr->sram_tbl_hdl,
+			migrate_rule);
+
+		currTimeAs(TimeAsNanSecs, &stop);
+
+		if ( ret == 0 )
+		{
+			sw_stats_ptr->pass += 1;
+
+			IPADBG("Transition from DDR to SRAM took %f microseconds\n",
+				   (float) (stop - start) / 1000.0);
+		}
+		else
+		{
+			sw_stats_ptr->fail += 1;
+		}
+
+		IPADBG("Transition pass/fail counts (DDR to SRAM) PASS: %u FAIL: %u\n",
+			   sw_stats_ptr->pass,
+			   sw_stats_ptr->fail);
+
+		if ( stats_ret == 0 )
+		{
+			mem_type = ipa3_nat_mem_in_as_str(nat_stats.nmi);
+
+			/*
+			 * NAT table stats...
+			 */
+			IPADBG("Able to add (%u) records to %s "
+				   "NAT table of size (%u) or (%f) percent\n",
+				   *cnt_ptr,
+				   mem_type,
+				   nat_stats.tot_ents,
+				   ((float) *cnt_ptr / (float) nat_stats.tot_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "NAT BASE table of size (%u) or (%f) percent\n",
+				   nat_stats.tot_base_ents_filled,
+				   mem_type,
+				   nat_stats.tot_base_ents,
+				   ((float) nat_stats.tot_base_ents_filled /
+					(float) nat_stats.tot_base_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "NAT EXPN table of size (%u) or (%f) percent\n",
+				   nat_stats.tot_expn_ents_filled,
+				   mem_type,
+				   nat_stats.tot_expn_ents,
+				   ((float) nat_stats.tot_expn_ents_filled /
+					(float) nat_stats.tot_expn_ents) * 100.0);
+
+			IPADBG("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+				   mem_type,
+				   nat_stats.tot_chains,
+				   nat_stats.min_chain_len,
+				   nat_stats.max_chain_len,
+				   nat_stats.avg_chain_len);
+
+			/*
+			 * INDEX table stats...
+			 */
+			IPADBG("Able to add (%u) records to %s "
+				   "IDX table of size (%u) or (%f) percent\n",
+				   *cnt_ptr,
+				   mem_type,
+				   idx_stats.tot_ents,
+				   ((float) *cnt_ptr / (float) idx_stats.tot_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "IDX BASE table of size (%u) or (%f) percent\n",
+				   idx_stats.tot_base_ents_filled,
+				   mem_type,
+				   idx_stats.tot_base_ents,
+				   ((float) idx_stats.tot_base_ents_filled /
+					(float) idx_stats.tot_base_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "IDX EXPN table of size (%u) or (%f) percent\n",
+				   idx_stats.tot_expn_ents_filled,
+				   mem_type,
+				   idx_stats.tot_expn_ents,
+				   ((float) idx_stats.tot_expn_ents_filled /
+					(float) idx_stats.tot_expn_ents) * 100.0);
+
+			IPADBG("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+				   mem_type,
+				   idx_stats.tot_chains,
+				   idx_stats.min_chain_len,
+				   idx_stats.max_chain_len,
+				   idx_stats.avg_chain_len);
+
+			mem_type++; /* to avoid compiler usage warning */
+		}
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smSwitchFromSramToDdr
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following will cause a copy of the SRAM table to DDR and then
+ *   will make the IPA use the DDR...
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smSwitchFromSramToDdr(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	nati_switch_stats* sw_stats_ptr = CHOOSE_SW_STATS();
+
+	uint32_t*          cnt_ptr      = CHOOSE_CNTR();
+
+	ipa_nati_tbl_stats nat_stats, idx_stats;
+
+	const char*        mem_type;
+
+	uint64_t           start, stop;
+
+	int stats_ret, ret;
+
+	UNUSED(cnt_ptr);
+	UNUSED(trigger);
+	UNUSED(arb_data_ptr);
+
+	IPADBG("In\n");
+
+	stats_ret = ipa_NATI_ipv4_tbl_stats(
+		nati_obj_ptr->sram_tbl_hdl, &nat_stats, &idx_stats);
+
+	currTimeAs(TimeAsNanSecs, &start);
+
+	/*
+	 * First, switch focus to DDR...
+	 */
+	ret = ipa_nati_statemach(nati_obj_ptr, NATI_TRIG_GOTO_DDR, 0);
+
+	if ( ret == 0 )
+	{
+		/*
+		 * Clear destination counter...
+		 */
+		nati_obj_ptr->tot_rules_in_table[DDR_SUB] = 0;
+
+		/*
+		 * Clear destination DDR maps...
+		 */
+		ipa_nat_map_clear(nati_obj.map_pairs[DDR_SUB].orig2new_map);
+		ipa_nat_map_clear(nati_obj.map_pairs[DDR_SUB].new2orig_map);
+
+		/*
+		 * Now copy SRAM's content to DDR...
+		 */
+		ret = ipa_nati_copy_ipv4_tbl(
+			nati_obj_ptr->sram_tbl_hdl,
+			nati_obj_ptr->ddr_tbl_hdl,
+			migrate_rule);
+
+		currTimeAs(TimeAsNanSecs, &stop);
+
+		if ( ret == 0 )
+		{
+			sw_stats_ptr->pass += 1;
+
+			IPADBG("Transition from SRAM to DDR took %f microseconds\n",
+				   (float) (stop - start) / 1000.0);
+		}
+		else
+		{
+			sw_stats_ptr->fail += 1;
+		}
+
+		IPADBG("Transition pass/fail counts (SRAM to DDR) PASS: %u FAIL: %u\n",
+			   sw_stats_ptr->pass,
+			   sw_stats_ptr->fail);
+
+		if ( stats_ret == 0 )
+		{
+			mem_type = ipa3_nat_mem_in_as_str(nat_stats.nmi);
+
+			/*
+			 * NAT table stats...
+			 */
+			IPADBG("Able to add (%u) records to %s "
+				   "NAT table of size (%u) or (%f) percent\n",
+				   *cnt_ptr,
+				   mem_type,
+				   nat_stats.tot_ents,
+				   ((float) *cnt_ptr / (float) nat_stats.tot_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "NAT BASE table of size (%u) or (%f) percent\n",
+				   nat_stats.tot_base_ents_filled,
+				   mem_type,
+				   nat_stats.tot_base_ents,
+				   ((float) nat_stats.tot_base_ents_filled /
+					(float) nat_stats.tot_base_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "NAT EXPN table of size (%u) or (%f) percent\n",
+				   nat_stats.tot_expn_ents_filled,
+				   mem_type,
+				   nat_stats.tot_expn_ents,
+				   ((float) nat_stats.tot_expn_ents_filled /
+					(float) nat_stats.tot_expn_ents) * 100.0);
+
+			IPADBG("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+				   mem_type,
+				   nat_stats.tot_chains,
+				   nat_stats.min_chain_len,
+				   nat_stats.max_chain_len,
+				   nat_stats.avg_chain_len);
+
+			/*
+			 * INDEX table stats...
+			 */
+			IPADBG("Able to add (%u) records to %s "
+				   "IDX table of size (%u) or (%f) percent\n",
+				   *cnt_ptr,
+				   mem_type,
+				   idx_stats.tot_ents,
+				   ((float) *cnt_ptr / (float) idx_stats.tot_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "IDX BASE table of size (%u) or (%f) percent\n",
+				   idx_stats.tot_base_ents_filled,
+				   mem_type,
+				   idx_stats.tot_base_ents,
+				   ((float) idx_stats.tot_base_ents_filled /
+					(float) idx_stats.tot_base_ents) * 100.0);
+
+			IPADBG("Able to add (%u) records to %s "
+				   "IDX EXPN table of size (%u) or (%f) percent\n",
+				   idx_stats.tot_expn_ents_filled,
+				   mem_type,
+				   idx_stats.tot_expn_ents,
+				   ((float) idx_stats.tot_expn_ents_filled /
+					(float) idx_stats.tot_expn_ents) * 100.0);
+
+			IPADBG("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+				   mem_type,
+				   idx_stats.tot_chains,
+				   idx_stats.min_chain_len,
+				   idx_stats.max_chain_len,
+				   idx_stats.avg_chain_len);
+
+			mem_type++; /* to avoid compiler usage warning */
+		}
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smGetTmStmp
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   Retrieve the rule's timestamp from the NAT table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smGetTmStmp(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	timestap_query_args* args  = (timestap_query_args*) arb_data_ptr;
+
+	uint32_t  tbl_hdl    = args->tbl_hdl;
+	uint32_t  rule_hdl   = args->rule_hdl;
+	uint32_t* time_stamp = args->time_stamp;
+
+	int ret;
+
+	UNUSED(nati_obj_ptr);
+	UNUSED(trigger);
+
+	IPADBG("In\n");
+
+	IPADBG("tbl_hdl(0x%08X) rule_hdl(%u) time_stamp_ptr(%p)\n",
+		   tbl_hdl, rule_hdl, time_stamp);
+
+	ret = ipa_NATI_query_timestamp(tbl_hdl, rule_hdl, time_stamp);
+
+	if ( ret == 0 )
+	{
+		IPADBG("time_stamp(0x%08X)\n", *time_stamp);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smGetTmStmpHybrid
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   Retrieve the rule's timestamp from the state-appropriate NAT table.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smGetTmStmpHybrid(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	timestap_query_args* args  = (timestap_query_args*) arb_data_ptr;
+
+	uint32_t  tbl_hdl       = args->tbl_hdl;
+	uint32_t  orig_rule_hdl = args->rule_hdl;
+	uint32_t* time_stamp    = args->time_stamp;
+
+	uint32_t  new_rule_hdl;
+
+	uint32_t  orig2new_map, new2orig_map;
+
+	int       ret;
+
+	IPADBG("In\n");
+
+	CHOOSE_MAPS(orig2new_map, new2orig_map);
+
+	new2orig_map++; /* to avoid compiler usage warning */
+
+	ret = ipa_nat_map_find(orig2new_map, orig_rule_hdl, &new_rule_hdl);
+
+	if ( ret == 0 )
+	{
+		timestap_query_args new_args = {
+			.tbl_hdl =
+			  (nati_obj_ptr->curr_state == NATI_STATE_HYBRID) ?
+			    tbl_hdl :
+			    nati_obj_ptr->ddr_tbl_hdl,
+			.rule_hdl   = new_rule_hdl,
+			.time_stamp = time_stamp,
+		};
+
+		ret = _smGetTmStmp(nati_obj_ptr, trigger, (void*) &new_args);
+	}
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/******************************************************************************/
+/*
+ * The following table relates a nati object's state and a transition
+ * trigger to a callback...
+ */
+static nati_statemach_tuple
+_state_mach_tbl[NATI_STATE_LAST+1][NATI_TRIG_LAST+1] =
+{
+	{
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_NULL,       _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_ADD_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_DEL_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_CLR_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_WLK_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_TBL_STATS,  _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_ADD_RULE,   _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_DEL_RULE,   _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_TBL_SWITCH, _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_GOTO_DDR,   _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_GOTO_SRAM,  _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_GET_TSTAMP, _smUndef ),
+		SM_ROW( NATI_STATE_NULL,       NATI_TRIG_LAST,       _smUndef ),
+	},
+
+	{
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_NULL,       _smUndef ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_ADD_TABLE,  _smAddDdrTbl),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_DEL_TABLE,  _smDelTbl ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_CLR_TABLE,  _smClrTbl ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_WLK_TABLE,  _smWalkTbl ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_TBL_STATS,  _smStatTbl ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_ADD_RULE,   _smAddRuleToTbl ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_DEL_RULE,   _smDelRuleFromTbl ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_TBL_SWITCH, _smUndef ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_GOTO_DDR,   _smUndef ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_GOTO_SRAM,  _smUndef ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_GET_TSTAMP, _smGetTmStmp ),
+		SM_ROW( NATI_STATE_DDR_ONLY,   NATI_TRIG_LAST,       _smUndef ),
+	},
+
+	{
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_NULL,       _smUndef ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_ADD_TABLE,  _smAddSramTbl),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_DEL_TABLE,  _smDelTbl ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_CLR_TABLE,  _smClrTbl ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_WLK_TABLE,  _smWalkTbl ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_TBL_STATS,  _smStatTbl ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_ADD_RULE,   _smAddRuleToTbl ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_DEL_RULE,   _smDelRuleFromTbl ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_TBL_SWITCH, _smUndef ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_GOTO_DDR,   _smUndef ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_GOTO_SRAM,  _smUndef ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_GET_TSTAMP, _smGetTmStmp ),
+		SM_ROW( NATI_STATE_SRAM_ONLY,  NATI_TRIG_LAST,       _smUndef ),
+	},
+
+	{
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_NULL,       _smUndef ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_ADD_TABLE,  _smAddSramAndDdrTbl ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_DEL_TABLE,  _smDelSramAndDdrTbl ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_CLR_TABLE,  _smClrTblHybrid ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_WLK_TABLE,  _smWalkTblHybrid ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_TBL_STATS,  _smStatTblHybrid ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_ADD_RULE,   _smAddRuleHybrid ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_DEL_RULE,   _smDelRuleHybrid ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_TBL_SWITCH, _smSwitchFromSramToDdr ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_GOTO_DDR,   _smGoToDdr ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_GOTO_SRAM,  _smGoToSram ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_GET_TSTAMP, _smGetTmStmpHybrid ),
+		SM_ROW( NATI_STATE_HYBRID,     NATI_TRIG_LAST,       _smUndef ),
+	},
+
+	{
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_NULL,       _smUndef ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_ADD_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_DEL_TABLE,  _smDelSramAndDdrTbl ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_CLR_TABLE,  _smClrTblHybrid ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_WLK_TABLE,  _smWalkTblHybrid ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_TBL_STATS,  _smStatTblHybrid ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_ADD_RULE,   _smAddRuleHybrid ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_DEL_RULE,   _smDelRuleHybrid ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_TBL_SWITCH, _smSwitchFromDdrToSram ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_GOTO_DDR,   _smGoToDdr ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_GOTO_SRAM,  _smGoToSram ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_GET_TSTAMP, _smGetTmStmpHybrid ),
+		SM_ROW( NATI_STATE_HYBRID_DDR, NATI_TRIG_LAST,       _smUndef ),
+	},
+
+	{
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_NULL,       _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_ADD_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_DEL_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_CLR_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_WLK_TABLE,  _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_TBL_STATS,  _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_ADD_RULE,   _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_DEL_RULE,   _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_TBL_SWITCH, _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_GOTO_DDR,   _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_GOTO_SRAM,  _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_GET_TSTAMP, _smUndef ),
+		SM_ROW( NATI_STATE_LAST,       NATI_TRIG_LAST,       _smUndef ),
+	},
+};
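
The table above is consumed with a plain two-dimensional lookup. A stripped-down sketch of the pattern follows (illustrative only; the column count is a hypothetical stand-in for NATI_TRIG_LAST+1, and the real nati_statemach_tuple also carries the state, trigger, and callback names as strings for logging):

#define NUM_TRIGGERS 13  /* hypothetical column count for this sketch */

typedef int (*sm_cb_t)(void* obj_ptr, int trigger, void* arb_data_ptr);

typedef struct
{
	sm_cb_t sm_cb;
} sm_cell;

/* Run the callback registered for the current state/trigger pair. */
static int sm_dispatch(
	sm_cell tbl[][NUM_TRIGGERS],
	int     state,
	int     trigger,
	void*   obj_ptr,
	void*   arb_data_ptr )
{
	return tbl[state][trigger].sm_cb(obj_ptr, trigger, arb_data_ptr);
}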
+
+/******************************************************************************/
+/*
+ * FUNCTION: _smUndef
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Whatever you like
+ *
+ * DESCRIPTION:
+ *
+ *   The following does nothing, except report an undefined action for
+ *   a particular state/trigger combo...
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+static int _smUndef(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	UNUSED(arb_data_ptr);
+
+	IPAERR("CB(%s): undefined action for STATE(%s) with TRIGGER(%s)\n",
+		   _state_mach_tbl[nati_obj_ptr->curr_state][trigger].sm_cb_as_str,
+		   _state_mach_tbl[nati_obj_ptr->curr_state][trigger].state_as_str,
+		   _state_mach_tbl[nati_obj_ptr->curr_state][trigger].trigger_as_str);
+
+	return -1;
+}
+
+/******************************************************************************/
+/*
+ * FUNCTION: ipa_nati_statemach
+ *
+ * PARAMS:
+ *
+ *   nati_obj_ptr (IN) A pointer to an initialized nati object
+ *
+ *   trigger      (IN) The trigger to run through the state machine
+ *
+ *   arb_data_ptr (IN) Anything you like.  Will be passed, untouched,
+ *                     to the state/trigger callback function.
+ *
+ * DESCRIPTION:
+ *
+ *   This function allows a nati object and a trigger to be run
+ *   through the state machine.
+ *
+ * RETURNS:
+ *
+ *   zero on success, otherwise non-zero
+ */
+int ipa_nati_statemach(
+	ipa_nati_obj*    nati_obj_ptr,
+	ipa_nati_trigger trigger,
+	void*            arb_data_ptr )
+{
+	const char* ss_ptr  = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].state_as_str;
+	const char* ts_ptr  = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].trigger_as_str;
+	const char* cbs_ptr = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].sm_cb_as_str;
+
+	bool vote = false;
+
+	int ret;
+
+	UNUSED(ss_ptr);
+	UNUSED(ts_ptr);
+	UNUSED(cbs_ptr);
+
+	IPADBG("In\n");
+
+	if ( ! nat_mutex_init )
+	{
+		ret = mutex_init();
+
+		if ( ret != 0 )
+		{
+			goto bail;
+		}
+	}
+
+	if ( pthread_mutex_lock(&nat_mutex) )
+	{
+		IPAERR("Unable to lock the nat mutex\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	IPADBG("STATE(%s) TRIGGER(%s) CB(%s)\n", ss_ptr, ts_ptr, cbs_ptr);
+
+	vote = VOTE_REQUIRED(trigger);
+
+	if ( vote )
+	{
+		IPADBG("Voting clock on STATE(%s) TRIGGER(%s)\n",
+			   ss_ptr, ts_ptr);
+
+		if ( ipa_nat_vote_clock(IPA_APP_CLK_VOTE) != 0 )
+		{
+			IPAERR("Voting failed STATE(%s) TRIGGER(%s)\n", ss_ptr, ts_ptr);
+			ret = -EINVAL;
+			goto unlock;
+		}
+	}
+
+	ret = _state_mach_tbl[nati_obj_ptr->curr_state][trigger].sm_cb(
+		nati_obj_ptr, trigger, arb_data_ptr);
+
+	if ( vote )
+	{
+		IPADBG("Voting clock off STATE(%s) TRIGGER(%s)\n",
+			   ss_ptr, ts_ptr);
+
+		if ( ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE) != 0 )
+		{
+			IPAERR("Voting failed STATE(%s) TRIGGER(%s)\n", ss_ptr, ts_ptr);
+		}
+	}
+
+unlock:
+	if ( pthread_mutex_unlock(&nat_mutex) )
+	{
+		IPAERR("Unable to unlock the nat mutex\n");
+		ret = (ret) ? ret : -EPERM;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
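
The vote/run/devote bracket inside the dispatcher above is a reusable pattern in its own right; a hedged sketch of it in isolation (illustrative only, not part of the patch):

/* Vote the IPA clock on, run one operation, then drop the vote even on failure. */
static int with_clock_vote(int (*op)(void* ctx), void* ctx)
{
	int ret;

	if ( ipa_nat_vote_clock(IPA_APP_CLK_VOTE) != 0 )
	{
		return -EINVAL;
	}

	ret = op(ctx);

	if ( ipa_nat_vote_clock(IPA_APP_CLK_DEVOTE) != 0 )
	{
		/* log and continue; the operation's own result still stands */
	}

	return ret;
}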
diff --git a/ipanat/src/ipa_nat_utils.c b/ipanat/src/ipa_nat_utils.c
new file mode 100644
index 0000000..e10207c
--- /dev/null
+++ b/ipanat/src/ipa_nat_utils.c
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2013, 2018-2019 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "ipa_nat_utils.h"
+#include <sys/ioctl.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <stdlib.h>
+
+#define IPA_MAX_MSG_LEN 4096
+
+static char dbg_buff[IPA_MAX_MSG_LEN];
+
+#if !defined(MSM_IPA_TESTS) && !defined(USE_GLIB) && !defined(FEATURE_IPA_ANDROID)
+size_t strlcpy(char* dst, const char* src, size_t size)
+{
+	size_t i;
+
+	if (size == 0)
+		return strlen(src);
+
+	for (i = 0; i < (size - 1) && src[i] != '\0'; ++i)
+		dst[i] = src[i];
+
+	dst[i] = '\0';
+
+	return i + strlen(src + i);
+}
+#endif
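
A brief usage note on the fallback above: strlcpy() returns the length of the source string, so a return value greater than or equal to the destination size indicates truncation. A small sketch (function and parameter names are hypothetical):

static void copy_name(char* dst, size_t dst_sz, const char* src)
{
	if ( strlcpy(dst, src, dst_sz) >= dst_sz )
	{
		/* src did not fit; dst now holds a truncated, NUL-terminated copy */
	}
}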
+
+ipa_descriptor* ipa_descriptor_open(void)
+{
+	ipa_descriptor* desc_ptr;
+	int res = 0;
+
+	IPADBG("In\n");
+
+	desc_ptr = calloc(1, sizeof(ipa_descriptor));
+
+	if ( desc_ptr == NULL )
+	{
+		IPAERR("Unable to allocate ipa_descriptor\n");
+		goto bail;
+	}
+
+	desc_ptr->fd = open(IPA_DEV_NAME, O_RDONLY);
+
+	if (desc_ptr->fd < 0)
+	{
+		IPAERR("Unable to open ipa device\n");
+		goto free;
+	}
+
+	res = ioctl(desc_ptr->fd, IPA_IOC_GET_HW_VERSION, &desc_ptr->ver);
+
+	if (res == 0)
+	{
+		IPADBG("IPA version is %d\n", desc_ptr->ver);
+	}
+	else
+	{
+		IPAERR("Unable to get IPA version. Error %d\n", res);
+		desc_ptr->ver = IPA_HW_None;
+	}
+
+	goto bail;
+
+free:
+	free(desc_ptr);
+	desc_ptr = NULL;
+
+bail:
+	IPADBG("Out\n");
+
+	return desc_ptr;
+}
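
Typical usage of the descriptor helpers (illustrative sketch; the helper name is hypothetical):

static int probe_ipa_version(void)
{
	ipa_descriptor* desc_ptr = ipa_descriptor_open();

	if ( desc_ptr == NULL )
	{
		return -1;
	}

	IPADBG("IPA HW version read at open: %d\n", desc_ptr->ver);

	ipa_descriptor_close(desc_ptr);

	return 0;
}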
+
+void ipa_descriptor_close(
+	ipa_descriptor* desc_ptr)
+{
+	IPADBG("In\n");
+
+	if ( desc_ptr )
+	{
+		if ( desc_ptr->fd >= 0)
+		{
+			close(desc_ptr->fd);
+		}
+		free(desc_ptr);
+	}
+
+	IPADBG("Out\n");
+}
+
+void ipa_read_debug_info(
+	const char* debug_file_path)
+{
+	size_t result;
+	FILE* debug_file;
+
+	debug_file = fopen(debug_file_path, "r");
+	if (debug_file == NULL)
+	{
+		printf("Failed to open %s\n", debug_file_path);
+		return;
+	}
+
+	for (;;)
+	{
+		result = fread(dbg_buff, sizeof(char), IPA_MAX_MSG_LEN, debug_file);
+		if (!result)
+			break;
+
+		if (result < IPA_MAX_MSG_LEN)
+		{
+			if (ferror(debug_file))
+			{
+				printf("Failed to read from %s\n", debug_file_path);
+				break;
+			}
+
+			dbg_buff[result] = '\0';
+		}
+		else
+		{
+			dbg_buff[IPA_MAX_MSG_LEN - 1] = '\0';
+		}
+
+
+		printf("%s", dbg_buff);
+
+		if (feof(debug_file))
+			break;
+	}
+	fclose(debug_file);
+}
+
+void log_nat_message(char *msg)
+{
+	UNUSED(msg);
+
+	return;
+}
+
+int currTimeAs(
+	TimeAs_t  timeAs,
+	uint64_t* valPtr )
+{
+	struct timespec timeSpec;
+
+	int ret = 0;
+
+	if ( ! VALID_TIMEAS(timeAs) || ! valPtr )
+	{
+		IPAERR("Bad arg: timeAs (%u) and/or valPtr (%p)\n",
+			   timeAs, valPtr );
+		ret = -1;
+		goto bail;
+	}
+
+	memset(&timeSpec, 0, sizeof(timeSpec));
+
+	if ( clock_gettime(CLOCK_MONOTONIC, &timeSpec) != 0 )
+	{
+		IPAERR("Can't get system clock time\n" );
+		ret = -1;
+		goto bail;
+	}
+
+	switch( timeAs )
+	{
+	case TimeAsNanSecs:
+		*valPtr =
+			(uint64_t) (SECS2NanSECS((uint64_t) timeSpec.tv_sec) +
+						((uint64_t) timeSpec.tv_nsec));
+		break;
+	case TimeAsMicSecs:
+		*valPtr =
+			(uint64_t) (SECS2MicSECS((uint64_t) timeSpec.tv_sec) +
+						((uint64_t) timeSpec.tv_nsec / 1000));
+		break;
+	case TimeAsMilSecs:
+		*valPtr =
+			(uint64_t) (SECS2MilSECS((uint64_t) timeSpec.tv_sec) +
+						((uint64_t) timeSpec.tv_nsec / 1000000));
+		break;
+	}
+
+bail:
+	return ret;
+}
diff --git a/ipanat/src/ipa_table.c b/ipanat/src/ipa_table.c
new file mode 100644
index 0000000..f73dff0
--- /dev/null
+++ b/ipanat/src/ipa_table.c
@@ -0,0 +1,1344 @@
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "ipa_table.h"
+#include "ipa_nat_utils.h"
+
+#include <errno.h>
+
+#define IPA_BASE_TABLE_PERCENTAGE       .8
+#define IPA_EXPANSION_TABLE_PERCENTAGE  .2
+
+#define IPA_BASE_TABLE_PCNT_4SRAM      1.00
+#define IPA_EXPANSION_TABLE_PCNT_4SRAM 0.43
+
+/*
+ * The table number of entries is limited by Entry ID structure
+ * above. The base table max entries is limited by index into table
+ * bits number.
+ *
+ * The table max ents number is: (base table max ents / base table percentage)
+ *
+ * IPA_TABLE_MAX_ENTRIES = 2^(index into table) / IPA_BASE_TABLE_PERCENTAGE
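+ *
+ * For example, with a hypothetical 12-bit index field, the base table
+ * could hold 2^12 = 4096 entries, giving a table max of
+ * 4096 / 0.8 = 5120 total entries.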
+ */
+
+static int InsertHead(
+	ipa_table*                  table,
+	void*                       rec_ptr,   /* empty record in table */
+	uint16_t                    rec_index, /* index of record above */
+	void*                       user_data,
+	struct ipa_ioc_nat_dma_cmd* cmd );
+
+static int InsertTail(
+	ipa_table*                  table,
+	void*                       rec_ptr,       /* occupied record at index below */
+	uint16_t*                   rec_index_ptr, /* pointer to index of record above */
+	void*                       user_data,
+	struct ipa_ioc_nat_dma_cmd* cmd );
+
+static uint16_t MakeEntryHdl(
+	ipa_table* tbl,
+	uint16_t   tbl_entry );
+
+static int FindExpnTblFreeEntry(
+	ipa_table* table,
+	void**     free_entry,
+	uint16_t*  entry_index );
+
+static int Get2PowerTightUpperBound(
+	uint16_t num);
+
+static int GetEvenTightUpperBound(
+	uint16_t num);
+
+void ipa_table_init(
+	ipa_table*           table,
+	const char*          table_name,
+	enum ipa3_nat_mem_in nmi,
+	int                  entry_size,
+	void*                meta,
+	int                  meta_entry_size,
+	ipa_table_entry_interface* entry_interface )
+{
+	IPADBG("In\n");
+
+	memset(table, 0, sizeof(ipa_table));
+
+	strlcpy(table->name, table_name, IPA_RESOURCE_NAME_MAX);
+
+	table->nmi             = nmi;
+	table->entry_size      = entry_size;
+	table->meta            = meta;
+	table->meta_entry_size = meta_entry_size;
+	table->entry_interface = entry_interface;
+
+	IPADBG("Table %s with entry size %d has been initialized\n",
+		   table->name, table->entry_size);
+
+	IPADBG("Out\n");
+}
+
+int ipa_table_calculate_entries_num(
+	ipa_table*           table,
+	uint16_t             number_of_entries,
+	enum ipa3_nat_mem_in nmi)
+{
+	uint16_t table_entries, expn_table_entries;
+	float btp, etp;
+	int result = 0;
+
+	IPADBG("In\n");
+
+	if (number_of_entries > IPA_TABLE_MAX_ENTRIES)
+	{
+		IPAERR("Required number of %s entries %d exceeds the maximum %d\n",
+			table->name, number_of_entries, IPA_TABLE_MAX_ENTRIES);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if ( nmi == IPA_NAT_MEM_IN_SRAM )
+	{
+		btp = IPA_BASE_TABLE_PCNT_4SRAM;
+		etp = IPA_EXPANSION_TABLE_PCNT_4SRAM;
+	}
+	else
+	{
+		btp = IPA_BASE_TABLE_PERCENTAGE;
+		etp = IPA_EXPANSION_TABLE_PERCENTAGE;
+	}
+
+	table_entries      = Get2PowerTightUpperBound(number_of_entries * btp);
+	expn_table_entries = GetEvenTightUpperBound(number_of_entries * etp);
+
+	table->tot_tbl_ents = table_entries + expn_table_entries;
+
+	if ( table->tot_tbl_ents > IPA_TABLE_MAX_ENTRIES )
+	{
+		IPAERR("Required number of %s entries %u "
+			   "(user provided %u) exceeds the maximum %u\n",
+			   table->name,
+			   table->tot_tbl_ents,
+			   number_of_entries,
+			   IPA_TABLE_MAX_ENTRIES);
+		result = -EINVAL;
+		goto bail;
+	}
+
+	table->table_entries      = table_entries;
+	table->expn_table_entries = expn_table_entries;
+
+	IPADBG("Num of %s entries:%u expn entries:%u total entries:%u\n",
+		   table->name,
+		   table->table_entries,
+		   table->expn_table_entries,
+		   table->tot_tbl_ents);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+int ipa_table_calculate_size(ipa_table* table)
+{
+	int size = table->entry_size * (table->table_entries + table->expn_table_entries);
+
+	IPADBG("In\n");
+
+	IPADBG("%s size: %d\n", table->name, size);
+
+	IPADBG("Out\n");
+
+	return size;
+}
+
+uint8_t* ipa_table_calculate_addresses(
+	ipa_table* table,
+	uint8_t*   base_addr)
+{
+	uint8_t* result = NULL;
+
+	IPADBG("In\n");
+
+	table->table_addr = base_addr;
+	table->expn_table_addr =
+		table->table_addr + table->entry_size * table->table_entries;
+
+	IPADBG("Table %s addresses: table_addr %pK expn_table_addr %pK\n",
+		   table->name, table->table_addr, table->expn_table_addr);
+
+	result = table->expn_table_addr + table->entry_size * table->expn_table_entries;
+
+	IPADBG("Out\n");
+
+	return result;
+}
+
+void ipa_table_reset(
+	ipa_table* table)
+{
+	uint32_t i, tot;
+
+	IPADBG("In\n");
+
+	IPADBG("memset %s table to 0, %pK\n", table->name, table->table_addr);
+	tot = table->entry_size * table->table_entries;
+	for ( i = 0; i < tot; i++ )
+	{
+	  table->table_addr[i] = '\0';
+	}
+
+	IPADBG("memset %s expn table to 0, %pK\n", table->name, table->expn_table_addr);
+	tot = table->entry_size * table->expn_table_entries;
+	for ( i = 0; i < tot; i++ )
+	{
+	  table->expn_table_addr[i] = '\0';
+	}
+
+	IPADBG("Out\n");
+}
+
+int ipa_table_add_entry(
+	ipa_table* table,
+	void*      user_data,
+	uint16_t*  rec_index_ptr,
+	uint32_t*  rule_hdl,
+	struct ipa_ioc_nat_dma_cmd* cmd )
+{
+	void* rec_ptr;
+	int ret = 0, occupied;
+
+	IPADBG("In\n");
+
+	rec_ptr = GOTO_REC(table, *rec_index_ptr);
+
+	/*
+	 * Check whether there is any collision
+	 */
+	occupied = table->entry_interface->entry_is_valid(rec_ptr);
+
+	if ( ! occupied )
+	{
+		IPADBG("Collision free (in %s) ... found open slot\n", table->name);
+		ret = InsertHead(table, rec_ptr, *rec_index_ptr, user_data, cmd);
+	}
+	else
+	{
+		IPADBG("Collision (in %s) ... will probe for open slot\n", table->name);
+		ret = InsertTail(table, rec_ptr, rec_index_ptr, user_data, cmd);
+	}
+
+	if (ret)
+		goto bail;
+
+	IPADBG("New Entry Index %u in %s\n", *rec_index_ptr, table->name);
+
+	if ( rule_hdl ) {
+		*rule_hdl = MakeEntryHdl(table, *rec_index_ptr);
+		IPADBG("rule_hdl value(%u)\n", *rule_hdl);
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+void ipa_table_create_delete_command(
+	ipa_table* table,
+	struct ipa_ioc_nat_dma_cmd* cmd,
+	ipa_table_iterator* iterator)
+{
+	IPADBG("In\n");
+
+	IPADBG("Delete rule at index(0x%04X) in %s\n",
+		   iterator->curr_index,
+		   table->name);
+
+	if ( ! VALID_INDEX(iterator->prev_index) )
+	{
+		/*
+		 * The following two assignments (ie. the defaults) will cause
+		 * the enabled bit in the record to be set to 0.
+		 */
+		uint16_t      data = 0;
+		dma_help_type ht   = HELP_UPDATE_HEAD;
+
+		if ( VALID_INDEX(iterator->next_index) )
+		{
+			/*
+			 * NOTE WELL HERE:
+			 *
+			 * This record is the first in a chain/list of
+			 * records. Delete means something different in this
+			 * context.
+			 *
+			 * The code below will cause the change of the protocol
+			 * field in the rule record to 0xFF.  It does not set the
+			 * enable bit in the record to 0.  This is done in special
+			 * cases when the record being deleted is the first in a
+			 * list of records.
+			 *
+			 * What does this mean?  It means that the record is
+			 * functionally deleted, but not really deleted.  Why?
+			 * Because the IPA will no longer use it because of the
+			 * bad protocol (ie. functionally deleted), but these
+			 * higher level APIs still see it as "enabled."
+			 *
+			 * This all means that deleted really means two things: 1)
+			 * Not enabled, and 2) Not a valid record.  APIs that walk
+			 * the table...looking for enabled records (ie. the
+			 * enabled bit)....now have to be a bit smarter to see the
+			 * bad protocol as well.
+			 */
+			data = table->entry_interface->
+				entry_get_delete_head_dma_command_data(
+					iterator->curr_entry, iterator->next_entry);
+
+			ht = HELP_DELETE_HEAD;
+		}
+
+		ipa_table_add_dma_cmd(table,
+							  ht,
+							  iterator->curr_entry,
+							  iterator->curr_index,
+							  data,
+							  cmd);
+	}
+	else
+	{
+		ipa_table_add_dma_cmd(table,
+							  HELP_UPDATE_ENTRY,
+							  iterator->prev_entry,
+							  iterator->prev_index,
+							  iterator->next_index,
+							  cmd);
+	}
+
+	IPADBG("Out\n");
+}
+
+void ipa_table_delete_entry(
+	ipa_table*          table,
+	ipa_table_iterator* iterator,
+	uint8_t             is_prev_empty)
+{
+	IPADBG("In\n");
+
+	if ( VALID_INDEX(iterator->next_index) )
+	{
+		/*
+		 * Update the next entry's prev_index field with current
+		 * entry's prev_index
+		 */
+		table->entry_interface->entry_set_prev_index(
+			iterator->next_entry,
+			iterator->next_index,
+			iterator->prev_index,
+			table->meta,
+			table->table_entries);
+	}
+	else if (is_prev_empty)
+	{
+		if (iterator->prev_entry == NULL)
+		{
+			IPAERR("failed to delete an empty head %d while deleting the next entry %d in %s\n",
+				   iterator->prev_index, iterator->curr_index, table->name);
+		}
+		else
+		{
+			/*
+			 * Delete an empty head rule after the whole tail was deleted
+			 */
+			IPADBG("deleting the dead node %d for %s\n",
+				   iterator->prev_index, table->name);
+
+			memset(iterator->prev_entry, 0, table->entry_size);
+
+			--table->cur_tbl_cnt;
+		}
+	}
+
+	ipa_table_erase_entry(table, iterator->curr_index);
+
+	IPADBG("Out\n");
+}
+
+void ipa_table_erase_entry(
+	ipa_table* table,
+	uint16_t   index)
+{
+	void* entry = GOTO_REC(table, index);
+
+	IPADBG("In\n");
+
+	IPADBG("table(%p) index(%u)\n", table, index);
+
+	memset(entry, 0, table->entry_size);
+
+	if ( index < table->table_entries )
+	{
+		--table->cur_tbl_cnt;
+	}
+	else
+	{
+		--table->cur_expn_tbl_cnt;
+	}
+
+	IPADBG("Out\n");
+}
+
+/**
+ * ipa_table_get_entry() - returns a table entry according to the received entry handle
+ * @table: [in] the table
+ * @entry_handle: [in] entry handle
+ * @entry: [out] the retrieved entry
+ * @entry_index: [out] absolute index of the retrieved entry
+ *
+ * Parse the entry handle to retrieve the entry and its index
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_table_get_entry(
+	ipa_table* table,
+	uint32_t   entry_handle,
+	void**     entry,
+	uint16_t*  entry_index )
+{
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rec_index;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	IPADBG("table(%p) entry_handle(%u) entry(%p) entry_index(%p)\n",
+		   table, entry_handle, entry, entry_index);
+
+	/*
+	 * Retrieve the memory and table type as well as the index
+	 */
+	BREAK_RULE_HDL(table, entry_handle, nmi, is_expn_tbl, rec_index);
+
+	nmi++; /* to eliminate compiler usage warning */
+
+	if ( is_expn_tbl )
+	{
+		IPADBG("Retrieving entry from expansion table\n");
+	}
+	else
+	{
+		IPADBG("Retrieving entry from base (non-expansion) table\n");
+	}
+
+	if ( rec_index >= table->tot_tbl_ents )
+	{
+		IPAERR("The entry handle's record index (%u) exceeds table size (%u)\n",
+			   rec_index, table->tot_tbl_ents);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	*entry = GOTO_REC(table, rec_index);
+
+	if ( entry_index )
+	{
+		*entry_index = rec_index;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+void* ipa_table_get_entry_by_index(
+	ipa_table* table,
+	uint16_t   rec_index )
+{
+	void*    result = NULL;
+
+	IPADBG("In\n");
+
+	IPADBG("table(%p) rec_index(%u)\n",
+		   table,
+		   rec_index);
+
+	if ( ! rec_index || rec_index >= table->tot_tbl_ents )
+	{
+		IPAERR("Invalid record index (%u): It's "
+			   "either zero or exceeds table size (%u)\n",
+			   rec_index, table->tot_tbl_ents);
+		goto bail;
+	}
+
+	result = GOTO_REC(table, rec_index);
+
+bail:
+	IPADBG("Out\n");
+
+	return result;
+}
+
+void ipa_table_dma_cmd_helper_init(
+	ipa_table_dma_cmd_helper* dma_cmd_helper,
+	uint8_t table_indx,
+	ipa_table_dma_type table_type,
+	ipa_table_dma_type expn_table_type,
+	uint32_t offset)
+{
+	IPADBG("In\n");
+
+	dma_cmd_helper->offset = offset;
+	dma_cmd_helper->table_indx = table_indx;
+	dma_cmd_helper->table_type = table_type;
+	dma_cmd_helper->expn_table_type = expn_table_type;
+
+	IPADBG("Out\n");
+}
+
+void ipa_table_dma_cmd_generate(
+	ipa_table_dma_cmd_helper* dma_cmd_helper,
+	uint8_t is_expn,
+	uint32_t entry_offset,
+	uint16_t data,
+	struct ipa_ioc_nat_dma_cmd* cmd)
+{
+	struct ipa_ioc_nat_dma_one* dma = &cmd->dma[cmd->entries];
+
+	IPADBG("In\n");
+
+	IPADBG("is_expn(0x%02X) entry_offset(0x%08X) data(0x%04X)\n",
+		   is_expn, entry_offset, data);
+
+	dma->table_index = dma_cmd_helper->table_indx;
+
+	/*
+	 * DMA parameter base_addr is the table type (see the IPA
+	 * architecture document)
+	 */
+	dma->base_addr =
+		(is_expn) ?
+		dma_cmd_helper->expn_table_type :
+		dma_cmd_helper->table_type;
+
+	dma->offset = dma_cmd_helper->offset + entry_offset;
+
+	dma->data = data;
+
+	IPADBG("dma_entry[%u](table_index(0x%02X) "
+		   "base_addr(0x%02X) data(0x%04X) offset(0x%08X))\n",
+		   cmd->entries,
+		   dma->table_index,
+		   dma->base_addr,
+		   dma->data,
+		   dma->offset);
+
+	cmd->entries++;
+
+	IPADBG("Out\n");
+}
+
+int ipa_table_iterator_init(
+	ipa_table_iterator* iterator,
+	ipa_table*          table,
+	void*               curr_entry,
+	uint16_t            curr_index)
+{
+	int occupied;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	memset(iterator, 0, sizeof(ipa_table_iterator));
+
+	occupied = table->entry_interface->entry_is_valid(curr_entry);
+
+	if ( ! occupied )
+	{
+		IPAERR("Invalid (not enabled) rule %u in %s\n", curr_index, table->name);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	iterator->curr_entry = curr_entry;
+	iterator->curr_index = curr_index;
+
+	iterator->prev_index = table->entry_interface->entry_get_prev_index(
+		curr_entry,
+		curr_index,
+		table->meta,
+		table->table_entries);
+
+	iterator->next_index = table->entry_interface->entry_get_next_index(
+		curr_entry);
+
+	if ( VALID_INDEX(iterator->prev_index) )
+	{
+		iterator->prev_entry = ipa_table_get_entry_by_index(
+			table,
+			iterator->prev_index);
+
+		if ( iterator->prev_entry == NULL )
+		{
+			IPAERR("Failed to retrieve the entry at index 0x%04X for %s\n",
+				   iterator->prev_index, table->name);
+			ret = -EPERM;
+			goto bail;
+		}
+	}
+
+	if ( VALID_INDEX(iterator->next_index) )
+	{
+		iterator->next_entry = ipa_table_get_entry_by_index(
+			table,
+			iterator->next_index);
+
+		if ( iterator->next_entry == NULL )
+		{
+			IPAERR("Failed to retrieve the entry at index 0x%04X for %s\n",
+				   iterator->next_index, table->name);
+			ret = -EPERM;
+			goto bail;
+		}
+	}
+
+	IPADBG("[index/entry] for "
+		   "prev:[0x%04X/%p] "
+		   "curr:[0x%04X/%p] "
+		   "next:[0x%04X/%p] "
+		   "\"%s\"\n",
+		   iterator->prev_index,
+		   iterator->prev_entry,
+		   iterator->curr_index,
+		   iterator->curr_entry,
+		   iterator->next_index,
+		   iterator->next_entry,
+		   table->name);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_table_iterator_next(
+	ipa_table_iterator* iterator,
+	ipa_table*          table)
+{
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	iterator->prev_entry = iterator->curr_entry;
+	iterator->prev_index = iterator->curr_index;
+	iterator->curr_entry = iterator->next_entry;
+	iterator->curr_index = iterator->next_index;
+
+	iterator->next_index = table->entry_interface->entry_get_next_index(
+		iterator->curr_entry);
+
+	if ( ! VALID_INDEX(iterator->next_index) )
+	{
+		iterator->next_entry = NULL;
+	}
+	else
+	{
+		iterator->next_entry = ipa_table_get_entry_by_index(
+			table, iterator->next_index);
+
+		if (iterator->next_entry == NULL)
+		{
+			IPAERR("Failed to retrieve the entry at index %d for %s\n",
+				   iterator->next_index, table->name);
+			ret = -EPERM;
+			goto bail;
+		}
+	}
+
+	IPADBG("Iterator moved to: prev_index=%d curr_index=%d next_index=%d\n",
+		   iterator->prev_index, iterator->curr_index, iterator->next_index);
+
+	IPADBG("                   prev_entry=%pK curr_entry=%pK next_entry=%pK\n",
+		   iterator->prev_entry, iterator->curr_entry, iterator->next_entry);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_table_iterator_end(
+	ipa_table_iterator* iterator,
+	ipa_table*          table_ptr,
+	uint16_t            rec_index,  /* a table slot relative to hash */
+	void*               rec_ptr )   /* occupant record at index above */
+{
+	bool found_end = false;
+
+	int ret;
+
+	IPADBG("In\n");
+
+	if ( ! iterator || ! table_ptr || ! rec_ptr )
+	{
+		IPAERR("Bad arg: iterator(%p) and/or table_ptr (%p) and/or rec_ptr(%p)\n",
+			   iterator, table_ptr, rec_ptr);
+		ret = -1;
+		goto bail;
+	}
+
+	memset(iterator, 0, sizeof(ipa_table_iterator));
+
+	iterator->prev_index = rec_index;
+	iterator->prev_entry = rec_ptr;
+
+	while ( 1 )
+	{
+		uint16_t next_index =
+			table_ptr->entry_interface->entry_get_next_index(iterator->prev_entry);
+
+		if ( ! VALID_INDEX(next_index) )
+		{
+			found_end = true;
+			break;
+		}
+
+		if ( next_index == iterator->prev_index )
+		{
+			IPAERR("next_index(%u) and prev_index(%u) shouldn't be equal in %s\n",
+				   next_index,
+				   iterator->prev_index,
+				   table_ptr->name);
+			break;
+		}
+
+		iterator->prev_index = next_index;
+		iterator->prev_entry = GOTO_REC(table_ptr, next_index);
+	}
+
+	if ( found_end )
+	{
+		IPADBG("Iterator found end of list record\n");
+		ret = 0;
+	}
+	else
+	{
+		IPAERR("Iterator can't find end of list record\n");
+		ret = -1;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_table_iterator_is_head_with_tail(
+	ipa_table_iterator* iterator)
+{
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	ret = VALID_INDEX(iterator->next_index) && ! VALID_INDEX(iterator->prev_index);
+
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static int InsertHead(
+	ipa_table*                  table,
+	void*                       rec_ptr,   /* empty record in table */
+	uint16_t                    rec_index, /* index of record above */
+	void*                       user_data,
+	struct ipa_ioc_nat_dma_cmd* cmd )
+{
+	uint16_t enable_data = 0;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	ret = table->entry_interface->entry_head_insert(
+		rec_ptr,
+		user_data,
+		&enable_data);
+
+	if (ret)
+	{
+		IPAERR("unable to insert a new entry to the head in %s\n", table->name);
+		goto bail;
+	}
+
+	ipa_table_add_dma_cmd(
+		table,
+		HELP_UPDATE_HEAD,
+		rec_ptr,
+		rec_index,
+		enable_data,
+		cmd);
+
+	++table->cur_tbl_cnt;
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+static int InsertTail(
+	ipa_table*                  table,
+	void*                       rec_ptr,       /* occupied record at index below */
+	uint16_t*                   rec_index_ptr, /* pointer to index of record above */
+	void*                       user_data,
+	struct ipa_ioc_nat_dma_cmd* cmd )
+{
+	bool is_index_tbl = (table->meta) ? true : false;
+
+	ipa_table_iterator iterator;
+
+	uint16_t enable_data = 0;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	/*
+	 * The most important side effect of the following is to set the
+	 * iterator's prev_index and prev_entry...which will be the last
+	 * valid entry on the end of the list.
+	 */
+	ret = ipa_table_iterator_end(&iterator, table, *rec_index_ptr, rec_ptr);
+
+	if ( ret )
+	{
+		IPAERR("Failed to reach the end of list following rec_index(%u) in %s\n",
+			   *rec_index_ptr, table->name);
+		goto bail;
+	}
+
+	/*
+	 * The most important side effect of the following is to set the
+	 * iterator's curr_index and curr_entry with the next available
+	 * expansion table open slot.
+	 */
+	ret = FindExpnTblFreeEntry(table, &iterator.curr_entry, &iterator.curr_index);
+
+	if ( ret )
+	{
+		IPAERR("FindExpnTblFreeEntry of %s failed\n", table->name);
+		goto bail;
+	}
+
+	/*
+	 * Copy data into curr_entry (ie. open slot).
+	 */
+	if ( is_index_tbl )
+	{
+		ret = table->entry_interface->entry_tail_insert(
+			iterator.curr_entry,
+			user_data);
+	}
+	else
+	{
+		/*
+		 * We need enable bit when not index table, hence...
+		 */
+		ret = table->entry_interface->entry_head_insert(
+			iterator.curr_entry,
+			user_data,
+			&enable_data);
+	}
+
+	if (ret)
+	{
+		IPAERR("Unable to insert a new entry to the tail in %s\n", table->name);
+		goto bail;
+	}
+
+	/*
+	 * Update curr_entry's prev_index field with iterator.prev_index
+	 */
+	table->entry_interface->entry_set_prev_index(
+		iterator.curr_entry, /* set by FindExpnTblFreeEntry above */
+		iterator.curr_index, /* set by FindExpnTblFreeEntry above */
+		iterator.prev_index, /* set by ipa_table_iterator_end above */
+		table->meta,
+		table->table_entries);
+
+	if ( ! is_index_tbl )
+	{
+		/*
+		 * Generate dma command to have the IPA update the
+		 * curr_entry's enable field when not the index table...
+		 */
+		ipa_table_add_dma_cmd(
+			table,
+			HELP_UPDATE_HEAD,
+			iterator.curr_entry,
+			iterator.curr_index,
+			enable_data,
+			cmd);
+	}
+
+	/*
+	 * Generate a dma command to have the IPA update the prev_entry's
+	 * next_index with iterator.curr_index.
+	 */
+	ipa_table_add_dma_cmd(
+		table,
+		HELP_UPDATE_ENTRY,
+		iterator.prev_entry,
+		iterator.prev_index,
+		iterator.curr_index,
+		cmd);
+
+	++table->cur_expn_tbl_cnt;
+
+	*rec_index_ptr = iterator.curr_index;
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/**
+ * MakeEntryHdl() - makes an entry handle
+ * @tbl: [in] the table
+ * @tbl_entry: [in] table entry
+ *
+ * Calculate the entry handle which will be returned to client
+ *
+ * Returns: >0 table entry handle
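+ *
+ * Layout (low to high bits): the expansion-table flag in the type
+ * bits, the record index above them, and the memory type (nmi) at
+ * IPA_TABLE_TYPE_MEM_SHIFT -- see the shifts/masks in the body below.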
+ */
+static uint16_t MakeEntryHdl(
+	ipa_table* tbl,
+	uint16_t   tbl_entry )
+{
+	uint16_t entry_hdl = 0;
+
+	IPADBG("In\n");
+
+	if (tbl_entry >= tbl->table_entries)
+	{
+		/*
+		 * Update the index into table
+		 */
+		entry_hdl = tbl_entry - tbl->table_entries;
+		entry_hdl = (entry_hdl << IPA_TABLE_TYPE_BITS);
+		/*
+		 * Update the expansion table type bit
+		 */
+		entry_hdl = (entry_hdl | IPA_TABLE_TYPE_MASK);
+	}
+	else
+	{
+		entry_hdl = tbl_entry;
+		entry_hdl = (entry_hdl << IPA_TABLE_TYPE_BITS);
+	}
+
+	/*
+	 * Set memory type bit.
+	 */
+	entry_hdl = entry_hdl | (tbl->nmi << IPA_TABLE_TYPE_MEM_SHIFT);
+
+	IPADBG("In: tbl_entry(%u) Out: entry_hdl(%u)\n", tbl_entry, entry_hdl);
+
+	IPADBG("Out\n");
+
+	return entry_hdl;
+}
+
+static int mt_slot(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
+{
+	UNUSED(table_ptr);
+	UNUSED(rule_hdl);
+	UNUSED(record_ptr);
+	UNUSED(meta_record_ptr);
+	UNUSED(meta_record_index);
+	UNUSED(arb_data_ptr);
+
+	IPADBG("%s: Empty expansion slot: (%u) in table of size: (%u)\n",
+		   table_ptr->name,
+		   record_index,
+		   table_ptr->tot_tbl_ents);
+
+	return record_index;
+}
+
+/*
+ * returns expn table entry absolute index
+ */
+static int FindExpnTblFreeEntry(
+	ipa_table* table,
+	void**     free_entry,
+	uint16_t*  entry_index )
+{
+	int ret;
+
+	IPADBG("In\n");
+
+	if ( ! table || ! free_entry || ! entry_index )
+	{
+		IPAERR("Bad arg: table(%p) and/or "
+			   "free_entry(%p) and/or entry_index(%p)\n",
+			   table, free_entry, entry_index);
+		ret = -1;
+		goto bail;
+	}
+
+	*entry_index = 0;
+	*free_entry  = NULL;
+
+	/*
+	 * The following will start walk at expansion slots
+	 * (ie. just after table->table_entries)...
+	 */
+	ret = ipa_table_walk(table, table->table_entries, WHEN_SLOT_EMPTY, mt_slot, 0);
+
+	if ( ret > 0 )
+	{
+		*entry_index = (uint16_t) ret;
+
+		*free_entry = GOTO_REC(table, *entry_index);
+
+		IPADBG("%s: entry_index val (%u) free_entry val (%p)\n",
+			   table->name,
+			   *entry_index,
+			   *free_entry);
+
+		ret = 0;
+	}
+	else
+	{
+		if ( ret < 0 )
+		{
+			IPAERR("%s: While searching table for empty slot\n",
+				   table->name);
+		}
+		else
+		{
+			IPADBG("%s: No empty slots (ie. expansion table full): "
+				   "BASE (avail/used): (%u/%u) EXPN (avail/used): (%u/%u)\n",
+				   table->name,
+				   table->table_entries,
+				   table->cur_tbl_cnt,
+				   table->expn_table_entries,
+				   table->cur_expn_tbl_cnt);
+		}
+
+		ret = -1;
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+/**
+ * Get2PowerTightUpperBound() - Returns the tight upper bound which is a power of 2
+ * @num: [in] given number
+ *
+ * Returns the tight upper bound for a given number which is power of 2
+ *
+ * Returns: the tight upper bound which is power of 2
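+ *
+ * e.g. 100 -> 128, 64 -> 64, 0 -> 2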
+ */
+static int Get2PowerTightUpperBound(uint16_t num)
+{
+	uint16_t tmp = num, prev = 0, curr = 2;
+
+	if (num == 0)
+		return 2;
+
+	while (tmp != 1)
+	{
+		prev = curr;
+		curr <<= 1;
+		tmp >>= 1;
+	}
+
+	return (num == prev) ? prev : curr;
+}
+
+/**
+ * GetEvenTightUpperBound() - Returns the tight upper bound which is an even number
+ * @num: [in] given number
+ *
+ * Returns the tight upper bound for a given number which is an even number
+ *
+ * Returns: the tight upper bound which is an even number
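+ *
+ * e.g. 43 -> 44, 20 -> 20, 0 -> 2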
+ */
+static int GetEvenTightUpperBound(uint16_t num)
+{
+	if (num == 0)
+		return 2;
+
+	return (num % 2) ? num + 1 : num;
+}
+
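+/**
+ * ipa_calc_num_sram_table_entries() - find how many entries fit in SRAM
+ * @sram_size: [in] available SRAM size in bytes
+ * @table1_ent_size: [in] size of a NAT table entry
+ * @table2_ent_size: [in] size of an index table entry
+ * @num_entries_ptr: [out] largest entry count whose NAT and index
+ *                   tables (base + expansion) still fit in sram_size
+ *
+ * Returns: 0 on success, -1 if not even one entry fits
+ */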
+int ipa_calc_num_sram_table_entries(
+	uint32_t  sram_size,
+	uint32_t  table1_ent_size,
+	uint32_t  table2_ent_size,
+	uint16_t* num_entries_ptr)
+{
+	ipa_table nat_table;
+	ipa_table index_table;
+	int       size = 0;
+	uint16_t  tot;
+
+	IPADBG("In\n");
+
+	IPADBG("sram_size(%x or %u)\n", sram_size, sram_size);
+
+	*num_entries_ptr = 0;
+
+	tot = 1;
+
+	while ( 1 )
+	{
+		IPADBG("Trying %u entries\n", tot);
+
+		ipa_table_init(&nat_table,
+					   "tmp_sram_table1",
+					   IPA_NAT_MEM_IN_DDR,
+					   table1_ent_size,
+					   NULL,
+					   0,
+					   NULL);
+
+		ipa_table_init(&index_table,
+					   "tmp_sram_table2",
+					   IPA_NAT_MEM_IN_DDR,
+					   table2_ent_size,
+					   NULL,
+					   0,
+					   NULL);
+
+		nat_table.table_entries = index_table.table_entries =
+			Get2PowerTightUpperBound(tot * IPA_BASE_TABLE_PCNT_4SRAM);
+		nat_table.expn_table_entries = index_table.expn_table_entries =
+			GetEvenTightUpperBound(tot * IPA_EXPANSION_TABLE_PCNT_4SRAM);
+
+		size  = ipa_table_calculate_size(&nat_table);
+		size += ipa_table_calculate_size(&index_table);
+
+		IPADBG("%u entries consumes size(0x%x or %u)\n", tot, size, size);
+
+		if ( size > sram_size )
+			break;
+
+		*num_entries_ptr = tot;
+
+		++tot;
+	}
+
+	IPADBG("Optimal number of entries: %u\n", *num_entries_ptr);
+
+	IPADBG("Out\n");
+
+	return (*num_entries_ptr) ? 0 : -1;
+}
+
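+/**
+ * ipa_table_walk() - walk a table's slots starting at start_index
+ * @ipa_tbl_ptr: [in] the table to walk
+ * @start_index: [in] absolute slot index at which to begin
+ * @when2cb: [in] invoke the callback on empty or on filled slots
+ * @walk_cb: [in] callback invoked for each matching slot
+ * @arb_data_ptr: [in] opaque data passed through to the callback
+ *
+ * The walk stops early when the callback returns non-zero, and that
+ * value is returned to the caller; 0 means the whole table was walked.
+ */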
+int ipa_table_walk(
+	ipa_table*        ipa_tbl_ptr,
+	uint16_t          start_index,
+	When2Callback     when2cb,
+	ipa_table_walk_cb walk_cb,
+	void*             arb_data_ptr )
+{
+	uint16_t i;
+	uint32_t tot;
+	uint8_t* rec_ptr;
+	void*    meta_record_ptr;
+	uint16_t meta_record_index;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! ipa_tbl_ptr ||
+		 ! VALID_WHEN2CALLBACK(when2cb) ||
+		 ! walk_cb )
+	{
+		IPAERR("Bad arg: ipa_tbl_ptr(%p) and/or "
+			   "when2cb(%u) and/or walk_cb(%p)\n",
+			   ipa_tbl_ptr,
+			   when2cb,
+			   walk_cb);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	tot =
+		ipa_tbl_ptr->table_entries +
+		ipa_tbl_ptr->expn_table_entries;
+
+	if ( start_index >= tot )
+	{
+		IPAERR("Bad arg: start_index(%u)\n", start_index);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * Go through table...
+	 */
+	for ( i = start_index, rec_ptr = GOTO_REC(ipa_tbl_ptr, start_index);
+		  i < tot;
+		  i++,             rec_ptr += ipa_tbl_ptr->entry_size )
+	{
+		bool call_back;
+
+		if ( ipa_tbl_ptr->entry_interface->entry_is_valid(rec_ptr) )
+		{
+			call_back = (when2cb == WHEN_SLOT_FILLED) ? true : false;
+		}
+		else
+		{
+			call_back = (when2cb == WHEN_SLOT_EMPTY)  ? true : false;
+		}
+
+		if ( call_back )
+		{
+			uint32_t rule_hdl = MakeEntryHdl(ipa_tbl_ptr, i);
+
+			meta_record_ptr   = NULL;
+			meta_record_index = 0;
+
+			if ( i >= ipa_tbl_ptr->table_entries && ipa_tbl_ptr->meta )
+			{
+				meta_record_index = i - ipa_tbl_ptr->table_entries;
+
+				meta_record_ptr = (uint8_t*) ipa_tbl_ptr->meta +
+					(meta_record_index * ipa_tbl_ptr->meta_entry_size);
+			}
+
+			ret = walk_cb(
+				ipa_tbl_ptr,
+				rule_hdl,
+				rec_ptr,
+				i,
+				meta_record_ptr,
+				meta_record_index,
+				arb_data_ptr);
+
+			if ( ret != 0 )
+			{
+				if ( ret < 0 )
+				{
+					IPAERR("walk_cb returned non-zero (%d)\n", ret);
+				}
+				else
+				{
+					IPADBG("walk_cb returned non-zero (%d)\n", ret);
+				}
+				goto bail;
+			}
+		}
+	}
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
+
+int ipa_table_add_dma_cmd(
+	ipa_table*                  tbl_ptr,
+	dma_help_type               help_type,
+	void*                       rec_ptr,
+	uint16_t                    rec_index,
+	uint16_t                    data_for_entry,
+	struct ipa_ioc_nat_dma_cmd* cmd_ptr )
+{
+	uint32_t tab_sz, entry_offset;
+
+	uint8_t is_expn;
+
+	int ret = 0;
+
+	IPADBG("In\n");
+
+	if ( ! tbl_ptr ||
+		 ! VALID_DMA_HELP_TYPE(help_type) ||
+		 ! rec_ptr ||
+		 ! cmd_ptr )
+	{
+		IPAERR("Bad arg: tbl_ptr(%p) and/or help_type(%u) "
+			   "and/or rec_ptr(%p) and/or cmd_ptr(%p)\n",
+			   tbl_ptr,
+			   help_type,
+			   rec_ptr,
+			   cmd_ptr);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	tab_sz =
+		tbl_ptr->table_entries +
+		tbl_ptr->expn_table_entries;
+
+	if ( rec_index >= tab_sz )
+	{
+		IPAERR("Bad arg: rec_index(%u)\n", rec_index);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	is_expn = (rec_index >= tbl_ptr->table_entries);
+
+	entry_offset = (uint8_t*) rec_ptr -
+		((is_expn) ? tbl_ptr->expn_table_addr : tbl_ptr->table_addr);
+
+	ipa_table_dma_cmd_generate(
+		tbl_ptr->dma_help[help_type],
+		is_expn,
+		entry_offset,
+		data_for_entry,
+		cmd_ptr);
+
+bail:
+	IPADBG("Out\n");
+
+	return ret;
+}
diff --git a/ipanat/test/Android.mk b/ipanat/test/Android.mk
index b8ae6a4..b026794 100644
--- a/ipanat/test/Android.mk
+++ b/ipanat/test/Android.mk
@@ -14,7 +14,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
 
 LOCAL_MODULE := ipa_nat_test
-LOCAL_SRC_FILES := ipa_nat_test000.c \
+LOCAL_SRC_FILES := \
+		ipa_nat_testREG.c \
+		ipa_nat_test000.c \
 		ipa_nat_test001.c \
 		ipa_nat_test002.c \
 		ipa_nat_test003.c \
@@ -37,9 +39,12 @@
 		ipa_nat_test020.c \
 		ipa_nat_test021.c \
 		ipa_nat_test022.c \
+		ipa_nat_test023.c \
+		ipa_nat_test024.c \
+		ipa_nat_test025.c \
+		ipa_nat_test999.c \
 		main.c
 
-
 LOCAL_SHARED_LIBRARIES := libipanat
 
 LOCAL_MODULE_TAGS := debug
diff --git a/ipanat/test/Makefile.am b/ipanat/test/Makefile.am
index 3aec070..cefa7ed 100644
--- a/ipanat/test/Makefile.am
+++ b/ipanat/test/Makefile.am
@@ -2,9 +2,11 @@
 	      -I$(top_srcdir)/ipanat/inc
 
 AM_CPPFLAGS += -Wall -Wundef -Wno-trigraphs
-AM_CPPFLAGS += -g
+AM_CPPFLAGS += -g -DDEBUG -DNAT_DEBUG
 
-ipanattest_SOURCES = ipa_nat_test000.c \
+ipanattest_SOURCES = \
+		ipa_nat_testREG.c \
+		ipa_nat_test000.c \
 		ipa_nat_test001.c \
 		ipa_nat_test002.c \
 		ipa_nat_test003.c \
@@ -27,9 +29,12 @@
 		ipa_nat_test020.c \
 		ipa_nat_test021.c \
 		ipa_nat_test022.c \
+		ipa_nat_test023.c \
+		ipa_nat_test024.c \
+		ipa_nat_test025.c \
+		ipa_nat_test999.c \
 		main.c
 
-
 bin_PROGRAMS  =  ipanattest
 
 requiredlibs =  ../src/libipanat.la
@@ -39,4 +44,3 @@
 LOCAL_MODULE := libipanat
 LOCAL_PRELINK_MODULE := false
 include $(BUILD_SHARED_LIBRARY)
-
diff --git a/ipanat/test/README.txt b/ipanat/test/README.txt
index 4e87121..9a84b10 100644
--- a/ipanat/test/README.txt
+++ b/ipanat/test/README.txt
@@ -1,18 +1,66 @@
-1 To run this suite separately(each test case creates table and delete table) use below command
-   - To execute test suite nt times with n entries, command "ipanatest sep nt n"
+INTRODUCTION
+------------
 
-  Example:  To execute test suite 1 time with 100 entries, command "ipanattest sep 100"
+The ipanattest allows its user to drive NAT testing.  It is run as follows:
 
+# ipanattest [-d -r N -i N -e N -m mt -g M-N]
+Where:
+  -d     Each test is discrete (create table, add rules, destroy table)
+         If not specified, the table is created and destroyed only once for all tests
+  -r N   Where N is the number of times to run the inotify regression test
+  -i N   Where N is the number of times (iterations) to run each test
+  -e N   Where N is the number of entries in the NAT
+  -m mt  Where mt is the type of memory to use for the NAT
+         Legal mt's: DDR, SRAM, or HYBRID (ie. use SRAM and DDR)
+  -g M-N Run tests M through N only
 
-2. To run test suite not separately(creates table and delete table only once) use below command
-   - To execute test suite nt times with n entries, command "ipanatest reg nt n"
+More about each command line option:
 
-   Example: To execute test suite 5 times with 32 entries, command "ipanattest reg 5 32"
+-d    Makes each test discrete, meaning that each test will create a
+      table, add rules, then destroy the table.
+
+      Conversely, when -d is not specified, each test will not create
+      and destroy its own table.  The table is created once at the
+      start of the run and destroyed once at the end, with all tests
+      being run in between.
 
-3. To run inotify regression test use command, "ipanattest inotify nt"
+-r N  Will cause the inotify regression test to be run N times.
 
-   Example: To execute inotify 5 times, command "ipanattest inotify 5"
+-i N  Will cause each test to be run N times
 
+-e N  Will cause the creation of a table with N entries
 
-4. if we just give command "ipanattest", runs test suite 1 time with 100 entries (non separate)
+-m mt Will cause the NAT to live in either SRAM, DDR, or both
+      (ie. HYBRID)
+
+-g M-N Will cause tests M through N to be run.  This allows you to
+       skip or isolate tests.
+
+When run with no arguments (ie. defaults):
+
+  1) The tests will be non-discrete
+  2) Each test will be run only once (one iteration)
+  3) The table will live in DDR and have one hundred entries
+  4) No inotify regression will be run
+
+EXAMPLE COMMAND LINES
+---------------------
+
+To execute discrete tests (create, add rules, and delete table for
+each test) one time on a table with one hundred entries:
+
+# ipanattest -d -i 1 -e 100
+
+To execute non-discrete (create and delete table only once) tests five
+times on a table with thirty-two entries:
+
+# ipanattest -i 5 -e 32
+
+To execute the inotify regression test 5 times:
+
+# ipanattest -r 5
+
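+To execute the default test suite on an SRAM-resident table (assuming
+the target supports SRAM NAT):
+
+# ipanattest -m SRAM
+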
+ADDING NEW TESTS
+----------------
+
+In main.c, please see and embellish nt_array[] and use the following
+file as a model: ipa_nat_testMODEL.c
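+
+As a rough sketch (the test name and the nt_array[] values below are
+illustrative only; follow the existing tests for the exact
+conventions), a new test takes the NatTestFunc signature defined in
+ipa_nat_test.h:
+
+  int ipa_nat_testNEW(
+      const char* nat_mem_type,
+      u32         pub_ip_add,
+      int         total_entries,
+      u32         tbl_hdl,
+      int         sep,
+      void*       arb_data_ptr)
+  {
+      /* exercise the NAT APIs here; return 0 on success, -1 on error */
+      return 0;
+  }
+
+and is registered in main.c's nt_array[] with the NAT_TEST_ENTRY
+macro, for example:
+
+  NAT_TEST_ENTRY(ipa_nat_testNEW, 0, 0),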
diff --git a/ipanat/test/ipa_nat_test.h b/ipanat/test/ipa_nat_test.h
index d5ac0d5..150e4b6 100644
--- a/ipanat/test/ipa_nat_test.h
+++ b/ipanat/test/ipa_nat_test.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -27,78 +27,154 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-/*===========================================================================
-
-                     INCLUDE FILES FOR MODULE
-
-===========================================================================*/
-#include "stdint.h"  /* uint32_t */
-#include "stdio.h"
+/*
+ * ===========================================================================
+ *
+ * INCLUDE FILES FOR MODULE
+ *
+ * ===========================================================================
+ */
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <time.h>
+#include <arpa/inet.h> /* for inet_addr() used by rand_ip_addr() below */
 #include <netinet/in.h> /* for proto definitions */
 
+#include "ipa_nat_drv.h"
+#include "ipa_nat_drvi.h"
+
+#undef array_sz
+#define array_sz(a) \
+	( sizeof(a)/sizeof(a[0]) )
+
 #define u32 uint32_t
 #define u16 uint16_t
 #define u8  uint8_t
 
+#define RAN_ADDR rand_ip_addr()
+#define RAN_PORT rand_ip_port()
+
+static inline u32 rand_ip_addr()
+{
+	static char buf[64];
+
+	snprintf(
+		buf, sizeof(buf),
+		"%u.%u.%u.%u",
+		(rand() % 254) + 1,
+		 rand() % 255,
+		 rand() % 255,
+		(rand() % 254) + 1);
+
+	return (u32) inet_addr(buf);
+}
+
+static inline u16 rand_ip_port()
+{
+	return (u16) ((rand() % 60535) + 5000);
+}
+
 /*============ Preconditions to run NAT Test cases =========*/
 #define IPA_NAT_TEST_PRE_COND_TE  20
 
-#define CHECK_ERR1(x, tbl_hdl) \
-  if(ipa_nat_validate_ipv4_table(tbl_hdl)) { \
-    if(sep) {\
-       ipa_nat_del_ipv4_tbl(tbl_hdl); \
-     }\
-    return -1;\
-  }\
-  if(x) { \
-    IPAERR("%d\n", ret); \
-    if(sep) {\
-      ipa_nat_del_ipv4_tbl(tbl_hdl); \
-     }\
-     return -1; \
-  }
+#define CHECK_ERR(x)							\
+	if ( x ) {									\
+		IPAERR("Abrupt end of %s with "			\
+			   "err: %d at line: %d\n",			\
+			   __FUNCTION__, x, __LINE__);		\
+		return -1;								\
+	}
 
-#define CHECK_ERR(x) if(x) { \
-    IPAERR("%d\n", ret); \
-    return -1;\
- }
+#define CHECK_ERR_TBL_STOP(x, th)									 \
+	if ( th ) {														 \
+		int _ter_ = ipa_nat_validate_ipv4_table(th);				 \
+		if ( _ter_ ) {												 \
+			if ( sep ) {											 \
+				ipa_nat_del_ipv4_tbl(th);							 \
+			}														 \
+			IPAERR("Abrupt end of %s with "							 \
+				   "err: %d at line: %d\n",							 \
+				   __FUNCTION__, _ter_, __LINE__);					 \
+			return -1;												 \
+		}															 \
+	}																 \
+	if ( x ) {														 \
+		if ( th ) {													 \
+			ipa_nat_dump_ipv4_table(th);							 \
+			if( sep ) {												 \
+				ipa_nat_del_ipv4_tbl(th);							 \
+			}														 \
+		}															 \
+		IPAERR("Abrupt end of %s with "								 \
+			   "err: %d at line: %d\n",								 \
+			   __FUNCTION__, x, __LINE__);							 \
+		return -1;													 \
+	}
 
-#if 0
-#define CHECK_ERR(x) if(x) { \
-    IPAERR("%d\n", ret); \
-    if(sep) {\
-      ipa_nat_del_ipv4_tbl(tbl_hdl); \
-    }\
-    return -1;\
- }
-#endif
+#define CHECK_ERR_TBL_ACTION(x, th, action)							 \
+	if ( th ) {														 \
+		int _ter_ = ipa_nat_validate_ipv4_table(th);				 \
+		if ( _ter_ ) {												 \
+			IPAERR("ipa_nat_validate_ipv4_table() failed "			 \
+				   "in: %s at line: %d\n",							 \
+				   __FUNCTION__, __LINE__);							 \
+			action;													 \
+		}															 \
+	}																 \
+	if ( x ) {														 \
+		if ( th ) {													 \
+			ipa_nat_dump_ipv4_table(th);							 \
+		}															 \
+		IPAERR("error: %d in %s at line: %d\n",						 \
+			   x, __FUNCTION__, __LINE__);							 \
+		action;														 \
+	}
 
-#define IPADBG(fmt, args...) printf(" %s:%d " fmt, __FUNCTION__, __LINE__, ## args)
-#define IPAERR(fmt, args...) printf(" %s:%d " fmt, __FUNCTION__, __LINE__, ## args)
+typedef int (*NatTestFunc)(
+	const char*, u32, int, u32, int, void*);
 
-#define NAT_DUMP
+typedef struct
+{
+	const char* func_name;
+	int         num_ents_trigger;
+	int         test_hold_time_in_secs;
+	NatTestFunc func;
+} NatTests;
+
+#undef NAT_TEST_ENTRY
+#define NAT_TEST_ENTRY(f, n, ht) \
+	{#f, (n), (ht), f}
+
+#define NAT_DEBUG
 int ipa_nat_validate_ipv4_table(u32);
 
-int ipa_nat_test000(int, u32, u8);
-int ipa_nat_test001(int, u32, u8);
-int ipa_nat_test002(int, u32, u8);
-int ipa_nat_test003(int, u32, u8);
-int ipa_nat_test004(int, u32, u8);
-int ipa_nat_test005(int, u32, u8);
-int ipa_nat_test006(int, u32, u8);
-int ipa_nat_test007(int, u32, u8);
-int ipa_nat_test008(int, u32, u8);
-int ipa_nat_test009(int, u32, u8);
-int ipa_nat_test010(int, u32, u8);
-int ipa_nat_test011(int, u32, u8);
-int ipa_nat_test012(int, u32, u8);
-int ipa_nat_test013(int, u32, u8);
-int ipa_nat_test014(int, u32, u8);
-int ipa_nat_test015(int, u32, u8);
-int ipa_nat_test016(int, u32, u8);
-int ipa_nat_test017(int, u32, u8);
-int ipa_nat_test018(int, u32, u8);
-int ipa_nat_test019(int, u32, u8);
-int ipa_nat_test020(int, u32, u8);
-int ipa_nat_test021(int, int);
-int ipa_nat_test022(int, u32, u8);
+int ipa_nat_testREG(const char*, u32, int, u32, int, void*);
+
+int ipa_nat_test000(const char*, u32, int, u32, int, void*);
+int ipa_nat_test001(const char*, u32, int, u32, int, void*);
+int ipa_nat_test002(const char*, u32, int, u32, int, void*);
+int ipa_nat_test003(const char*, u32, int, u32, int, void*);
+int ipa_nat_test004(const char*, u32, int, u32, int, void*);
+int ipa_nat_test005(const char*, u32, int, u32, int, void*);
+int ipa_nat_test006(const char*, u32, int, u32, int, void*);
+int ipa_nat_test007(const char*, u32, int, u32, int, void*);
+int ipa_nat_test008(const char*, u32, int, u32, int, void*);
+int ipa_nat_test009(const char*, u32, int, u32, int, void*);
+int ipa_nat_test010(const char*, u32, int, u32, int, void*);
+int ipa_nat_test011(const char*, u32, int, u32, int, void*);
+int ipa_nat_test012(const char*, u32, int, u32, int, void*);
+int ipa_nat_test013(const char*, u32, int, u32, int, void*);
+int ipa_nat_test014(const char*, u32, int, u32, int, void*);
+int ipa_nat_test015(const char*, u32, int, u32, int, void*);
+int ipa_nat_test016(const char*, u32, int, u32, int, void*);
+int ipa_nat_test017(const char*, u32, int, u32, int, void*);
+int ipa_nat_test018(const char*, u32, int, u32, int, void*);
+int ipa_nat_test019(const char*, u32, int, u32, int, void*);
+int ipa_nat_test020(const char*, u32, int, u32, int, void*);
+int ipa_nat_test021(const char*, u32, int, u32, int, void*);
+int ipa_nat_test022(const char*, u32, int, u32, int, void*);
+int ipa_nat_test023(const char*, u32, int, u32, int, void*);
+int ipa_nat_test024(const char*, u32, int, u32, int, void*);
+int ipa_nat_test025(const char*, u32, int, u32, int, void*);
+int ipa_nat_test999(const char*, u32, int, u32, int, void*);
diff --git a/ipanat/test/ipa_nat_test000.c b/ipanat/test/ipa_nat_test000.c
index 09914ea..764a048 100644
--- a/ipanat/test/ipa_nat_test000.c
+++ b/ipanat/test/ipa_nat_test000.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -35,35 +35,36 @@
 	@brief
 	Verify the following scenario:
 	1. Add ipv4 table
-	2. Delete ipv4 table
 */
 /*===========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test000(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test000(
+	const char* nat_mem_type,
+	u32  pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 
-	int ret;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	int  ret;
 
-	ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-	if (0 != ret)
+	IPADBG("In\n");
+
+	if ( ! sep )
 	{
-		IPAERR("unable to create ipv4 nat table and returning Error:%d\n", ret);
-		return -1;
-	}
-	IPADBG("create nat ipv4 table successfully() \n");
+		IPADBG("calling ipa_nat_add_ipv4_tbl()\n");
 
-	IPADBG("calling ipa_nat_del_ipv4_tbl() \n");
-	ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
-	if (0 != ret)
-	{
-		IPAERR("Unable to delete ipv4 nat table %d\n", ret);
-		return -1;
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, tbl_hdl_ptr);
+		CHECK_ERR_TBL_STOP(ret, *tbl_hdl_ptr);
+
+		IPADBG("created nat ipv4 table successfully\n");
 	}
-	IPADBG("deleted ipv4 nat table successfully. Test passed \n");
+
+	IPADBG("Out\n");
 
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test001.c b/ipanat/test/ipa_nat_test001.c
index 8daef33..08942d2 100644
--- a/ipanat/test/ipa_nat_test001.c
+++ b/ipanat/test/ipa_nat_test001.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -41,37 +41,48 @@
 /*===========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test001(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test001(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
 
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s()\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
 
-		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-		CHECK_ERR(ret);
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
+	if ( sep )
+	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test002.c b/ipanat/test/ipa_nat_test002.c
index e6f5ae3..83992ec 100644
--- a/ipanat/test/ipa_nat_test002.c
+++ b/ipanat/test/ipa_nat_test002.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,44 +42,51 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test002(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test002(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s()\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test003.c b/ipanat/test/ipa_nat_test003.c
index 0634265..6082620 100644
--- a/ipanat/test/ipa_nat_test003.c
+++ b/ipanat/test/ipa_nat_test003.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,40 +43,62 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test003(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test003(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	ipa_nat_ipv4_rule ipv4_rule = {0};
+	u32 rule_hdl;
+
 	int ret;
-	u32 rule_hdl, rule_hdl1;
-	ipa_nat_ipv4_rule ipv4_rule;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
+	IPADBG("In\n");
 
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	if ( sep )
+	{
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
+
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
+
+	ipv4_rule.protocol = IPPROTO_TCP;
+	ipv4_rule.public_port = RAN_PORT;
+
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
-
-		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-		CHECK_ERR(ret);
-
-		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-		CHECK_ERR(ret);
-
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test004.c b/ipanat/test/ipa_nat_test004.c
index 02378ff..32936f1 100644
--- a/ipanat/test/ipa_nat_test004.c
+++ b/ipanat/test/ipa_nat_test004.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -35,36 +35,49 @@
 	@brief
 	Verify the following scenario:
 	1. Add ipv4 table
-	2. Query nat table handle
+	2. Delete a bogus table handle
 	3. Delete ipv4 table
 */
 /*===========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test004(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test004(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret = 0;
-	u32 tbl_hdl1 = 0;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	u32 tbl_hdl1 = 0xFFFFFFFF;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
-
-		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
-		CHECK_ERR(ret);
-
-		ret = ipa_nat_del_ipv4_tbl(tbl_hdl1);
-		if(ret == 0)
-		{
-			IPAERR("able to delete table using invalid table handle\n");
-			return -1;
-		}
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
+
+	ret = ipa_nat_del_ipv4_tbl(tbl_hdl1); /* intentionally pass bad handle */
+
+	if ( ret == 0 )
+	{
+		IPAERR("Able to delete table using invalid table handle\n");
+		CHECK_ERR_TBL_STOP(-1, tbl_hdl);
+	}
+
+	if ( sep )
+	{
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
+	}
+
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test005.c b/ipanat/test/ipa_nat_test005.c
index 12228d1..8ee78fc 100644
--- a/ipanat/test/ipa_nat_test005.c
+++ b/ipanat/test/ipa_nat_test005.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,41 +43,55 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test005(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test005(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
-	int ret = 0;
-	u32 rule_hdl, rule_hdl1;
-	ipa_nat_ipv4_rule ipv4_rule;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
+	u32 rule_hdl;
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
+	int ret;
 
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	if (sep)
+	IPADBG("In\n");
+
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
 
-		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-		CHECK_ERR(ret);
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-		ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-		CHECK_ERR(ret);
+	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-		CHECK_ERR(ret);
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
+	if ( sep )
+	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test006.c b/ipanat/test/ipa_nat_test006.c
index 36f0171..7de4f91 100644
--- a/ipanat/test/ipa_nat_test006.c
+++ b/ipanat/test/ipa_nat_test006.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -35,57 +35,67 @@
 	@brief
 	Verify the following scenario:
 	1. Add ipv4 table
-	2. add same ipv rules
-	3. delete first followed by second
-	4. Delete ipv4 table
+	2. Add ipv4 rule
+	3. Add same ipv4 rule
+	4. Delete first followed by second
+	5. Delete ipv4 table
 */
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test006(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test006(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
-	int ret=0;
-	u32 rule_hdl, rule_hdl1;
-	ipa_nat_ipv4_rule ipv4_rule;
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
+	u32 rule_hdl;
+	u32 rule_hdl1;
 
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	int ret;
 
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test007.c b/ipanat/test/ipa_nat_test007.c
index 4160c02..4d26592 100644
--- a/ipanat/test/ipa_nat_test007.c
+++ b/ipanat/test/ipa_nat_test007.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,47 +42,59 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test007(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test007(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	ipa_nat_ipv4_rule ipv4_rule = {0};
+
+	u32 rule_hdl;
+	u32 rule_hdl1;
+
 	int ret;
-	u32 rule_hdl, rule_hdl1;
-	ipa_nat_ipv4_rule ipv4_rule;
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
+
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test008.c b/ipanat/test/ipa_nat_test008.c
index d016055..350e6f9 100644
--- a/ipanat/test/ipa_nat_test008.c
+++ b/ipanat/test/ipa_nat_test008.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,53 +42,62 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test008(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test008(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl, rule_hdl1;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1;
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
-	ipv4_rule1.target_ip = 0xC1171602; /* 193.23.22.2 */
-	ipv4_rule1.target_port = 1234;
-	ipv4_rule1.private_ip = 0xC2171602; /* 194.23.22.2 */
-	ipv4_rule1.private_port = 5678;
+	ipv4_rule.public_port = RAN_PORT;
+
+	ipv4_rule1.target_ip = RAN_ADDR;
+	ipv4_rule1.target_port = RAN_PORT;
+	ipv4_rule1.private_ip = RAN_ADDR;
+	ipv4_rule1.private_port = RAN_PORT;
 	ipv4_rule1.protocol = IPPROTO_TCP;
-	ipv4_rule1.public_port = 9050;
+	ipv4_rule1.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
+
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test009.c b/ipanat/test/ipa_nat_test009.c
index cf3c40f..7cf74a4 100644
--- a/ipanat/test/ipa_nat_test009.c
+++ b/ipanat/test/ipa_nat_test009.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,55 +42,62 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test009(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test009(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl, rule_hdl1;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1;
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	ipv4_rule1.target_ip = 0xC1171602; /* 193.23.22.2 */
-	ipv4_rule1.target_port = 1234;
-	ipv4_rule1.private_ip = 0xC2171602; /* 194.23.22.2 */
-	ipv4_rule1.private_port = 5678;
+	ipv4_rule1.target_ip = RAN_ADDR;
+	ipv4_rule1.target_port = RAN_PORT;
+	ipv4_rule1.private_ip = RAN_ADDR;
+	ipv4_rule1.private_port = RAN_PORT;
 	ipv4_rule1.protocol = IPPROTO_TCP;
-	ipv4_rule1.public_port = 9050;
+	ipv4_rule1.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test010.c b/ipanat/test/ipa_nat_test010.c
index 42d7fee..7d9cd88 100644
--- a/ipanat/test/ipa_nat_test010.c
+++ b/ipanat/test/ipa_nat_test010.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,67 +42,75 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test010(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test010(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl, rule_hdl1, rule_hdl2;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1, ipv4_rule2;
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	ipv4_rule1.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule1.target_port = 1235;
-	ipv4_rule1.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule1.private_port = 5679;
+	ipv4_rule1.target_ip = RAN_ADDR;
+	ipv4_rule1.target_port = RAN_PORT;
+	ipv4_rule1.private_ip = RAN_ADDR;
+	ipv4_rule1.private_port = RAN_PORT;
 	ipv4_rule1.protocol = IPPROTO_TCP;
-	ipv4_rule1.public_port = 9051;
+	ipv4_rule1.public_port = RAN_PORT;
 
-	ipv4_rule2.target_ip = 0xC1171602; /* 193.23.22.2 */
-	ipv4_rule2.target_port = 1235;
-	ipv4_rule2.private_ip = 0xC2171602; /* 194.23.22.2 */
-	ipv4_rule2.private_port = 5679;
+	ipv4_rule2.target_ip = RAN_ADDR;
+	ipv4_rule2.target_port = RAN_PORT;
+	ipv4_rule2.private_ip = RAN_ADDR;
+	ipv4_rule2.private_port = RAN_PORT;
 	ipv4_rule2.protocol = IPPROTO_TCP;
-	ipv4_rule2.public_port = 9051;
+	ipv4_rule2.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
-	if(sep)
+	IPADBG("In\n");
+
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test011.c b/ipanat/test/ipa_nat_test011.c
index bcce76c..525a37f 100644
--- a/ipanat/test/ipa_nat_test011.c
+++ b/ipanat/test/ipa_nat_test011.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,67 +42,75 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test011(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test011(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl, rule_hdl1, rule_hdl2;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1, ipv4_rule2;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0};
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	ipv4_rule1.target_ip = 0xF1181601;
-	ipv4_rule1.target_port = 1555;
-	ipv4_rule1.private_ip = 0xF2151601;
-	ipv4_rule1.private_port = 5999;
+	ipv4_rule1.target_ip = RAN_ADDR;
+	ipv4_rule1.target_port = RAN_PORT;
+	ipv4_rule1.private_ip = RAN_ADDR;
+	ipv4_rule1.private_port = RAN_PORT;
 	ipv4_rule1.protocol = IPPROTO_TCP;
-	ipv4_rule1.public_port = 9111;
+	ipv4_rule1.public_port = RAN_PORT;
 
-	ipv4_rule2.target_ip = 0xC1166602;
-	ipv4_rule2.target_port = 1555;
-	ipv4_rule2.private_ip = 0xC2155602;
-	ipv4_rule2.private_port = 5777;
+	ipv4_rule2.target_ip = RAN_ADDR;
+	ipv4_rule2.target_port = RAN_PORT;
+	ipv4_rule2.private_ip = RAN_ADDR;
+	ipv4_rule2.private_port = RAN_PORT;
 	ipv4_rule2.protocol = IPPROTO_TCP;
-	ipv4_rule2.public_port = 9000;
+	ipv4_rule2.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test012.c b/ipanat/test/ipa_nat_test012.c
index 9d3c835..81c7d72 100644
--- a/ipanat/test/ipa_nat_test012.c
+++ b/ipanat/test/ipa_nat_test012.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,68 +42,75 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-
-int ipa_nat_test012(int totoal_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test012(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl, rule_hdl1, rule_hdl2;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1, ipv4_rule2;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0};
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	ipv4_rule1.target_ip = 0xD1171601;
-	ipv4_rule1.target_port = 3512;
-	ipv4_rule1.private_ip = 0xD2471601;
-	ipv4_rule1.private_port = 9997;
+	ipv4_rule1.target_ip = RAN_ADDR;
+	ipv4_rule1.target_port = RAN_PORT;
+	ipv4_rule1.private_ip = RAN_ADDR;
+	ipv4_rule1.private_port = RAN_PORT;
 	ipv4_rule1.protocol = IPPROTO_TCP;
-	ipv4_rule1.public_port = 8881;
+	ipv4_rule1.public_port = RAN_PORT;
 
-	ipv4_rule2.target_ip = 0xC1172452;
-	ipv4_rule2.target_port = 1895;
-	ipv4_rule2.private_ip = 0xC2172452;
-	ipv4_rule2.private_port = 6668;
+	ipv4_rule2.target_ip = RAN_ADDR;
+	ipv4_rule2.target_port = RAN_PORT;
+	ipv4_rule2.private_ip = RAN_ADDR;
+	ipv4_rule2.private_port = RAN_PORT;
 	ipv4_rule2.protocol = IPPROTO_TCP;
-	ipv4_rule2.public_port = 5551;
+	ipv4_rule2.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, totoal_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test013.c b/ipanat/test/ipa_nat_test013.c
index 2b9b005..d962065 100644
--- a/ipanat/test/ipa_nat_test013.c
+++ b/ipanat/test/ipa_nat_test013.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,67 +42,75 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test013(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test013(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl, rule_hdl1, rule_hdl2;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule1, ipv4_rule2;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule1 = {0}, ipv4_rule2 = {0};
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	ipv4_rule1.target_ip = 0xC1171609; /* 193.23.22.9 */
-	ipv4_rule1.target_port = 1235;
-	ipv4_rule1.private_ip = 0xC2171609; /* 194.23.22.9 */
-	ipv4_rule1.private_port = 6579;
+	ipv4_rule1.target_ip = RAN_ADDR;
+	ipv4_rule1.target_port = RAN_PORT;
+	ipv4_rule1.private_ip = RAN_ADDR;
+	ipv4_rule1.private_port = RAN_PORT;
 	ipv4_rule1.protocol = IPPROTO_TCP;
-	ipv4_rule1.public_port = 8951;
+	ipv4_rule1.public_port = RAN_PORT;
 
-	ipv4_rule2.target_ip = 0xC1171606; /* 193.23.22.6 */
-	ipv4_rule2.target_port = 1235;
-	ipv4_rule2.private_ip = 0xC2171606; /* 194.23.22.6 */
-	ipv4_rule2.private_port = 7956;
+	ipv4_rule2.target_ip = RAN_ADDR;
+	ipv4_rule2.target_port = RAN_PORT;
+	ipv4_rule2.private_ip = RAN_ADDR;
+	ipv4_rule2.private_port = RAN_PORT;
 	ipv4_rule2.protocol = IPPROTO_TCP;
-	ipv4_rule2.public_port = 5109;
+	ipv4_rule2.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule1, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test014.c b/ipanat/test/ipa_nat_test014.c
index fd30317..3daa2d9 100644
--- a/ipanat/test/ipa_nat_test014.c
+++ b/ipanat/test/ipa_nat_test014.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,54 +42,61 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test014(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test014(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s()\n", __FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test015.c b/ipanat/test/ipa_nat_test015.c
index eaef923..1dc8f97 100644
--- a/ipanat/test/ipa_nat_test015.c
+++ b/ipanat/test/ipa_nat_test015.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,55 +43,62 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-
-int ipa_nat_test015(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test015(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
-	if(sep)
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test016.c b/ipanat/test/ipa_nat_test016.c
index 23157e2..f1ee4e6 100644
--- a/ipanat/test/ipa_nat_test016.c
+++ b/ipanat/test/ipa_nat_test016.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,54 +43,61 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test016(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test016(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test017.c b/ipanat/test/ipa_nat_test017.c
index d88e611..23369e4 100644
--- a/ipanat/test/ipa_nat_test017.c
+++ b/ipanat/test/ipa_nat_test017.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,54 +43,61 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test017(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test017(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test018.c b/ipanat/test/ipa_nat_test018.c
index c885d4d..ca3d712 100644
--- a/ipanat/test/ipa_nat_test018.c
+++ b/ipanat/test/ipa_nat_test018.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,54 +43,61 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test018(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test018(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test019.c b/ipanat/test/ipa_nat_test019.c
index 3ba3119..883294d 100644
--- a/ipanat/test/ipa_nat_test019.c
+++ b/ipanat/test/ipa_nat_test019.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -43,54 +43,61 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test019(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test019(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule;
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
-
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test020.c b/ipanat/test/ipa_nat_test020.c
index e6871b5..2a32da0 100644
--- a/ipanat/test/ipa_nat_test020.c
+++ b/ipanat/test/ipa_nat_test020.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -42,59 +42,67 @@
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test020(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test020(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
 	int ret;
 	u32 rule_hdl1, rule_hdl2, rule_hdl3, rule_hdl4;
-	ipa_nat_ipv4_rule ipv4_rule;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	ipa_nat_ipv4_rule ipv4_rule = {0};
 
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
 	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	IPADBG("In\n");
 
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR(ret);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl4);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl2);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl4);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
 	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR(ret);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	if(sep)
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
 		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test021.c b/ipanat/test/ipa_nat_test021.c
index 48c4321..a2d5a8d 100644
--- a/ipanat/test/ipa_nat_test021.c
+++ b/ipanat/test/ipa_nat_test021.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -27,55 +27,99 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+
 /*=========================================================================*/
 /*!
 	@file
-	ipa_nat_test021.c
+	ipa_nat_test021.c
 
 	@brief
 	Verify the following scenario:
 	1. Add ipv4 table
-	2. Delete ipv4 table
+	2. Add the same ipv4 rule 3 times
+	3. Delete the head and last entries
+	4. Add 2 new identical ipv4 entries
+	5. Add the head entry again
+	6. Delete ipv4 table
 */
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test021(int total_entries, int reg)
+int ipa_nat_test021(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+	int ret;
+	u32 rule_hdl1, rule_hdl2, rule_hdl3;
+	ipa_nat_ipv4_rule ipv4_rule = {0}, ipv4_rule2 = {0};
+	u32 rule_hdl21, rule_hdl22;
 
-	int ret, i;
-	u32 tbl_hdl;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	/* Rule 1 */
+	ipv4_rule.target_ip = RAN_ADDR;
+	ipv4_rule.target_port = RAN_PORT;
+	ipv4_rule.private_ip = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
+	ipv4_rule.protocol = IPPROTO_TCP;
+	ipv4_rule.public_port = RAN_PORT;
 
-	IPADBG("%s():\n",__FUNCTION__);
+	/* Rule 2 */
+	ipv4_rule2.target_ip = RAN_ADDR;
+	ipv4_rule2.target_port = RAN_PORT;
+	ipv4_rule2.private_ip = RAN_ADDR;
+	ipv4_rule2.private_port = RAN_PORT;
+	ipv4_rule2.protocol = IPPROTO_UDP;
+	ipv4_rule2.public_port = RAN_PORT;
 
-	for(i=0; i<reg; i++)
+	IPADBG("In\n");
+
+	if ( sep )
 	{
-		IPADBG("executing %d th time:\n",i);
-
-		IPADBG("calling ipa_nat_add_ipv4_tbl() \n");
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		if (0 != ret)
-		{
-			IPAERR("unable to create ipv4 nat table and returning Error:%d\n", ret);
-			IPADBG("executed %d times:\n",i);
-			return -1;
-		}
-		IPADBG("create nat ipv4 table successfully() \n");
-
-		IPADBG("calling ipa_nat_del_ipv4_tbl() \n");
-		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
-		if (0 != ret)
-		{
-			IPAERR("Unable to delete ipv4 nat table %d\n", ret);
-			IPADBG("executed %d times:\n",i);
-			return -1;
-		}
-		IPADBG("deleted ipv4 nat table successfully. Test passed \n");
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
-	IPADBG("executed %d times:\n",(i+1));
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	/* Delete head entry */
+	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	/* Delete Last Entry */
+	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	/* Add 2 different Entries */
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl21);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl22);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	/* Add first entry again */
+	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	if ( sep )
+	{
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
+	}
+
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test022.c b/ipanat/test/ipa_nat_test022.c
index ebdd291..3767e19 100644
--- a/ipanat/test/ipa_nat_test022.c
+++ b/ipanat/test/ipa_nat_test022.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -27,92 +27,273 @@
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-
 /*=========================================================================*/
 /*!
 	@file
-	ipa_nat_test022.cpp
+	ipa_nat_test022.c
 
 	@brief
-	Verify the following scenario:
+	Verify the following scenario:
 	1. Add ipv4 table
-	2. add same 3 ipv rules
-  3. delete Head and last entry
-  4. add 2 new same ip4 entries
-  5. Add head entry again
-	6. Delete ipv4 table
+	2. Add ipv4 rules until the table is full
+	3. Print stats
+	4. Delete ipv4 table
 */
 /*=========================================================================*/
 
 #include "ipa_nat_test.h"
-#include "ipa_nat_drv.h"
 
-int ipa_nat_test022(int total_entries, u32 tbl_hdl, u8 sep)
+int ipa_nat_test022(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
 {
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	ipa_nat_ipv4_rule  ipv4_rule;
+	u32                rule_hdls[2048];
+
+	ipa_nati_tbl_stats nstats, last_nstats;
+	ipa_nati_tbl_stats istats, last_istats;
+
+	u32                i, tot;
+
+	bool               switched = false;
+
+	const char*        mem_type;
+
 	int ret;
-	u32 rule_hdl1, rule_hdl2, rule_hdl3;
-	ipa_nat_ipv4_rule ipv4_rule, ipv4_rule2;
-	u32 rule_hdl21, rule_hdl22;
 
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	IPADBG("In\n");
 
-	/* Rule 1 */
-	ipv4_rule.target_ip = 0xC1171601; /* 193.23.22.1 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171601; /* 194.23.22.1 */
-	ipv4_rule.private_port = 5678;
-	ipv4_rule.protocol = IPPROTO_TCP;
-	ipv4_rule.public_port = 9050;
-
-	/* Rule 2*/
-	ipv4_rule.target_ip = 0xC1171604; /* 193.23.22.4 */
-	ipv4_rule.target_port = 1234;
-	ipv4_rule.private_ip = 0xC2171603; /* 194.23.22.3 */
-	ipv4_rule.private_port = 5680;
-	ipv4_rule.protocol = IPPROTO_UDP;
-	ipv4_rule.public_port = 9066;
-
-	IPADBG("%s():\n",__FUNCTION__);
-
-	if(sep)
+	if ( sep )
 	{
-		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-		CHECK_ERR1(ret, tbl_hdl);
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 	}
 
-	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
-	CHECK_ERR1(ret, tbl_hdl);
+	ret = ipa_nati_clear_ipv4_tbl(tbl_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
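+	/*
+	 * Presumably ipa_nati_clear_ipv4_tbl() empties any rules left in the
+	 * table, and the ipa_nati_ipv4_tbl_stats() call below snapshots a
+	 * baseline before the fill loop starts.
+	 */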
 
-	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
-	CHECK_ERR1(ret, tbl_hdl);
+	ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
 
-	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR1(ret, tbl_hdl);
+	IPAINFO("Attempting rule adds to %s table of size: (%u)\n",
+			ipa3_nat_mem_in_as_str(nstats.nmi),
+			nstats.tot_ents);
 
-	/* Delete head entry */
-	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl1);
-	CHECK_ERR1(ret, tbl_hdl);
+	last_nstats = nstats;
+	last_istats = istats;
 
-	/* Delete Last Entry */
-	ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdl3);
-	CHECK_ERR1(ret, tbl_hdl);
+	memset(rule_hdls, 0, sizeof(rule_hdls));
 
-	/* Add 2 different Entries */
-	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl21);
-	CHECK_ERR1(ret, tbl_hdl);
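+	/*
+	 * Fill loop: keep adding rules until an add fails or the 2048-handle
+	 * cap on rule_hdls is reached. In HYBRID mode the stats' nmi field
+	 * changes when the driver switches memory type (e.g. SRAM to DDR),
+	 * at which point the per-memory-type stats below are printed.
+	 */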
+	for ( i = tot = 0; i < array_sz(rule_hdls); i++ )
+	{
+		IPADBG("Trying %d ipa_nat_add_ipv4_rule()\n", i);
 
-	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule2, &rule_hdl22);
-	CHECK_ERR1(ret, tbl_hdl);
+		memset(&ipv4_rule, 0, sizeof(ipv4_rule));
 
-	/* Add first entry again */
-	ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
-	CHECK_ERR1(ret, tbl_hdl);
+		ipv4_rule.protocol     = IPPROTO_TCP;
+		ipv4_rule.public_port  = RAN_PORT;
+		ipv4_rule.target_ip    = RAN_ADDR;
+		ipv4_rule.target_port  = RAN_PORT;
+		ipv4_rule.private_ip   = RAN_ADDR;
+		ipv4_rule.private_port = RAN_PORT;
 
-	if(sep)
+		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdls[i]);
+		CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break);
+
+		IPADBG("Success %d ipa_nat_add_ipv4_rule() -> rule_hdl(0x%08X)\n",
+			   i, rule_hdls[i]);
+
+		ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats);
+		CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break);
+
+		/*
+		 * Are we in hybrid mode and have we switched memory type?
+		 * Check for it and print the appropriate stats.
+		 */
+		if ( nstats.nmi != last_nstats.nmi )
+		{
+			mem_type = ipa3_nat_mem_in_as_str(last_nstats.nmi);
+
+			switched = true;
+
+			/*
+			 * NAT table stats...
+			 */
+			IPAINFO("Able to add (%u) records to %s "
+					"NAT table of size (%u) or (%f) percent\n",
+					tot,
+					mem_type,
+					last_nstats.tot_ents,
+					((float) tot / (float) last_nstats.tot_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"NAT BASE table of size (%u) or (%f) percent\n",
+					last_nstats.tot_base_ents_filled,
+					mem_type,
+					last_nstats.tot_base_ents,
+					((float) last_nstats.tot_base_ents_filled /
+					 (float) last_nstats.tot_base_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"NAT EXPN table of size (%u) or (%f) percent\n",
+					last_nstats.tot_expn_ents_filled,
+					mem_type,
+					last_nstats.tot_expn_ents,
+					((float) last_nstats.tot_expn_ents_filled /
+					 (float) last_nstats.tot_expn_ents) * 100.0);
+
+			IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+					mem_type,
+					last_nstats.tot_chains,
+					last_nstats.min_chain_len,
+					last_nstats.max_chain_len,
+					last_nstats.avg_chain_len);
+
+			/*
+			 * INDEX table stats...
+			 */
+			IPAINFO("Able to add (%u) records to %s "
+					"IDX table of size (%u) or (%f) percent\n",
+					tot,
+					mem_type,
+					last_istats.tot_ents,
+					((float) tot / (float) last_istats.tot_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"IDX BASE table of size (%u) or (%f) percent\n",
+					last_istats.tot_base_ents_filled,
+					mem_type,
+					last_istats.tot_base_ents,
+					((float) last_istats.tot_base_ents_filled /
+					 (float) last_istats.tot_base_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"IDX EXPN table of size (%u) or (%f) percent\n",
+					last_istats.tot_expn_ents_filled,
+					mem_type,
+					last_istats.tot_expn_ents,
+					((float) last_istats.tot_expn_ents_filled /
+					 (float) last_istats.tot_expn_ents) * 100.0);
+
+			IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+					mem_type,
+					last_istats.tot_chains,
+					last_istats.min_chain_len,
+					last_istats.max_chain_len,
+					last_istats.avg_chain_len);
+		}
+
+		last_nstats = nstats;
+		last_istats = istats;
+
+		if ( switched )
+		{
+			switched = false;
+
+			IPAINFO("Continuing rule adds to %s table of size: (%u)\n",
+					ipa3_nat_mem_in_as_str(nstats.nmi),
+					nstats.tot_ents);
+		}
+
+		tot++;
+	}
+
+	ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	mem_type = ipa3_nat_mem_in_as_str(nstats.nmi);
+
+	/*
+	 * NAT table stats...
+	 */
+	IPAINFO("Able to add (%u) records to %s "
+			"NAT table of size (%u) or (%f) percent\n",
+			tot,
+			mem_type,
+			nstats.tot_ents,
+			((float) tot / (float) nstats.tot_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"NAT BASE table of size (%u) or (%f) percent\n",
+			nstats.tot_base_ents_filled,
+			mem_type,
+			nstats.tot_base_ents,
+			((float) nstats.tot_base_ents_filled /
+			 (float) nstats.tot_base_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"NAT EXPN table of size (%u) or (%f) percent\n",
+			nstats.tot_expn_ents_filled,
+			mem_type,
+			nstats.tot_expn_ents,
+			((float) nstats.tot_expn_ents_filled /
+			 (float) nstats.tot_expn_ents) * 100.0);
+
+	IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+			mem_type,
+			nstats.tot_chains,
+			nstats.min_chain_len,
+			nstats.max_chain_len,
+			nstats.avg_chain_len);
+
+	/*
+	 * INDEX table stats...
+	 */
+	IPAINFO("Able to add (%u) records to %s "
+			"IDX table of size (%u) or (%f) percent\n",
+			tot,
+			mem_type,
+			istats.tot_ents,
+			((float) tot / (float) istats.tot_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"IDX BASE table of size (%u) or (%f) percent\n",
+			istats.tot_base_ents_filled,
+			mem_type,
+			istats.tot_base_ents,
+			((float) istats.tot_base_ents_filled /
+			 (float) istats.tot_base_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"IDX EXPN table of size (%u) or (%f) percent\n",
+			istats.tot_expn_ents_filled,
+			mem_type,
+			istats.tot_expn_ents,
+			((float) istats.tot_expn_ents_filled /
+			 (float) istats.tot_expn_ents) * 100.0);
+
+	IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+			mem_type,
+			istats.tot_chains,
+			istats.min_chain_len,
+			istats.max_chain_len,
+			istats.avg_chain_len);
+
+	IPAINFO("Deleting all rules\n");
+
+	for ( i = 0; i < tot; i++ )
+	{
+		IPADBG("Trying %d ipa_nat_del_ipv4_rule(0x%08X)\n",
+			   i, rule_hdls[i]);
+		ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdls[i]);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+		IPADBG("Success ipa_nat_del_ipv4_rule(%d)\n", i);
+	}
+
+	if ( sep )
 	{
 		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
-		CHECK_ERR1(ret, tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
 	}
 
+	IPADBG("Out\n");
+
 	return 0;
 }
diff --git a/ipanat/test/ipa_nat_test023.c b/ipanat/test/ipa_nat_test023.c
new file mode 100644
index 0000000..501b223
--- /dev/null
+++ b/ipanat/test/ipa_nat_test023.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*=========================================================================*/
+/*!
+	@file
+	ipa_nat_test023.c
+
+	@brief
+	Verify the following scenario:
+	1. Add ipv4 table
+	2. Add the same ipv4 rule three times to cause collisions and linking
+	3. Delete rules in a particular order and observe list for expected
+	   form
+	4. Run 2 and 3 over and over until all delete combinations have been
+	   run
+	5. Delete ipv4 table
+*/
+/*=========================================================================*/
+
+#include "ipa_nat_test.h"
+
+int ipa_nat_test023(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
+{
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	ipa_nat_ipv4_rule ipv4_rule = {0};
+
+	u32 rule_hdl1;
+	u32 rule_hdl2;
+	u32 rule_hdl3;
+
+	u32* rule_del_combos[6][3] = {
+		{ &rule_hdl1, &rule_hdl2, &rule_hdl3 },
+		{ &rule_hdl1, &rule_hdl3, &rule_hdl2 },
+
+		{ &rule_hdl2, &rule_hdl1, &rule_hdl3 },
+		{ &rule_hdl2, &rule_hdl3, &rule_hdl1 },
+
+		{ &rule_hdl3, &rule_hdl1, &rule_hdl2 },
+		{ &rule_hdl3, &rule_hdl2, &rule_hdl1 },
+	};
+
+	int i, j, ret;
+
+	ipv4_rule.target_ip    = RAN_ADDR;
+	ipv4_rule.target_port  = RAN_PORT;
+	ipv4_rule.private_ip   = RAN_ADDR;
+	ipv4_rule.private_port = RAN_PORT;
+	ipv4_rule.protocol     = IPPROTO_TCP;
+	ipv4_rule.public_port  = RAN_PORT;
+
+	IPADBG("In\n");
+
+	if ( sep )
+	{
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
+
+	for ( i = 0; i < 6; i++ )
+	{
+		IPADBG("Adding rule 1\n");
+		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl1);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+		IPADBG("Adding rule 2\n");
+		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl2);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+		IPADBG("Adding rule 3\n");
+		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdl3);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+		ipa_nat_dump_ipv4_table(tbl_hdl);
+
+		for ( j = 0; j < 3; j++ )
+		{
+			u32* rh_ptr = rule_del_combos[i][j];
+
+			IPADBG("Deleting rule %u\n",
+				   ( rh_ptr == &rule_hdl1 ) ? 1 :
+				   ( rh_ptr == &rule_hdl2 ) ? 2 : 3);
+
+			ret = ipa_nat_del_ipv4_rule(tbl_hdl, *rh_ptr);
+			CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+			ipa_nat_dump_ipv4_table(tbl_hdl);
+		}
+	}
+
+	if ( sep )
+	{
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
+	}
+
+	IPADBG("Out\n");
+
+	return 0;
+}
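The six delete orders hard-coded in rule_del_combos[][] above are simply the
3! permutations of the three colliding rule handles. A minimal standalone
sketch (illustrative only, not part of the patch; plain ISO C) that generates
the same six orders with Heap's algorithm:

	#include <stdio.h>

	/* Print one delete order, e.g. "delete order: 1 2 3" */
	static void print_order(const int o[3])
	{
		printf("delete order: %d %d %d\n", o[0], o[1], o[2]);
	}

	/* Heap's algorithm: emits every permutation of o[0..n-1] exactly once */
	static void permute(int o[3], int n)
	{
		int i, tmp, swap_idx;

		if (n == 1) {
			print_order(o);
			return;
		}
		for (i = 0; i < n; i++) {
			permute(o, n - 1);
			swap_idx = (n % 2) ? 0 : i; /* odd n: swap o[0]; even n: swap o[i] */
			tmp = o[swap_idx];
			o[swap_idx] = o[n - 1];
			o[n - 1] = tmp;
		}
	}

	int main(void)
	{
		int order[3] = { 1, 2, 3 }; /* stands in for rule_hdl1..rule_hdl3 */

		permute(order, 3);          /* prints all 3! = 6 delete orders */
		return 0;
	}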
diff --git a/ipanat/test/ipa_nat_test024.c b/ipanat/test/ipa_nat_test024.c
new file mode 100644
index 0000000..216fc39
--- /dev/null
+++ b/ipanat/test/ipa_nat_test024.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*=========================================================================*/
+/*!
+	@file
+	ipa_nat_test024.c
+
+	@brief
+	Verify the following scenario:
+	1. Trigger thousands of table memory switches
+
+*/
+/*===========================================================================*/
+
+#include "ipa_nat_test.h"
+
+int ipa_nat_test024(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
+{
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	int i, ret;
+
+	IPADBG("In\n");
+
+	if ( sep )
+	{
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
+
+	for ( i = 0; i < 1000; i++ )
+	{
+		ret = ipa_nat_test022(
+			nat_mem_type, pub_ip_add, total_entries, tbl_hdl, !sep, arb_data_ptr);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
+
+	if ( sep )
+	{
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
+	}
+
+	IPADBG("Out\n");
+
+	return 0;
+}
diff --git a/ipanat/test/ipa_nat_test025.c b/ipanat/test/ipa_nat_test025.c
new file mode 100644
index 0000000..d1c1b9d
--- /dev/null
+++ b/ipanat/test/ipa_nat_test025.c
@@ -0,0 +1,362 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*=========================================================================*/
+/*!
+	@file
+	ipa_nat_test025.c
+
+	@brief
+	Verify the following scenario:
+	1. Similar to test022, but with random deletes during adds
+*/
+/*=========================================================================*/
+
+#include "ipa_nat_test.h"
+
+#undef  VALID_RULE
+#define VALID_RULE(r) ((r) != 0 && (r) != 0xFFFFFFFF)
+
+#undef GET_MAX
+#define GET_MAX(ram, rdm) \
+	do { \
+		while ( (ram = rand() % 20) < 4); \
+		while ( (rdm = rand() % 10) >= ram || rdm == 0 ); \
+		IPADBG("rand_adds_max(%u) rand_dels_max(%u)\n", ram, rdm); \
+	} while (0)
+
+int ipa_nat_test025(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
+{
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	ipa_nat_ipv4_rule  ipv4_rule;
+	u32                rule_hdls[1024];
+
+	ipa_nati_tbl_stats nstats, last_nstats;
+	ipa_nati_tbl_stats istats, last_istats;
+
+	u32                i;
+	u32                rand_adds_max, rand_dels_max;
+	u32                tot, tot_added, tot_deleted;
+
+	bool               switched = false;
+
+	const char*        mem_type;
+
+	int ret;
+
+	IPADBG("In\n");
+
+	if ( sep )
+	{
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
+
+	ret = ipa_nati_clear_ipv4_tbl(tbl_hdl);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	IPAINFO("Attempting rule adds to %s table of size: (%u)\n",
+			ipa3_nat_mem_in_as_str(nstats.nmi),
+			nstats.tot_ents);
+
+	last_nstats = nstats;
+	last_istats = istats;
+
+	memset(rule_hdls, 0, sizeof(rule_hdls));
+
+	GET_MAX(rand_adds_max, rand_dels_max);
+
+	tot = tot_added = tot_deleted = 0;
+
+	for ( i = 0; i < array_sz(rule_hdls); i++ )
+	{
+		IPADBG("Trying %u ipa_nat_add_ipv4_rule()\n", i);
+
+		memset(&ipv4_rule, 0, sizeof(ipv4_rule));
+
+		ipv4_rule.protocol     = IPPROTO_TCP;
+		ipv4_rule.public_port  = RAN_PORT;
+		ipv4_rule.target_ip    = RAN_ADDR;
+		ipv4_rule.target_port  = RAN_PORT;
+		ipv4_rule.private_ip   = RAN_ADDR;
+		ipv4_rule.private_port = RAN_PORT;
+
+		ret = ipa_nat_add_ipv4_rule(tbl_hdl, &ipv4_rule, &rule_hdls[i]);
+		CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break);
+
+		IPADBG("Success %u ipa_nat_add_ipv4_rule() -> rule_hdl(0x%08X)\n",
+			   i, rule_hdls[i]);
+
+		ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats);
+		CHECK_ERR_TBL_ACTION(ret, tbl_hdl, break);
+
+		/*
+		 * Are we in hybrid mode and have we switched memory type?
+		 * Check for it and print the appropriate stats.
+		 */
+		if ( nstats.nmi != last_nstats.nmi )
+		{
+			mem_type = ipa3_nat_mem_in_as_str(last_nstats.nmi);
+
+			switched = true;
+
+			/*
+			 * NAT table stats...
+			 */
+			IPAINFO("Able to add (%u) records to %s "
+					"NAT table of size (%u) or (%f) percent\n",
+					tot,
+					mem_type,
+					last_nstats.tot_ents,
+					((float) tot / (float) last_nstats.tot_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"NAT BASE table of size (%u) or (%f) percent\n",
+					last_nstats.tot_base_ents_filled,
+					mem_type,
+					last_nstats.tot_base_ents,
+					((float) last_nstats.tot_base_ents_filled /
+					 (float) last_nstats.tot_base_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"NAT EXPN table of size (%u) or (%f) percent\n",
+					last_nstats.tot_expn_ents_filled,
+					mem_type,
+					last_nstats.tot_expn_ents,
+					((float) last_nstats.tot_expn_ents_filled /
+					 (float) last_nstats.tot_expn_ents) * 100.0);
+
+			IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+					mem_type,
+					last_nstats.tot_chains,
+					last_nstats.min_chain_len,
+					last_nstats.max_chain_len,
+					last_nstats.avg_chain_len);
+
+			/*
+			 * INDEX table stats...
+			 */
+			IPAINFO("Able to add (%u) records to %s "
+					"IDX table of size (%u) or (%f) percent\n",
+					tot,
+					mem_type,
+					last_istats.tot_ents,
+					((float) tot / (float) last_istats.tot_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"IDX BASE table of size (%u) or (%f) percent\n",
+					last_istats.tot_base_ents_filled,
+					mem_type,
+					last_istats.tot_base_ents,
+					((float) last_istats.tot_base_ents_filled /
+					 (float) last_istats.tot_base_ents) * 100.0);
+
+			IPAINFO("Able to add (%u) records to %s "
+					"IDX EXPN table of size (%u) or (%f) percent\n",
+					last_istats.tot_expn_ents_filled,
+					mem_type,
+					last_istats.tot_expn_ents,
+					((float) last_istats.tot_expn_ents_filled /
+					 (float) last_istats.tot_expn_ents) * 100.0);
+
+			IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+					mem_type,
+					last_istats.tot_chains,
+					last_istats.min_chain_len,
+					last_istats.max_chain_len,
+					last_istats.avg_chain_len);
+		}
+
+		last_nstats = nstats;
+		last_istats = istats;
+
+		tot++;
+
+		if ( ++tot_added == rand_adds_max )
+		{
+			u32  j, k;
+			u32* hdl_ptr[tot];
+
+			for ( j = k = 0; j < array_sz(rule_hdls); j++ )
+			{
+				if ( VALID_RULE(rule_hdls[j]) )
+				{
+					hdl_ptr[k] = &(rule_hdls[j]);
+
+					if ( ++k == tot )
+					{
+						break;
+					}
+				}
+			}
+
+			IPADBG("About to delete %u rules\n", rand_dels_max);
+
+			while ( k )
+			{
+				while ( j = rand() % k, ! VALID_RULE(*(hdl_ptr[j])) );
+
+				IPADBG("Trying ipa_nat_del_ipv4_rule(0x%08X)\n",
+					   *(hdl_ptr[j]));
+
+				ret = ipa_nat_del_ipv4_rule(tbl_hdl, *(hdl_ptr[j]));
+				CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+				IPADBG("Success ipa_nat_del_ipv4_rule(0x%08X)\n", *(hdl_ptr[j]));
+
+				*(hdl_ptr[j]) = 0xFFFFFFFF;
+
+				--tot;
+
+				if ( ++tot_deleted == rand_dels_max )
+				{
+					break;
+				}
+			}
+
+			GET_MAX(rand_adds_max, rand_dels_max);
+
+			tot_added = tot_deleted = 0;
+		}
+
+		if ( switched )
+		{
+			switched = false;
+
+			IPAINFO("Continuing rule adds to %s table of size: (%u)\n",
+					ipa3_nat_mem_in_as_str(nstats.nmi),
+					nstats.tot_ents);
+		}
+	}
+
+	ret = ipa_nati_ipv4_tbl_stats(tbl_hdl, &nstats, &istats);
+	CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+	mem_type = ipa3_nat_mem_in_as_str(nstats.nmi);
+
+	/*
+	 * NAT table stats...
+	 */
+	IPAINFO("Able to add (%u) records to %s "
+			"NAT table of size (%u) or (%f) percent\n",
+			tot,
+			mem_type,
+			nstats.tot_ents,
+			((float) tot / (float) nstats.tot_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"NAT BASE table of size (%u) or (%f) percent\n",
+			nstats.tot_base_ents_filled,
+			mem_type,
+			nstats.tot_base_ents,
+			((float) nstats.tot_base_ents_filled /
+			 (float) nstats.tot_base_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"NAT EXPN table of size (%u) or (%f) percent\n",
+			nstats.tot_expn_ents_filled,
+			mem_type,
+			nstats.tot_expn_ents,
+			((float) nstats.tot_expn_ents_filled /
+			 (float) nstats.tot_expn_ents) * 100.0);
+
+	IPAINFO("%s NAT table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+			mem_type,
+			nstats.tot_chains,
+			nstats.min_chain_len,
+			nstats.max_chain_len,
+			nstats.avg_chain_len);
+
+	/*
+	 * INDEX table stats...
+	 */
+	IPAINFO("Able to add (%u) records to %s "
+			"IDX table of size (%u) or (%f) percent\n",
+			tot,
+			mem_type,
+			istats.tot_ents,
+			((float) tot / (float) istats.tot_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"IDX BASE table of size (%u) or (%f) percent\n",
+			istats.tot_base_ents_filled,
+			mem_type,
+			istats.tot_base_ents,
+			((float) istats.tot_base_ents_filled /
+			 (float) istats.tot_base_ents) * 100.0);
+
+	IPAINFO("Able to add (%u) records to %s "
+			"IDX EXPN table of size (%u) or (%f) percent\n",
+			istats.tot_expn_ents_filled,
+			mem_type,
+			istats.tot_expn_ents,
+			((float) istats.tot_expn_ents_filled /
+			 (float) istats.tot_expn_ents) * 100.0);
+
+	IPAINFO("%s IDX table chains: tot_chains(%u) min_len(%u) max_len(%u) avg_len(%f)\n",
+			mem_type,
+			istats.tot_chains,
+			istats.min_chain_len,
+			istats.max_chain_len,
+			istats.avg_chain_len);
+
+	IPAINFO("Deleting remaining rules\n");
+
+	for ( i = 0; i < array_sz(rule_hdls); i++ )
+	{
+		if ( VALID_RULE(rule_hdls[i]) )
+		{
+			IPADBG("Trying ipa_nat_del_ipv4_rule(0x%08X)\n",
+				   rule_hdls[i]);
+			ret = ipa_nat_del_ipv4_rule(tbl_hdl, rule_hdls[i]);
+			CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+			IPADBG("Success ipa_nat_del_ipv4_rule(%u)\n", rule_hdls[i]);
+		}
+	}
+
+	if ( sep )
+	{
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
+	}
+
+	IPADBG("Out\n");
+
+	return 0;
+}
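In test025 above, rule_hdls[] slots use 0 for "never allocated" and 0xFFFFFFFF
for "deleted", which is exactly what VALID_RULE() tests. The GET_MAX macro
draws a random add burst of 4..19 followed by a random delete burst of 1..9
that is always smaller than the add burst; a function-form sketch of the same
bounds (illustrative only, not part of the patch):

	#include <stdlib.h>

	/* Sketch: the same constraints GET_MAX enforces, written as a function. */
	static void get_rand_bounds(unsigned *adds_max, unsigned *dels_max)
	{
		unsigned a, d;

		do {
			a = rand() % 20;        /* 0..19 */
		} while (a < 4);                /* keep 4..19 */

		do {
			d = rand() % 10;        /* 0..9 */
		} while (d == 0 || d >= a);     /* keep 1..min(9, a - 1) */

		*adds_max = a;
		*dels_max = d;
	}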
diff --git a/ipanat/test/ipa_nat_test999.c b/ipanat/test/ipa_nat_test999.c
new file mode 100644
index 0000000..f82ef18
--- /dev/null
+++ b/ipanat/test/ipa_nat_test999.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*=========================================================================*/
+/*!
+	@file
+	ipa_nat_test999.c
+
+	@brief
+	Verify the following scenario:
+	1. Delete ipv4 table
+*/
+/*===========================================================================*/
+
+#include "ipa_nat_test.h"
+
+int ipa_nat_test999(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
+{
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+	int ret;
+
+	IPADBG("In\n");
+
+	if ( ! sep )
+	{
+		IPADBG("calling ipa_nat_del_ipv4_tbl()\n");
+
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+
+		*tbl_hdl_ptr = 0;
+
+		CHECK_ERR(ret);
+
+		IPADBG("deleted ipv4 nat table successfully.\n");
+	}
+
+	IPADBG("Out\n");
+
+	return 0;
+}
diff --git a/ipanat/test/ipa_nat_testMODEL.c b/ipanat/test/ipa_nat_testMODEL.c
new file mode 100644
index 0000000..0f99159
--- /dev/null
+++ b/ipanat/test/ipa_nat_testMODEL.c
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*=========================================================================*/
+/*!
+	@file
+	ipa_nat_testXXX.c
+
+	@brief
+	Verify the following scenario:
+
+*/
+/*===========================================================================*/
+
+#include "ipa_nat_test.h"
+
+int ipa_nat_testXXX(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
+{
+	int* tbl_hdl_ptr = (int*) arb_data_ptr;
+
+	int ret;
+
+	IPADBG("In\n");
+
+	if ( sep )
+	{
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+	}
+
+	if ( sep )
+	{
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+		*tbl_hdl_ptr = 0;
+		CHECK_ERR(ret);
+	}
+
+	IPADBG("Out\n");
+
+	return 0;
+}
diff --git a/ipanat/test/ipa_nat_testREG.c b/ipanat/test/ipa_nat_testREG.c
new file mode 100644
index 0000000..10e29cf
--- /dev/null
+++ b/ipanat/test/ipa_nat_testREG.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials provided
+ *    with the distribution.
+ *  * Neither the name of The Linux Foundation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*=========================================================================*/
+/*!
+	@file
+	ipa_nat_testREG.c
+
+	@brief
+	Verify the following scenario:
+	1. Add ipv4 table
+	2. Delete ipv4 table
+*/
+/*=========================================================================*/
+
+#include "ipa_nat_test.h"
+
+int ipa_nat_testREG(
+	const char* nat_mem_type,
+	u32 pub_ip_add,
+	int total_entries,
+	u32 tbl_hdl,
+	int sep,
+	void* arb_data_ptr)
+{
+	int* ireg_ptr = (int*) arb_data_ptr;
+
+	int  i, ret;
+
+	IPADBG("In\n");
+
+	for ( i = 0; i < *ireg_ptr; i++ )
+	{
+		IPADBG("Executing iteration %d\n", i+1);
+
+		IPADBG("Calling ipa_nat_add_ipv4_tbl()\n");
+
+		ret = ipa_nat_add_ipv4_tbl(pub_ip_add, nat_mem_type, total_entries, &tbl_hdl);
+
+		CHECK_ERR_TBL_STOP(ret, tbl_hdl);
+
+		IPADBG("Iteration %d creation of nat ipv4 table successful\n", i+1);
+
+		IPADBG("Calling ipa_nat_del_ipv4_tbl()\n");
+
+		ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
+
+		CHECK_ERR(ret);
+
+		IPADBG("Iteration %d deletion of ipv4 nat table successful\n", i+1);
+	}
+
+	IPADBG("Executed %d iterations:\n", i);
+
+	IPADBG("Out\n");
+
+	return 0;
+}
diff --git a/ipanat/test/main.c b/ipanat/test/main.c
index c49ce3b..16d46fd 100644
--- a/ipanat/test/main.c
+++ b/ipanat/test/main.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2018-2020 The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -26,615 +26,482 @@
  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
-
-
 #include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
 #include <stdlib.h>
 #include <unistd.h>
+#include <libgen.h>
 #include <string.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <errno.h>
 
-#include "ipa_nat_drv.h"
-#include "ipa_nat_drvi.h"
 #include "ipa_nat_test.h"
+#include "ipa_nat_map.h"
 
-extern struct ipa_nat_cache ipv4_nat_cache;
+#undef strcasesame
+#define strcasesame(x, y) \
+	(! strcasecmp((x), (y)))
 
-int chk_for_loop(u32 tbl_hdl)
+static inline const char* legal_mem_type(
+	const char* mt )
 {
-	struct ipa_nat_rule *tbl_ptr;
-	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
-	int cnt;
-	uint16_t cur_entry;
+	if ( strcasesame(mt, "DDR") )    return "DDR";
+	if ( strcasesame(mt, "SRAM") )   return "SRAM";
+	if ( strcasesame(mt, "HYBRID") ) return "HYBRID";
+	return NULL;
+}
 
-	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-			tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
-		IPAERR("invalid table handle passed \n");
+static int nat_rule_loop_check(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
+{
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
+	uint32_t             tbl_hdl = (uint32_t) (uintptr_t) arb_data_ptr;
+
+	struct ipa_nat_rule* rule_ptr =
+		(struct ipa_nat_rule*) record_ptr;
+
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
+
+	/*
+	 * By virtue of this function being called back by the walk, this
+	 * record_index is valid.  Denote it as such in the map...
+	 */
+	if ( ipa_nat_map_add(MAP_NUM_99, record_index, 1) )
+	{
+		IPAERR("ipa_nat_map_add(index(%u)) failed\n", record_index);
 		return -EINVAL;
 	}
 
-	IPADBG("checking ipv4 rules:\n");
-	tbl_ptr = (struct ipa_nat_rule *)
-			ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_rules_addr;
-	for (cnt = 0;
-		cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-		cnt++) {
-		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,ENABLE_FIELD)) {
-			if(Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
-							NEXT_INDEX_FIELD) == cnt)
-			{
-				IPAERR("Infinite loop detected, entry\n");
-				ipa_nati_print_rule(&tbl_ptr[cnt], cnt);
-				return -EINVAL;
-			}
-		}
+	if ( rule_ptr->next_index == record_index )
+	{
+		IPAERR("Infinite loop detected in IPv4 %s table, entry %u\n",
+			   (is_expn_tbl) ? "expansion" : "base",
+			   record_index);
+
+		ipa_nat_dump_ipv4_table(tbl_hdl);
+
+		return -EINVAL;
 	}
 
-	/* Print ipv4 expansion rules */
-	IPADBG("checking ipv4 active expansion rules:\n");
-	tbl_ptr = (struct ipa_nat_rule *)
-			ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_expn_rules_addr;
-	for (cnt = 0;
-		cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-		cnt++) {
-		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-								ENABLE_FIELD)) {
-			cur_entry =
-				cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			if (Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
-							NEXT_INDEX_FIELD) == cur_entry)
-			{
-				IPAERR("Infinite loop detected\n");
-				ipa_nati_print_rule(&tbl_ptr[cnt],
-					(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries));
-				return -EINVAL;
-			}
-		}
-	}
-
-	/* Print ipv4 index rules */
-	IPADBG("checking ipv4 index active rules: \n");
-	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-			ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_addr;
-	for (cnt = 0;
-		 cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-							INDX_TBL_TBL_ENTRY_FIELD)) {
-			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-							INDX_TBL_NEXT_INDEX_FILED) == cnt)
-			{
-				IPAERR("Infinite loop detected\n");
-				ipa_nati_print_index_rule(&indx_tbl_ptr[cnt], cnt, 0);
-				return -EINVAL;
-			}
-		}
-	}
-
-	/* Print ipv4 index expansion rules */
-	IPADBG("Checking ipv4 index expansion active rules: \n");
-	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-			ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_expn_addr;
-	for (cnt = 0;
-		cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-							INDX_TBL_TBL_ENTRY_FIELD)) {
-			cur_entry =
-				cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-							INDX_TBL_NEXT_INDEX_FILED) == cur_entry)
-			{
-				IPAERR("Infinite loop detected\n");
-				ipa_nati_print_index_rule(&indx_tbl_ptr[cnt],
-					(cnt + ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries),
-				ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].index_expn_table_meta[cnt].prev_index);
-				return -EINVAL;
-			}
-		}
-	}
 	return 0;
 }
 
-uint8_t is_base_entry_valid(u32 tbl_hdl, u16 entry)
+static int nat_rule_validity_check(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
 {
-	struct ipa_nat_rule *tbl_ptr;
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
+	uint16_t             index;
 
-	if (entry >
-		ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries)
-	{
-		tbl_ptr = (struct ipa_nat_rule *)
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_expn_rules_addr;
-		entry -=
-			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-	}
-	else
-	{
-		tbl_ptr = (struct ipa_nat_rule *)
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_rules_addr;
-	}
-	return (Read16BitFieldValue(tbl_ptr[entry].ip_cksm_enbl,
-							ENABLE_FIELD));
-}
+	struct ipa_nat_rule* rule_ptr =
+		(struct ipa_nat_rule*) record_ptr;
 
-uint8_t is_index_entry_valid(u32 tbl_hdl, u16 entry)
-{
-	struct ipa_nat_indx_tbl_rule *tbl_ptr;
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
 
-	if (entry >
-		ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries)
-	{
-		tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_expn_addr;
-		entry -=
-			ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-	}
-	else
-	{
-		tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_addr;
-	}
-	if (Read16BitFieldValue(tbl_ptr[entry].tbl_entry_nxt_indx,
-						INDX_TBL_TBL_ENTRY_FIELD)) {
-		return 1;
-	}
-	else
-	{
-		return 0;
-	}
-}
+	index = rule_ptr->next_index;
 
-int chk_for_validity(u32 tbl_hdl)
-{
-	struct ipa_nat_rule *tbl_ptr;
-	struct ipa_nat_indx_tbl_rule *indx_tbl_ptr;
-	uint16_t nxt_index, prv_index;
-	int cnt;
+	if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) )
+	{
+		IPAERR("Invalid next index %u found in IPv4 %s table entry %u\n",
+			   index,
+			   (is_expn_tbl) ? "expansion" : "base",
+			   rule_index);
 
-	if (IPA_NAT_INVALID_NAT_ENTRY == tbl_hdl ||
-			tbl_hdl > IPA_NAT_MAX_IP4_TBLS) {
-		IPAERR("invalid table handle passed \n");
 		return -EINVAL;
 	}
 
-	/* Validate base table next_indx and prev_indx values */
-	IPADBG("Validating ipv4 active rules: \n");
-	tbl_ptr = (struct ipa_nat_rule *)
-			ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_rules_addr;
-	for (cnt = 0;
-		cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-						ENABLE_FIELD)) {
-			nxt_index =
-			Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
-						NEXT_INDEX_FIELD);
-			if (!is_base_entry_valid(tbl_hdl, nxt_index)) {
-				IPAERR("Invalid next index found, entry:%d\n", cnt);
-			}
-		}
-	}
+	if ( is_expn_tbl )
+	{
+		index = rule_ptr->prev_index;
 
-	IPADBG("Validating ipv4 expansion active rules: \n");
-	tbl_ptr = (struct ipa_nat_rule *)
-			ipv4_nat_cache.ip4_tbl[tbl_hdl-1].ipv4_expn_rules_addr;
-	for (cnt = 0;
-		cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(tbl_ptr[cnt].ip_cksm_enbl,
-							ENABLE_FIELD)) {
-			/* Validate next index */
-			nxt_index =
-				Read16BitFieldValue(tbl_ptr[cnt].nxt_indx_pub_port,
-									NEXT_INDEX_FIELD);
-			if (!is_base_entry_valid(tbl_hdl, nxt_index)) {
-				IPAERR("Invalid next index found, entry:%d\n", cnt);
-			}
-			/* Validate previous index */
-			prv_index =
-				Read16BitFieldValue(tbl_ptr[cnt].sw_spec_params,
-						SW_SPEC_PARAM_PREV_INDEX_FIELD);
-			if (!is_base_entry_valid(tbl_hdl, prv_index)) {
-				IPAERR("Invalid Previous index found, entry:%d\n", cnt);
-			}
-		}
-	}
+		if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) )
+		{
+			IPAERR("Invalid previous index %u found in IPv4 %s table entry %u\n",
+				   index,
+				   "expansion",
+				   rule_index);
 
-	IPADBG("Validating ipv4 index active rules: \n");
-	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_addr;
-	for (cnt = 0;
-		cnt < ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-							INDX_TBL_TBL_ENTRY_FIELD)) {
-			nxt_index =
-				Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-							INDX_TBL_NEXT_INDEX_FILED);
-			if (!is_index_entry_valid(tbl_hdl, nxt_index)) {
-				IPAERR("Invalid next index found, entry:%d\n", cnt);
-			}
-		}
-	}
-
-	IPADBG("Validating ipv4 index expansion active rules: \n");
-	indx_tbl_ptr = (struct ipa_nat_indx_tbl_rule *)
-	ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_table_expn_addr;
-	for (cnt = 0;
-		cnt <= ipv4_nat_cache.ip4_tbl[tbl_hdl - 1].expn_table_entries;
-			 cnt++) {
-		if (Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-								INDX_TBL_TBL_ENTRY_FIELD)) {
-			/* Validate next index*/
-			nxt_index =
-				Read16BitFieldValue(indx_tbl_ptr[cnt].tbl_entry_nxt_indx,
-								INDX_TBL_NEXT_INDEX_FILED);
-			if (!is_index_entry_valid(tbl_hdl, nxt_index)) {
-				IPAERR("Invalid next index found, entry:%d\n", cnt);
-			}
-
-			/* Validate previous index*/
-			prv_index =
-				ipv4_nat_cache.ip4_tbl[tbl_hdl-1].index_expn_table_meta[cnt].prev_index;
-
-			if (!is_index_entry_valid(tbl_hdl, prv_index)) {
-				IPAERR("Invalid Previous index found, entry:%d\n", cnt);
-			}
+			return -EINVAL;
 		}
 	}
 
 	return 0;
 }
 
-int ipa_nat_validate_ipv4_table(u32 tbl_hdl)
+static int index_loop_check(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
 {
-	int ret = 0;
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
+	uint32_t             tbl_hdl = (uint32_t) (uintptr_t) arb_data_ptr;
 
-	ret = chk_for_loop(tbl_hdl);
-	if (ret)
+	struct ipa_nat_indx_tbl_rule* itr_ptr =
+		(struct ipa_nat_indx_tbl_rule*) record_ptr;
+
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
+
+	/*
+	 * By virtue of this function being called back by the walk, this
+	 * record_index is valid.  Denote it as such in the map...
+	 */
+	if ( ipa_nat_map_add(MAP_NUM_99, record_index, 1) )
+	{
+		IPAERR("ipa_nat_map_add(index(%u)) failed\n", record_index);
+		return -EINVAL;
+	}
+
+	if ( itr_ptr->next_index == record_index )
+	{
+		IPAERR("Infinite loop detected in IPv4 index %s table, entry %u\n",
+			   (is_expn_tbl) ? "expansion" : "base",
+			   record_index);
+
+		ipa_nat_dump_ipv4_table(tbl_hdl);
+
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int index_validity_check(
+	ipa_table*      table_ptr,
+	uint32_t        rule_hdl,
+	void*           record_ptr,
+	uint16_t        record_index,
+	void*           meta_record_ptr,
+	uint16_t        meta_record_index,
+	void*           arb_data_ptr )
+{
+	enum ipa3_nat_mem_in nmi;
+	uint8_t              is_expn_tbl;
+	uint16_t             rule_index;
+	uint16_t             index;
+
+	struct ipa_nat_indx_tbl_rule* itr_ptr =
+		(struct ipa_nat_indx_tbl_rule*) record_ptr;
+
+	BREAK_RULE_HDL(table_ptr, rule_hdl, nmi, is_expn_tbl, rule_index);
+
+	index = itr_ptr->next_index;
+
+	if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) )
+	{
+		IPAERR("Invalid next index %u found in IPv4 index %s table entry %u\n",
+			   index,
+			   (is_expn_tbl) ? "expansion" : "base",
+			   rule_index);
+
+		return -EINVAL;
+	}
+
+	if ( is_expn_tbl )
+	{
+		struct ipa_nat_indx_tbl_meta_info* mi_ptr = meta_record_ptr;
+
+		if ( ! mi_ptr )
+		{
+			IPAERR("Missing meta pointer for IPv4 index %s table entry %u\n",
+				   "expansion",
+				   rule_index);
+
+			return -EINVAL;
+		}
+
+		index = mi_ptr->prev_index;
+
+		if ( index && ipa_nat_map_find(MAP_NUM_99, index, NULL) )
+		{
+			IPAERR("Invalid previous index %u found in IPv4 index %s table entry %u\n",
+				   index,
+				   "expansion",
+				   rule_index);
+
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int ipa_nat_validate_ipv4_table(
+	u32 tbl_hdl )
+{
+	int ret;
+
+	/*
+	 * Map MAP_NUM_99 will be used to keep, and to check for,
+	 * record validity.
+	 *
+	 * The first walk will fill it. The second walk will use it...
+	 */
+	ipa_nat_map_clear(MAP_NUM_99);
+
+	IPADBG("Checking IPv4 active rules:\n");
+
+	ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_NAT_TABLE, nat_rule_loop_check, tbl_hdl);
+
+	if ( ret != 0 )
+	{
 		return ret;
-	ret = chk_for_validity(tbl_hdl);
+	}
 
-	return ret;
+	ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_NAT_TABLE, nat_rule_validity_check, 0);
+
+	if ( ret != 0 )
+	{
+		return ret;
+	}
+
+	/*
+	 * Map MAP_NUM_99 will be used to keep, and to check for,
+	 * record validity.
+	 *
+	 * The first walk will fill it. The second walk will use it...
+	 */
+	ipa_nat_map_clear(MAP_NUM_99);
+
+	IPADBG("Checking IPv4 index active rules:\n");
+
+	ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_INDEX_TABLE, index_loop_check, tbl_hdl);
+
+	if ( ret != 0 )
+	{
+		return ret;
+	}
+
+	ret = ipa_nati_walk_ipv4_tbl(tbl_hdl, USE_INDEX_TABLE, index_validity_check, 0);
+
+	if ( ret != 0 )
+	{
+		return ret;
+	}
+
+	return 0;
 }
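ipa_nat_validate_ipv4_table() above is a two-pass check: the first walk records
every record index the walker actually visits (via MAP_NUM_99) and rejects
trivial self-loops, and the second walk verifies that every non-zero next/prev
link refers to one of the recorded indices. The same idea over a toy table,
using a plain bitmap in place of the driver's ipa_nat_map (sketch only; the
toy_rule type is hypothetical):

	#include <stdint.h>
	#include <string.h>

	#define MAX_ENTS 1024

	struct toy_rule {
		uint16_t enabled;
		uint16_t next_index;   /* 0 means end of chain */
	};

	/* Pass 1: mark every enabled record as a valid link target. */
	static void mark_valid(const struct toy_rule *tbl, int n, uint8_t *seen)
	{
		int i;

		memset(seen, 0, MAX_ENTS);
		for (i = 0; i < n; i++)
			if (tbl[i].enabled)
				seen[i] = 1;
	}

	/* Pass 2: every non-zero next_index must reference a marked record
	 * and must not point back at the record itself. */
	static int check_links(const struct toy_rule *tbl, int n, const uint8_t *seen)
	{
		int i;

		for (i = 0; i < n; i++) {
			if (!tbl[i].enabled)
				continue;
			if (tbl[i].next_index == i)
				return -1;  /* self-loop */
			if (tbl[i].next_index && !seen[tbl[i].next_index])
				return -1;  /* dangling link */
		}
		return 0;
	}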
 
-int main(int argc, char* argv[])
+static void
+_dispUsage(
+	const char* progNamePtr )
 {
-	int exec = 0, pass = 0, ret;
-	int cnt, nt=1;
-	int total_entries = 100;
-	u8 sep = 0;
-	u32 tbl_hdl = 0;
-	u32 pub_ip_add = 0x011617c0;   /* "192.23.22.1" */
+	printf(
+		"Usage: %s [-d -r N -i N -e N -m mt]\n"
+		"Where:\n"
+		"  -d     Each test is discrete (create table, add rules, destroy table)\n"
+		"         If not specified, only one table create and destroy for all tests\n"
+		"  -r N   Where N is the number of times to run the inotify regression test\n"
+		"  -i N   Where N is the number of times (iterations) to run test\n"
+		"  -e N   Where N is the number of entries in the NAT\n"
+		"  -m mt  Where mt is the type of memory to use for the NAT\n"
+		"         Legal mt's: DDR, SRAM, or HYBRID (ie. use SRAM and DDR)\n"
+		"  -g M-N Run tests M through N only\n",
+		progNamePtr);
 
-	IPADBG("ipa_nat_testing user space nat driver\n");
+	fflush(stdout);
+}
 
-	if (argc == 4)
+static NatTests nt_array[] = {
+	NAT_TEST_ENTRY(ipa_nat_test000, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test001, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test002, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test003, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test004, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test005, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test006, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test007, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test008, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test009, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_test010, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test011, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test012, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test013, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test014, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test015, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test016, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test017, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test018, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test019, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test020, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test021, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test022, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test023, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test024, IPA_NAT_TEST_PRE_COND_TE, 0),
+	NAT_TEST_ENTRY(ipa_nat_test025, IPA_NAT_TEST_PRE_COND_TE, 0),
+	/*
+	 * Add new tests just above this comment. Keep the following two
+	 * at the end...
+	 */
+	NAT_TEST_ENTRY(ipa_nat_test999, 1, 0),
+	NAT_TEST_ENTRY(ipa_nat_testREG, 1, 0),
+};
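main() below only touches four fields of each nt_array entry: func, func_name,
num_ents_trigger, and test_hold_time_in_secs. The NatTests type and the
NAT_TEST_ENTRY macro are defined in ipa_nat_test.h (not shown in this patch);
a plausible shape, assumed here purely for readability, would be:

	/* Assumed shape only; the real definitions live in ipa_nat_test.h. */
	typedef int (*nat_test_fn)(
		const char* nat_mem_type,
		u32         pub_ip_add,
		int         total_entries,
		u32         tbl_hdl,
		int         sep,
		void*       arb_data_ptr);

	typedef struct {
		nat_test_fn func;                    /* test entry point           */
		const char* func_name;               /* used for pass/fail logging */
		int         num_ents_trigger;        /* minimum -e value to run it */
		uint32_t    test_hold_time_in_secs;  /* optional post-test sleep   */
	} NatTests;

	#define NAT_TEST_ENTRY(f, trigger, hold) \
		{ f, #f, (trigger), (hold) }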
+
+int main(
+	int   argc,
+	char* argv[] )
+{
+	int      sep        = 0;
+	int      ireg       = 0;
+	uint32_t nt         = 1;
+	int      total_ents = 100;
+	uint32_t ht         = 0;
+	uint32_t start = 0, end = 0;
+
+	const char* nat_mem_type = "DDR";
+
+	uint32_t tbl_hdl    = 0;
+
+	uint32_t pub_ip_addr;
+
+	uint32_t i, ub, cnt, exec, pass;
+
+	void*    adp;
+
+	time_t   t;
+
+	int      c, ret;
+
+	IPADBG("Testing user space nat driver\n");
+
+	while ( (c = getopt(argc, argv, "dr:i:e:m:h:g:?")) != -1 )
 	{
-		if (!strncmp(argv[1], "reg", 3))
+		switch (c)
 		{
-			nt = atoi(argv[2]);
-			total_entries = atoi(argv[3]);
-			IPADBG("Reg: %d, Nat Entries: %d\n", nt, total_entries);
-		}
-		else if (!strncmp(argv[1], "sep", 3))
-		{
+		case 'd':
 			sep = 1;
-			nt = atoi(argv[2]);
-			total_entries = atoi(argv[3]);
+			break;
+		case 'r':
+			ireg = atoi(optarg);
+			break;
+		case 'i':
+			nt = atoi(optarg);
+			break;
+		case 'e':
+			total_ents = atoi(optarg);
+			break;
+		case 'm':
+			if ( ! (nat_mem_type = legal_mem_type(optarg)) )
+			{
+				fprintf(stderr, "Illegal: -m %s\n", optarg);
+				_dispUsage(basename(argv[0]));
+				exit(0);
+			}
+			break;
+		case 'h':
+			ht = atoi(optarg);
+			break;
+		case 'g':
+			if ( sscanf(optarg, "%u-%u", &start, &end) != 2
+				 ||
+				 ( start >= end || end >= array_sz(nt_array) - 1 ) )
+			{
+				fprintf(stderr, "Illegal: -f %s\n", optarg);
+				_dispUsage(basename(argv[0]));
+				exit(0);
+			}
+			break;
+		case '?':
+		default:
+			_dispUsage(basename(argv[0]));
+			exit(0);
+			break;
 		}
 	}
-	else if (argc == 3)
+
+	srand(time(&t));
+
+	pub_ip_addr = RAN_ADDR;
+
+	exec = pass = 0;
+
+	for ( cnt = ret = 0; cnt < nt && ret == 0; cnt++ )
 	{
-		if (!strncmp(argv[1], "inotify", 7))
+		IPADBG("ITERATION [%u] OF TESING\n", cnt + 1);
+
+		if ( ireg )
 		{
-			ipa_nat_test021(total_entries, atoi(argv[2]));
-			return 0;
+			adp = &ireg;
+			i   = array_sz(nt_array) - 1;
+			ub  = array_sz(nt_array);
 		}
-		else if (!strncmp(argv[1], "sep", 3))
+		else
 		{
-			sep = 1;
-			total_entries = atoi(argv[2]);
+			adp = &tbl_hdl;
+			i   = ( end ) ? start : 0;
+			ub  = ( end ) ? end   : array_sz(nt_array) - 1;
+
+			if ( i != 0 && ! sep )
+			{
+				ipa_nat_test000(
+					nat_mem_type, pub_ip_addr, total_ents, tbl_hdl, 0, adp);
+			}
+		}
+
+		for ( ; i < ub && ret == 0; i++ )
+		{
+			if ( total_ents >= nt_array[i].num_ents_trigger )
+			{
+				IPADBG("+------------------------------------------------+\n");
+				IPADBG("|        Executing test: %s         |\n", nt_array[i].func_name);
+				IPADBG("+------------------------------------------------+\n");
+
+				ret = nt_array[i].func(
+					nat_mem_type, pub_ip_addr, total_ents, tbl_hdl, sep, adp);
+
+				exec++;
+
+				if ( ret == 0 )
+				{
+					IPADBG("<<<<< Test %s SUCCEEDED >>>>>\n", nt_array[i].func_name);
+
+					pass++;
+
+					if ( ht || nt_array[i].test_hold_time_in_secs )
+					{
+						ht = (ht) ? ht : nt_array[i].test_hold_time_in_secs;
+
+						sleep(ht);
+					}
+				}
+				else
+				{
+					IPAERR("<<<<< Test %s FAILED >>>>>\n", nt_array[i].func_name);
+				}
+			}
 		}
 	}
-	else if (argc == 2)
+
+	if ( ret && tbl_hdl )
 	{
-		total_entries = atoi(argv[1]);
-		IPADBG("Nat Entries: %d\n", total_entries);
+		ipa_nat_test999(
+			nat_mem_type, pub_ip_addr, total_ents, tbl_hdl, 0, &tbl_hdl);
 	}
 
+	IPADBG("Total NAT Tests Run:%u, Pass:%u, Fail:%u\n",
+		   exec, pass, exec - pass);
 
-	for (cnt=0; cnt<nt; cnt++)
-	{
-		IPADBG("%s():Executing %d time \n",__FUNCTION__, cnt);
-
-		if (!sep)
-		{
-			ret = ipa_nat_add_ipv4_tbl(pub_ip_add, total_entries, &tbl_hdl);
-			CHECK_ERR(ret);
-		}
-
-		if (sep)
-		{
-			IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-			ret = ipa_nat_test000(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test00%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-			ret = ipa_nat_test001(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test00%d Fail\n", exec);
-			}
-			exec++;
-		}
-
-		IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-		ret = ipa_nat_test002(total_entries, tbl_hdl, sep);
-		if (!ret)
-		{
-			pass++;
-		}
-		else
-		{
-			IPAERR("ipa_nat_test00%d Fail\n", exec);
-		}
-		exec++;
-
-		if (sep)
-		{
-			IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-			ret = ipa_nat_test003(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test00%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-			ret = ipa_nat_test004(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test00%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-			ret = ipa_nat_test005(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test00%d Fail\n", exec);
-			}
-			exec++;
-		}
-
-		IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-		ret = ipa_nat_test006(total_entries, tbl_hdl, sep);
-		if (!ret)
-		{
-			pass++;
-		}
-		else
-		{
-			IPAERR("ipa_nat_test00%d Fail\n", exec);
-		}
-		exec++;
-
-		IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-		ret = ipa_nat_test007(total_entries, tbl_hdl, sep);
-		if (!ret)
-		{
-			pass++;
-		}
-		else
-		{
-			IPAERR("ipa_nat_test00%d Fail\n", exec);
-		}
-		exec++;
-
-		IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-		ret = ipa_nat_test008(total_entries, tbl_hdl, sep);
-		if (!ret)
-		{
-			pass++;
-		}
-		else
-		{
-			IPAERR("ipa_nat_test00%d Fail\n", exec);
-		}
-		exec++;
-
-		IPADBG("\n\nExecuting ipa_nat_test00%d\n", exec);
-		ret = ipa_nat_test009(total_entries, tbl_hdl, sep);
-		if (!ret)
-		{
-			pass++;
-		}
-		else
-		{
-			IPAERR("ipa_nat_test00%d Fail\n", exec);
-		}
-		exec++;
-
-		if (total_entries >= IPA_NAT_TEST_PRE_COND_TE)
-		{
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test010(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test011(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test012(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test013(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test014(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test015(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test016(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test017(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test018(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test019(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test020(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-
-			IPADBG("\n\nExecuting ipa_nat_test0%d\n", exec);
-			ret = ipa_nat_test022(total_entries, tbl_hdl, sep);
-			if (!ret)
-			{
-				pass++;
-			}
-			else
-			{
-				IPAERR("ipa_nat_test0%d Fail\n", exec);
-			}
-			exec++;
-		}
-
-		if (!sep)
-		{
-			ret = ipa_nat_del_ipv4_tbl(tbl_hdl);
-			CHECK_ERR(ret);
-		}
-	}
-	/*=======  Printing Results ==========*/
-	IPADBG("Total ipa_nat Tests Run:%d, Pass:%d, Fail:%d\n",exec, pass, exec-pass);
 	return 0;
 }