Diffstat (limited to 'common/dpdk/recipes-extended/dpdk/dpdk')
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/0001-crypto-ccp-fix-shared-libs-build.patch | 25
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/0002-net-axgbe-fix-shared-libs-build.patch | 28
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-01-18-net-axgbe-add-minimal-dev-init-and-uninit-support.patch | 984
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-02-18-net-axgbe-add-register-map-and-related-macros.patch | 1672
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-03-18-net-axgbe-add-phy-register-map-and-helper-macros.patch | 341
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-04-18-net-axgbe-add-structures-for-MAC-initialization-and-reset.patch | 920
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-05-18-net-axgbe-add-phy-initialization-and-related-apis.patch | 1974
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-06-18-net-axgbe-add-phy-programming-apis.patch | 2468
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-07-18-net-axgbe-add-interrupt-handler-for-autonegotiation.patch | 98
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-08-18-net-axgbe-add-transmit-and-receive-queue-setup-apis.patch | 903
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-09-18-net-axgbe-add-DMA-programming-and-dev-start-and-stop-apis.patch | 1027
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-10-18-net-axgbe-add-transmit-and-receive-data-path-apis.patch | 813
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-11-18-doc-add-documents-for-AMD-axgbe-Ethernet-PMD.patch | 272
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-12-18-net-axgbe-add-link-status-update.patch | 90
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-13-18-net-axgbe-add-configure-flow-control-while-link-adjustment.patch | 44
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-14-18-net-axgbe-add-promiscuous-mode-support.patch | 120
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-15-18-net-axgbe-add-generic-transmit-and-receive-stats-support.patch | 121
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-16-18-net-axgbe-add-support-for-build-32-bit-mode.patch | 308
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-17-18-net-axgbe-add-workaround-for-axgbe-ethernet-training-bug.patch | 319
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-18-18-net-axgbe-moved-license-headers-to-SPDX-format.patch | 1942
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-01-20-crypto-ccp-add-AMD-ccp-skeleton-PMD.patch | 241
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-02-20-crypto-ccp-support-ccp-device-initialization-and-deintialization.patch | 1809
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-03-20-crypto-ccp-support-basic-pmd-ops.patch | 209
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-04-20-crypto-ccp-support-session-related-crypto-pmd-ops.patch | 782
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-05-20-crypto-ccp-support-queue-pair-related-pmd-ops.patch | 186
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-06-20-crypto-ccp-support-crypto-enqueue-and-dequeue-burst-api.patch | 584
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-07-20-crypto-ccp-support-sessionless-operations.patch | 80
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-08-20-crypto-ccp-support-stats-related-crypto-pmd-ops.patch | 71
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-09-20-crypto-ccp-support-ccp-hwrng-feature.patch | 85
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-10-20-crypto-ccp-support-aes-cipher-algo.patch | 449
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-11-20-crypto-ccp-support-3des-cipher-algo.patch | 244
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-12-20-crypto-ccp-support-aes-cmac-auth-algo.patch | 388
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-13-20-crypto-ccp-support-aes-gcm-aead-algo.patch | 357
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-14-20-crypto-ccp-support-sha1-authentication-algo.patch | 531
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-15-20-crypto-ccp-support-sha2-family-authentication-algo.patch | 608
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-16-20-crypto-ccp-support-sha3-family-authentication-algo.patch | 1067
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-17-20-crypto-ccp-support-cpu-based-md5-and-sha2-family-authentication-algo.patch | 626
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-18-20-test-crypto-add-test-for-AMD-CCP-crypto-poll-mode.patch | 942
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-19-20-doc-add-document-for-AMD-CCP-crypto-poll-mode-driver.patch | 263
-rw-r--r--  common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-20-20-crypto-ccp-moved-license-headers-to-SPDX-format.patch | 446
40 files changed, 24437 insertions, 0 deletions
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/0001-crypto-ccp-fix-shared-libs-build.patch b/common/dpdk/recipes-extended/dpdk/dpdk/0001-crypto-ccp-fix-shared-libs-build.patch
new file mode 100644
index 00000000..913a3072
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/0001-crypto-ccp-fix-shared-libs-build.patch
@@ -0,0 +1,25 @@
+From 7fd5e7ac8d476bc7538eb6e11875cb87233f0bdc Mon Sep 17 00:00:00 2001
+From: Awais Belal <awais_belal@mentor.com>
+Date: Wed, 14 Mar 2018 11:27:26 +0500
+Subject: [PATCH 1/2] crypto/ccp: fix shared libs build
+
+Signed-off-by: Awais Belal <awais_belal@mentor.com>
+---
+ drivers/crypto/ccp/Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
+index 1475a6c..4ba8757 100644
+--- a/drivers/crypto/ccp/Makefile
++++ b/drivers/crypto/ccp/Makefile
+@@ -19,6 +19,7 @@ LDLIBS += -lcrypto
+ LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+ LDLIBS += -lrte_cryptodev
+ LDLIBS += -lrte_pci -lrte_bus_pci
++LDLIBS += -lrte_bus_vdev
+
+ # versioning export map
+ EXPORT_MAP := rte_pmd_ccp_version.map
+--
+2.11.1
+
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/0002-net-axgbe-fix-shared-libs-build.patch b/common/dpdk/recipes-extended/dpdk/dpdk/0002-net-axgbe-fix-shared-libs-build.patch
new file mode 100644
index 00000000..876c5271
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/0002-net-axgbe-fix-shared-libs-build.patch
@@ -0,0 +1,28 @@
+From b4fd269981cf81eb82e9d56e83c9bd1bc1d00609 Mon Sep 17 00:00:00 2001
+From: Awais Belal <awais_belal@mentor.com>
+Date: Wed, 14 Mar 2018 11:28:18 +0500
+Subject: [PATCH 2/2] net/axgbe: fix shared libs build
+
+Signed-off-by: Awais Belal <awais_belal@mentor.com>
+---
+ drivers/net/axgbe/Makefile | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+index e1e0306..de6bd69 100644
+--- a/drivers/net/axgbe/Makefile
++++ b/drivers/net/axgbe/Makefile
+@@ -11,6 +11,10 @@ LIB = librte_pmd_axgbe.a
+ CFLAGS += -O3
+ CFLAGS += $(WERROR_FLAGS)
+
++LDLIBS += -lrte_eal -lrte_mempool
++LDLIBS += -lrte_pci -lrte_bus_pci
++LDLIBS += -lrte_ethdev
++
+ EXPORT_MAP := rte_pmd_axgbe_version.map
+
+ LIBABIVER := 1
+--
+2.11.1
+
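A minimal C sketch, not part of the two patches above, of the failure mode the LDLIBS fixes address: with CONFIG_RTE_BUILD_SHARED_LIB=y each PMD becomes its own shared object, and any DPDK library it references but does not link (librte_bus_vdev for ccp, librte_ethdev and friends for axgbe) shows up as an undefined symbol when the PMD is loaded. The library name below is an assumption for illustration only.

/*
 * Illustrative only: forcing immediate symbol resolution on a PMD .so
 * reproduces the undefined-symbol error that the LDLIBS additions fix.
 * Build with -ldl; the exact .so name depends on the DPDK build.
 */
#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	/* RTLD_NOW makes dlopen resolve every symbol at load time */
	void *handle = dlopen("librte_pmd_ccp.so", RTLD_NOW);

	if (handle == NULL) {
		/* e.g. "undefined symbol: rte_vdev_register" before the fix */
		fprintf(stderr, "dlopen failed: %s\n", dlerror());
		return 1;
	}
	dlclose(handle);
	return 0;
}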
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-01-18-net-axgbe-add-minimal-dev-init-and-uninit-support.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-01-18-net-axgbe-add-minimal-dev-init-and-uninit-support.patch
new file mode 100644
index 00000000..996cd3bf
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-01-18-net-axgbe-add-minimal-dev-init-and-uninit-support.patch
@@ -0,0 +1,984 @@
+From patchwork Fri Mar 9 08:42:17 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 01/18] net/axgbe: add minimal dev init and uninit support
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35820
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-1-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:17 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ MAINTAINERS | 6 +
+ config/common_base | 6 +
+ doc/guides/rel_notes/release_18_02.rst | 5 +
+ drivers/net/Makefile | 1 +
+ drivers/net/axgbe/Makefile | 146 +++++++++++++++++++
+ drivers/net/axgbe/axgbe_common.h | 172 ++++++++++++++++++++++
+ drivers/net/axgbe/axgbe_ethdev.c | 219 ++++++++++++++++++++++++++++
+ drivers/net/axgbe/axgbe_ethdev.h | 145 ++++++++++++++++++
+ drivers/net/axgbe/axgbe_logs.h | 152 +++++++++++++++++++
+ drivers/net/axgbe/rte_pmd_axgbe_version.map | 4 +
+ mk/rte.app.mk | 1 +
+ 11 files changed, 857 insertions(+)
+ create mode 100644 drivers/net/axgbe/Makefile
+ create mode 100644 drivers/net/axgbe/axgbe_common.h
+ create mode 100644 drivers/net/axgbe/axgbe_ethdev.c
+ create mode 100644 drivers/net/axgbe/axgbe_ethdev.h
+ create mode 100644 drivers/net/axgbe/axgbe_logs.h
+ create mode 100644 drivers/net/axgbe/rte_pmd_axgbe_version.map
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a646ca3..fb2ec2c 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -349,6 +349,12 @@ M: Ferruh Yigit <ferruh.yigit@intel.com>
+ T: git://dpdk.org/next/dpdk-next-net
+ F: doc/guides/nics/features/default.ini
+
++AMD AXGBE PMD
++M: Ravi Kumar <ravi1.kumar@amd.com>
++F: drivers/net/axgbe/
++F: doc/guides/nics/axgbe.rst
++F: doc/guides/nics/features/axgbe.ini
++
+ Link bonding
+ M: Declan Doherty <declan.doherty@intel.com>
+ F: drivers/net/bonding/
+diff --git a/config/common_base b/config/common_base
+index ad03cf4..e738747 100644
+--- a/config/common_base
++++ b/config/common_base
+@@ -412,6 +412,12 @@ CONFIG_RTE_PMD_RING_MAX_TX_RINGS=16
+ CONFIG_RTE_LIBRTE_PMD_SOFTNIC=y
+
+ #
++# Compile AMD PMD
++#
++CONFIG_RTE_LIBRTE_AXGBE_PMD=y
++CONFIG_RTE_LIBRTE_AXGBE_DEBUG_INIT=n
++
++#
+ # Compile the TAP PMD
+ # It is enabled by default for Linux only.
+ #
+diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
+index 3923dc2..b97d91b 100644
+--- a/doc/guides/rel_notes/release_18_02.rst
++++ b/doc/guides/rel_notes/release_18_02.rst
+@@ -41,6 +41,11 @@ New Features
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
++* **Added Ethernet poll mode driver for AMD XGBE devices.**
++
++ Added the new ``axgbe`` ethernet poll mode driver for AMD XGBE devices.
++ See the :doc:`../nics/axgbe` nic driver guide for more details on this
++ new driver.
+
+ API Changes
+ -----------
+diff --git a/drivers/net/Makefile b/drivers/net/Makefile
+index e112732..9ec571d 100644
+--- a/drivers/net/Makefile
++++ b/drivers/net/Makefile
+@@ -12,6 +12,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
+ DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+ DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
+ DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
++DIRS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe
+ DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
+ DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
+ DIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += cxgbe
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+new file mode 100644
+index 0000000..c8a1e87
+--- /dev/null
++++ b/drivers/net/axgbe/Makefile
+@@ -0,0 +1,146 @@
++#
++# Copyright (c) 2017 Advanced Micro Devices, Inc.
++# All rights reserved.
++#
++# AMD 10Gb Ethernet driver
++#
++# This file is available to you under your choice of the following two
++# licenses:
++#
++# License 1: GPLv2
++#
++# Copyright (c) 2017 Advanced Micro Devices, Inc.
++#
++# This file is free software; you may copy, redistribute and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation, either version 2 of the License, or (at
++# your option) any later version.
++#
++# This file is distributed in the hope that it will be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++# General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program. If not, see <http://www.gnu.org/licenses/>.
++#
++# This file incorporates work covered by the following copyright and
++# permission notice:
++#
++# Copyright (c) 2013 Synopsys, Inc.
++#
++# The Synopsys DWC ETHER XGMAC Software Driver and documentation
++# (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++# Inc. unless otherwise expressly agreed to in writing between Synopsys
++# and you.
++#
++# The Software IS NOT an item of Licensed Software or Licensed Product
++# under any End User Software License Agreement or Agreement for Licensed
++# Product with Synopsys or any supplement thereto. Permission is hereby
++# granted, free of charge, to any person obtaining a copy of this software
++# annotated with this license and the Software, to deal in the Software
++# without restriction, including without limitation the rights to use,
++# copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++# of the Software, and to permit persons to whom the Software is furnished
++# to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included
++# in all copies or substantial portions of the Software.
++#
++# THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++# BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++# PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++# THE POSSIBILITY OF SUCH DAMAGE.
++#
++# License 2: Modified BSD
++#
++# Copyright (c) 2017 Advanced Micro Devices, Inc.
++# All rights reserved.
++#
++# Redistribution and use in source and binary forms, with or without
++# modification, are permitted provided that the following conditions
++# are met:
++#
++# * Redistributions of source code must retain the above copyright
++# notice, this list of conditions and the following disclaimer.
++# * Redistributions in binary form must reproduce the above copyright
++# notice, this list of conditions and the following disclaimer in
++# the documentation and/or other materials provided with the
++# distribution.
++# * Neither the name of Advanced Micro Devices, Inc. nor the
++# names of its contributors may be used to endorse or promote products
++# derived from this software without specific prior written permission.
++#
++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++#
++# This file incorporates work covered by the following copyright and
++# permission notice:
++#
++# Copyright (c) 2013 Synopsys, Inc.
++#
++# The Synopsys DWC ETHER XGMAC Software Driver and documentation
++# (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++# Inc. unless otherwise expressly agreed to in writing between Synopsys
++# and you.
++#
++# The Software IS NOT an item of Licensed Software or Licensed Product
++# under any End User Software License Agreement or Agreement for Licensed
++# Product with Synopsys or any supplement thereto. Permission is hereby
++# granted, free of charge, to any person obtaining a copy of this software
++# annotated with this license and the Software, to deal in the Software
++# without restriction, including without limitation the rights to use,
++# copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++# of the Software, and to permit persons to whom the Software is furnished
++# to do so, subject to the following conditions:
++#
++# The above copyright notice and this permission notice shall be included
++# in all copies or substantial portions of the Software.
++#
++# THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++# BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++# PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++# THE POSSIBILITY OF SUCH DAMAGE.
++
++include $(RTE_SDK)/mk/rte.vars.mk
++
++#
++# library name
++#
++LIB = librte_pmd_axgbe.a
++
++CFLAGS += -O3
++CFLAGS += $(WERROR_FLAGS)
++
++EXPORT_MAP := rte_pmd_axgbe_version.map
++
++LIBABIVER := 1
++
++#
++# all source are stored in SRCS-y
++#
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
++
++include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
+new file mode 100644
+index 0000000..168dbb5
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_common.h
+@@ -0,0 +1,172 @@
++/*-
++ * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __AXGBE_COMMON_H__
++#define __AXGBE_COMMON_H__
++
++#include "axgbe_logs.h"
++
++#include <stdbool.h>
++#include <limits.h>
++#include <sys/queue.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <string.h>
++#include <errno.h>
++#include <stdint.h>
++#include <stdarg.h>
++#include <unistd.h>
++#include <inttypes.h>
++#include <pthread.h>
++
++#include <rte_byteorder.h>
++#include <rte_memory.h>
++#include <rte_malloc.h>
++#include <rte_hexdump.h>
++#include <rte_log.h>
++#include <rte_debug.h>
++#include <rte_branch_prediction.h>
++#include <rte_eal.h>
++#include <rte_memzone.h>
++#include <rte_ether.h>
++#include <rte_ethdev.h>
++#include <rte_dev.h>
++#include <rte_errno.h>
++#include <rte_dev.h>
++#include <rte_ethdev_pci.h>
++#include <rte_common.h>
++#include <rte_cycles.h>
++#include <rte_io.h>
++
++#define BIT(nr) (1 << (nr))
++#ifndef ARRAY_SIZE
++#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
++#endif
++
++#define AXGBE_HZ 250
++
++#endif /* __AXGBE_COMMON_H__ */
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+new file mode 100644
+index 0000000..0b7894f
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -0,0 +1,219 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++
++static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
++static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
++
++/* The set of PCI devices this driver supports */
++#define AMD_PCI_VENDOR_ID 0x1022
++#define AMD_PCI_AXGBE_DEVICE_ID1 0x1458
++#define AMD_PCI_AXGBE_DEVICE_ID2 0x1459
++
++int axgbe_logtype_init;
++int axgbe_logtype_driver;
++
++static const struct rte_pci_id pci_id_axgbe_map[] = {
++ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_ID1)},
++ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_ID2)},
++ { .vendor_id = 0, },
++};
++
++/*
++ * It returns 0 on success.
++ */
++static int
++eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata;
++ struct rte_pci_device *pci_dev;
++
++ pdata = (struct axgbe_port *)eth_dev->data->dev_private;
++ pdata->eth_dev = eth_dev;
++
++ /*
++ * For secondary processes, we don't initialise any further as primary
++ * has already done this work.
++ */
++ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++ return 0;
++
++ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
++ pdata->pci_dev = pci_dev;
++
++ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
++ eth_dev->data->port_id, pci_dev->id.vendor_id,
++ pci_dev->id.device_id);
++
++ return 0;
++}
++
++static int
++eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
++{
++ /* stub function */
++ PMD_INIT_FUNC_TRACE();
++
++ return 0;
++}
++
++static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
++ struct rte_pci_device *pci_dev)
++{
++ return rte_eth_dev_pci_generic_probe(pci_dev,
++ sizeof(struct axgbe_port), eth_axgbe_dev_init);
++}
++
++static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
++{
++ return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
++}
++
++static struct rte_pci_driver rte_axgbe_pmd = {
++ .id_table = pci_id_axgbe_map,
++ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
++ .probe = eth_axgbe_pci_probe,
++ .remove = eth_axgbe_pci_remove,
++};
++
++RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
++RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
++RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic");
++
++RTE_INIT(axgbe_init_log);
++static void
++axgbe_init_log(void)
++{
++ axgbe_logtype_init = rte_log_register("pmd.axgbe.init");
++ if (axgbe_logtype_init >= 0)
++ rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
++ axgbe_logtype_driver = rte_log_register("pmd.axgbe.driver");
++ if (axgbe_logtype_driver >= 0)
++ rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
++}
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+new file mode 100644
+index 0000000..5f8931f
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -0,0 +1,145 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef RTE_ETH_AXGBE_H_
++#define RTE_ETH_AXGBE_H_
++
++#include <rte_mempool.h>
++#include <rte_lcore.h>
++#include "axgbe_common.h"
++
++/*
++ * Structure to store private data for each port.
++ */
++struct axgbe_port {
++ /* Ethdev where port belongs*/
++ struct rte_eth_dev *eth_dev;
++ /* Pci dev info */
++ const struct rte_pci_device *pci_dev;
++};
++
++#endif /* RTE_ETH_AXGBE_H_ */
+diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h
+new file mode 100644
+index 0000000..aaa8efc
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_logs.h
+@@ -0,0 +1,152 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _AXGBE_LOGS_H_
++#define _AXGBE_LOGS_H_
++
++#include <stdio.h>
++
++extern int axgbe_logtype_init;
++#define PMD_INIT_LOG(level, fmt, args...) \
++ rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \
++ __func__, ##args)
++
++#ifdef RTE_LIBRTE_AXGBE_DEBUG_INIT
++#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
++#else
++#define PMD_INIT_FUNC_TRACE() do { } while (0)
++#endif
++
++extern int axgbe_logtype_driver;
++#define PMD_DRV_LOG_RAW(level, fmt, args...) \
++ rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
++ __func__, ## args)
++
++#define PMD_DRV_LOG(level, fmt, args...) \
++ PMD_DRV_LOG_RAW(level, fmt "\n", ## args)
++
++#endif /* _AXGBE_LOGS_H_ */
+diff --git a/drivers/net/axgbe/rte_pmd_axgbe_version.map b/drivers/net/axgbe/rte_pmd_axgbe_version.map
+new file mode 100644
+index 0000000..b26efa6
+--- /dev/null
++++ b/drivers/net/axgbe/rte_pmd_axgbe_version.map
+@@ -0,0 +1,4 @@
++DPDK_18.05 {
++
++ local: *;
++};
+diff --git a/mk/rte.app.mk b/mk/rte.app.mk
+index 3eb41d1..b77305b 100644
+--- a/mk/rte.app.mk
++++ b/mk/rte.app.mk
+@@ -123,6 +123,7 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += -lrte_pmd_ark
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += -lrte_pmd_avf
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += -lrte_pmd_avp
++_LDLIBS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += -lrte_pmd_axgbe
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += -lrte_pmd_bond
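A minimal sketch, not part of the patch above, of how the registration it adds is exercised: RTE_PMD_REGISTER_PCI() hooks rte_axgbe_pmd into the PCI bus, so rte_eal_init() matches vendor 0x1022 / device 0x1458 or 0x1459 and invokes eth_axgbe_pci_probe(), which runs eth_axgbe_dev_init() for each matching port. The application code is assumed; only the DPDK calls shown (rte_eal_init, rte_eth_dev_count) are standard APIs of this DPDK era.

/*
 * Illustrative application-side view of the probe flow added by this patch.
 * Link against a DPDK build with CONFIG_RTE_LIBRTE_AXGBE_PMD=y.
 */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_ethdev.h>

int main(int argc, char **argv)
{
	/* Scans the PCI bus and probes matching PMDs, including axgbe */
	if (rte_eal_init(argc, argv) < 0) {
		fprintf(stderr, "EAL init failed\n");
		return 1;
	}
	printf("%u ethdev port(s) probed\n", (unsigned int)rte_eth_dev_count());
	return 0;
}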
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-02-18-net-axgbe-add-register-map-and-related-macros.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-02-18-net-axgbe-add-register-map-and-related-macros.patch
new file mode 100644
index 00000000..c7303142
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-02-18-net-axgbe-add-register-map-and-related-macros.patch
@@ -0,0 +1,1672 @@
+From patchwork Fri Mar 9 08:42:18 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,02/18] net/axgbe: add register map and related macros
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35821
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-2-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:18 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_common.h | 1644 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 1644 insertions(+)
+
+diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
+index 168dbb5..294f2e4 100644
+--- a/drivers/net/axgbe/axgbe_common.h
++++ b/drivers/net/axgbe/axgbe_common.h
+@@ -169,4 +169,1648 @@
+
+ #define AXGBE_HZ 250
+
++/* DMA register offsets */
++#define DMA_MR 0x3000
++#define DMA_SBMR 0x3004
++#define DMA_ISR 0x3008
++#define DMA_AXIARCR 0x3010
++#define DMA_AXIAWCR 0x3018
++#define DMA_AXIAWRCR 0x301c
++#define DMA_DSR0 0x3020
++#define DMA_DSR1 0x3024
++#define EDMA_TX_CONTROL 0x3040
++#define EDMA_RX_CONTROL 0x3044
++
++/* DMA register entry bit positions and sizes */
++#define DMA_AXIARCR_DRC_INDEX 0
++#define DMA_AXIARCR_DRC_WIDTH 4
++#define DMA_AXIARCR_DRD_INDEX 4
++#define DMA_AXIARCR_DRD_WIDTH 2
++#define DMA_AXIARCR_TEC_INDEX 8
++#define DMA_AXIARCR_TEC_WIDTH 4
++#define DMA_AXIARCR_TED_INDEX 12
++#define DMA_AXIARCR_TED_WIDTH 2
++#define DMA_AXIARCR_THC_INDEX 16
++#define DMA_AXIARCR_THC_WIDTH 4
++#define DMA_AXIARCR_THD_INDEX 20
++#define DMA_AXIARCR_THD_WIDTH 2
++#define DMA_AXIAWCR_DWC_INDEX 0
++#define DMA_AXIAWCR_DWC_WIDTH 4
++#define DMA_AXIAWCR_DWD_INDEX 4
++#define DMA_AXIAWCR_DWD_WIDTH 2
++#define DMA_AXIAWCR_RPC_INDEX 8
++#define DMA_AXIAWCR_RPC_WIDTH 4
++#define DMA_AXIAWCR_RPD_INDEX 12
++#define DMA_AXIAWCR_RPD_WIDTH 2
++#define DMA_AXIAWCR_RHC_INDEX 16
++#define DMA_AXIAWCR_RHC_WIDTH 4
++#define DMA_AXIAWCR_RHD_INDEX 20
++#define DMA_AXIAWCR_RHD_WIDTH 2
++#define DMA_AXIAWCR_RDC_INDEX 24
++#define DMA_AXIAWCR_RDC_WIDTH 4
++#define DMA_AXIAWCR_RDD_INDEX 28
++#define DMA_AXIAWCR_RDD_WIDTH 2
++#define DMA_AXIAWRCR_TDWC_INDEX 0
++#define DMA_AXIAWRCR_TDWC_WIDTH 4
++#define DMA_AXIAWRCR_TDWD_INDEX 4
++#define DMA_AXIAWRCR_TDWD_WIDTH 4
++#define DMA_AXIAWRCR_RDRC_INDEX 8
++#define DMA_AXIAWRCR_RDRC_WIDTH 4
++#define DMA_ISR_MACIS_INDEX 17
++#define DMA_ISR_MACIS_WIDTH 1
++#define DMA_ISR_MTLIS_INDEX 16
++#define DMA_ISR_MTLIS_WIDTH 1
++#define DMA_MR_INTM_INDEX 12
++#define DMA_MR_INTM_WIDTH 2
++#define DMA_MR_SWR_INDEX 0
++#define DMA_MR_SWR_WIDTH 1
++#define DMA_SBMR_WR_OSR_INDEX 24
++#define DMA_SBMR_WR_OSR_WIDTH 6
++#define DMA_SBMR_RD_OSR_INDEX 16
++#define DMA_SBMR_RD_OSR_WIDTH 6
++#define DMA_SBMR_AAL_INDEX 12
++#define DMA_SBMR_AAL_WIDTH 1
++#define DMA_SBMR_EAME_INDEX 11
++#define DMA_SBMR_EAME_WIDTH 1
++#define DMA_SBMR_BLEN_256_INDEX 7
++#define DMA_SBMR_BLEN_256_WIDTH 1
++#define DMA_SBMR_BLEN_32_INDEX 4
++#define DMA_SBMR_BLEN_32_WIDTH 1
++#define DMA_SBMR_UNDEF_INDEX 0
++#define DMA_SBMR_UNDEF_WIDTH 1
++
++/* DMA register values */
++#define DMA_DSR_RPS_WIDTH 4
++#define DMA_DSR_TPS_WIDTH 4
++#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
++#define DMA_DSR0_RPS_START 8
++#define DMA_DSR0_TPS_START 12
++#define DMA_DSRX_FIRST_QUEUE 3
++#define DMA_DSRX_INC 4
++#define DMA_DSRX_QPR 4
++#define DMA_DSRX_RPS_START 0
++#define DMA_DSRX_TPS_START 4
++#define DMA_TPS_STOPPED 0x00
++#define DMA_TPS_SUSPENDED 0x06
++
++/* DMA channel register offsets
++ * Multiple channels can be active. The first channel has registers
++ * that begin at 0x3100. Each subsequent channel has registers that
++ * are accessed using an offset of 0x80 from the previous channel.
++ */
++#define DMA_CH_BASE 0x3100
++#define DMA_CH_INC 0x80
++
++#define DMA_CH_CR 0x00
++#define DMA_CH_TCR 0x04
++#define DMA_CH_RCR 0x08
++#define DMA_CH_TDLR_HI 0x10
++#define DMA_CH_TDLR_LO 0x14
++#define DMA_CH_RDLR_HI 0x18
++#define DMA_CH_RDLR_LO 0x1c
++#define DMA_CH_TDTR_LO 0x24
++#define DMA_CH_RDTR_LO 0x2c
++#define DMA_CH_TDRLR 0x30
++#define DMA_CH_RDRLR 0x34
++#define DMA_CH_IER 0x38
++#define DMA_CH_RIWT 0x3c
++#define DMA_CH_CATDR_LO 0x44
++#define DMA_CH_CARDR_LO 0x4c
++#define DMA_CH_CATBR_HI 0x50
++#define DMA_CH_CATBR_LO 0x54
++#define DMA_CH_CARBR_HI 0x58
++#define DMA_CH_CARBR_LO 0x5c
++#define DMA_CH_SR 0x60
++
++/* DMA channel register entry bit positions and sizes */
++#define DMA_CH_CR_PBLX8_INDEX 16
++#define DMA_CH_CR_PBLX8_WIDTH 1
++#define DMA_CH_CR_SPH_INDEX 24
++#define DMA_CH_CR_SPH_WIDTH 1
++#define DMA_CH_IER_AIE_INDEX 14
++#define DMA_CH_IER_AIE_WIDTH 1
++#define DMA_CH_IER_FBEE_INDEX 12
++#define DMA_CH_IER_FBEE_WIDTH 1
++#define DMA_CH_IER_NIE_INDEX 15
++#define DMA_CH_IER_NIE_WIDTH 1
++#define DMA_CH_IER_RBUE_INDEX 7
++#define DMA_CH_IER_RBUE_WIDTH 1
++#define DMA_CH_IER_RIE_INDEX 6
++#define DMA_CH_IER_RIE_WIDTH 1
++#define DMA_CH_IER_RSE_INDEX 8
++#define DMA_CH_IER_RSE_WIDTH 1
++#define DMA_CH_IER_TBUE_INDEX 2
++#define DMA_CH_IER_TBUE_WIDTH 1
++#define DMA_CH_IER_TIE_INDEX 0
++#define DMA_CH_IER_TIE_WIDTH 1
++#define DMA_CH_IER_TXSE_INDEX 1
++#define DMA_CH_IER_TXSE_WIDTH 1
++#define DMA_CH_RCR_PBL_INDEX 16
++#define DMA_CH_RCR_PBL_WIDTH 6
++#define DMA_CH_RCR_RBSZ_INDEX 1
++#define DMA_CH_RCR_RBSZ_WIDTH 14
++#define DMA_CH_RCR_SR_INDEX 0
++#define DMA_CH_RCR_SR_WIDTH 1
++#define DMA_CH_RIWT_RWT_INDEX 0
++#define DMA_CH_RIWT_RWT_WIDTH 8
++#define DMA_CH_SR_FBE_INDEX 12
++#define DMA_CH_SR_FBE_WIDTH 1
++#define DMA_CH_SR_RBU_INDEX 7
++#define DMA_CH_SR_RBU_WIDTH 1
++#define DMA_CH_SR_RI_INDEX 6
++#define DMA_CH_SR_RI_WIDTH 1
++#define DMA_CH_SR_RPS_INDEX 8
++#define DMA_CH_SR_RPS_WIDTH 1
++#define DMA_CH_SR_TBU_INDEX 2
++#define DMA_CH_SR_TBU_WIDTH 1
++#define DMA_CH_SR_TI_INDEX 0
++#define DMA_CH_SR_TI_WIDTH 1
++#define DMA_CH_SR_TPS_INDEX 1
++#define DMA_CH_SR_TPS_WIDTH 1
++#define DMA_CH_TCR_OSP_INDEX 4
++#define DMA_CH_TCR_OSP_WIDTH 1
++#define DMA_CH_TCR_PBL_INDEX 16
++#define DMA_CH_TCR_PBL_WIDTH 6
++#define DMA_CH_TCR_ST_INDEX 0
++#define DMA_CH_TCR_ST_WIDTH 1
++#define DMA_CH_TCR_TSE_INDEX 12
++#define DMA_CH_TCR_TSE_WIDTH 1
++
++/* DMA channel register values */
++#define DMA_OSP_DISABLE 0x00
++#define DMA_OSP_ENABLE 0x01
++#define DMA_PBL_1 1
++#define DMA_PBL_2 2
++#define DMA_PBL_4 4
++#define DMA_PBL_8 8
++#define DMA_PBL_16 16
++#define DMA_PBL_32 32
++#define DMA_PBL_64 64 /* 8 x 8 */
++#define DMA_PBL_128 128 /* 8 x 16 */
++#define DMA_PBL_256 256 /* 8 x 32 */
++#define DMA_PBL_X8_DISABLE 0x00
++#define DMA_PBL_X8_ENABLE 0x01
++
++/* MAC register offsets */
++#define MAC_TCR 0x0000
++#define MAC_RCR 0x0004
++#define MAC_PFR 0x0008
++#define MAC_WTR 0x000c
++#define MAC_HTR0 0x0010
++#define MAC_VLANTR 0x0050
++#define MAC_VLANHTR 0x0058
++#define MAC_VLANIR 0x0060
++#define MAC_IVLANIR 0x0064
++#define MAC_RETMR 0x006c
++#define MAC_Q0TFCR 0x0070
++#define MAC_RFCR 0x0090
++#define MAC_RQC0R 0x00a0
++#define MAC_RQC1R 0x00a4
++#define MAC_RQC2R 0x00a8
++#define MAC_RQC3R 0x00ac
++#define MAC_ISR 0x00b0
++#define MAC_IER 0x00b4
++#define MAC_RTSR 0x00b8
++#define MAC_PMTCSR 0x00c0
++#define MAC_RWKPFR 0x00c4
++#define MAC_LPICSR 0x00d0
++#define MAC_LPITCR 0x00d4
++#define MAC_VR 0x0110
++#define MAC_DR 0x0114
++#define MAC_HWF0R 0x011c
++#define MAC_HWF1R 0x0120
++#define MAC_HWF2R 0x0124
++#define MAC_MDIOSCAR 0x0200
++#define MAC_MDIOSCCDR 0x0204
++#define MAC_MDIOISR 0x0214
++#define MAC_MDIOIER 0x0218
++#define MAC_MDIOCL22R 0x0220
++#define MAC_GPIOCR 0x0278
++#define MAC_GPIOSR 0x027c
++#define MAC_MACA0HR 0x0300
++#define MAC_MACA0LR 0x0304
++#define MAC_MACA1HR 0x0308
++#define MAC_MACA1LR 0x030c
++#define MAC_RSSCR 0x0c80
++#define MAC_RSSAR 0x0c88
++#define MAC_RSSDR 0x0c8c
++#define MAC_TSCR 0x0d00
++#define MAC_SSIR 0x0d04
++#define MAC_STSR 0x0d08
++#define MAC_STNR 0x0d0c
++#define MAC_STSUR 0x0d10
++#define MAC_STNUR 0x0d14
++#define MAC_TSAR 0x0d18
++#define MAC_TSSR 0x0d20
++#define MAC_TXSNR 0x0d30
++#define MAC_TXSSR 0x0d34
++
++#define MAC_QTFCR_INC 4
++#define MAC_MACA_INC 4
++#define MAC_HTR_INC 4
++
++#define MAC_RQC2_INC 4
++#define MAC_RQC2_Q_PER_REG 4
++
++/* MAC register entry bit positions and sizes */
++#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
++#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
++#define MAC_HWF0R_ARPOFFSEL_INDEX 9
++#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
++#define MAC_HWF0R_EEESEL_INDEX 13
++#define MAC_HWF0R_EEESEL_WIDTH 1
++#define MAC_HWF0R_GMIISEL_INDEX 1
++#define MAC_HWF0R_GMIISEL_WIDTH 1
++#define MAC_HWF0R_MGKSEL_INDEX 7
++#define MAC_HWF0R_MGKSEL_WIDTH 1
++#define MAC_HWF0R_MMCSEL_INDEX 8
++#define MAC_HWF0R_MMCSEL_WIDTH 1
++#define MAC_HWF0R_RWKSEL_INDEX 6
++#define MAC_HWF0R_RWKSEL_WIDTH 1
++#define MAC_HWF0R_RXCOESEL_INDEX 16
++#define MAC_HWF0R_RXCOESEL_WIDTH 1
++#define MAC_HWF0R_SAVLANINS_INDEX 27
++#define MAC_HWF0R_SAVLANINS_WIDTH 1
++#define MAC_HWF0R_SMASEL_INDEX 5
++#define MAC_HWF0R_SMASEL_WIDTH 1
++#define MAC_HWF0R_TSSEL_INDEX 12
++#define MAC_HWF0R_TSSEL_WIDTH 1
++#define MAC_HWF0R_TSSTSSEL_INDEX 25
++#define MAC_HWF0R_TSSTSSEL_WIDTH 2
++#define MAC_HWF0R_TXCOESEL_INDEX 14
++#define MAC_HWF0R_TXCOESEL_WIDTH 1
++#define MAC_HWF0R_VLHASH_INDEX 4
++#define MAC_HWF0R_VLHASH_WIDTH 1
++#define MAC_HWF1R_ADDR64_INDEX 14
++#define MAC_HWF1R_ADDR64_WIDTH 2
++#define MAC_HWF1R_ADVTHWORD_INDEX 13
++#define MAC_HWF1R_ADVTHWORD_WIDTH 1
++#define MAC_HWF1R_DBGMEMA_INDEX 19
++#define MAC_HWF1R_DBGMEMA_WIDTH 1
++#define MAC_HWF1R_DCBEN_INDEX 16
++#define MAC_HWF1R_DCBEN_WIDTH 1
++#define MAC_HWF1R_HASHTBLSZ_INDEX 24
++#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
++#define MAC_HWF1R_L3L4FNUM_INDEX 27
++#define MAC_HWF1R_L3L4FNUM_WIDTH 4
++#define MAC_HWF1R_NUMTC_INDEX 21
++#define MAC_HWF1R_NUMTC_WIDTH 3
++#define MAC_HWF1R_RSSEN_INDEX 20
++#define MAC_HWF1R_RSSEN_WIDTH 1
++#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
++#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
++#define MAC_HWF1R_SPHEN_INDEX 17
++#define MAC_HWF1R_SPHEN_WIDTH 1
++#define MAC_HWF1R_TSOEN_INDEX 18
++#define MAC_HWF1R_TSOEN_WIDTH 1
++#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
++#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
++#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
++#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
++#define MAC_HWF2R_PPSOUTNUM_INDEX 24
++#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
++#define MAC_HWF2R_RXCHCNT_INDEX 12
++#define MAC_HWF2R_RXCHCNT_WIDTH 4
++#define MAC_HWF2R_RXQCNT_INDEX 0
++#define MAC_HWF2R_RXQCNT_WIDTH 4
++#define MAC_HWF2R_TXCHCNT_INDEX 18
++#define MAC_HWF2R_TXCHCNT_WIDTH 4
++#define MAC_HWF2R_TXQCNT_INDEX 6
++#define MAC_HWF2R_TXQCNT_WIDTH 4
++#define MAC_IER_TSIE_INDEX 12
++#define MAC_IER_TSIE_WIDTH 1
++#define MAC_ISR_MMCRXIS_INDEX 9
++#define MAC_ISR_MMCRXIS_WIDTH 1
++#define MAC_ISR_MMCTXIS_INDEX 10
++#define MAC_ISR_MMCTXIS_WIDTH 1
++#define MAC_ISR_PMTIS_INDEX 4
++#define MAC_ISR_PMTIS_WIDTH 1
++#define MAC_ISR_SMI_INDEX 1
++#define MAC_ISR_SMI_WIDTH 1
++#define MAC_ISR_LSI_INDEX 0
++#define MAC_ISR_LSI_WIDTH 1
++#define MAC_ISR_LS_INDEX 24
++#define MAC_ISR_LS_WIDTH 2
++#define MAC_ISR_TSIS_INDEX 12
++#define MAC_ISR_TSIS_WIDTH 1
++#define MAC_MACA1HR_AE_INDEX 31
++#define MAC_MACA1HR_AE_WIDTH 1
++#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
++#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
++#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
++#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
++#define MAC_MDIOSCAR_DA_INDEX 21
++#define MAC_MDIOSCAR_DA_WIDTH 5
++#define MAC_MDIOSCAR_PA_INDEX 16
++#define MAC_MDIOSCAR_PA_WIDTH 5
++#define MAC_MDIOSCAR_RA_INDEX 0
++#define MAC_MDIOSCAR_RA_WIDTH 16
++#define MAC_MDIOSCAR_REG_INDEX 0
++#define MAC_MDIOSCAR_REG_WIDTH 21
++#define MAC_MDIOSCCDR_BUSY_INDEX 22
++#define MAC_MDIOSCCDR_BUSY_WIDTH 1
++#define MAC_MDIOSCCDR_CMD_INDEX 16
++#define MAC_MDIOSCCDR_CMD_WIDTH 2
++#define MAC_MDIOSCCDR_CR_INDEX 19
++#define MAC_MDIOSCCDR_CR_WIDTH 3
++#define MAC_MDIOSCCDR_DATA_INDEX 0
++#define MAC_MDIOSCCDR_DATA_WIDTH 16
++#define MAC_MDIOSCCDR_SADDR_INDEX 18
++#define MAC_MDIOSCCDR_SADDR_WIDTH 1
++#define MAC_PFR_HMC_INDEX 2
++#define MAC_PFR_HMC_WIDTH 1
++#define MAC_PFR_HPF_INDEX 10
++#define MAC_PFR_HPF_WIDTH 1
++#define MAC_PFR_HUC_INDEX 1
++#define MAC_PFR_HUC_WIDTH 1
++#define MAC_PFR_PM_INDEX 4
++#define MAC_PFR_PM_WIDTH 1
++#define MAC_PFR_PR_INDEX 0
++#define MAC_PFR_PR_WIDTH 1
++#define MAC_PFR_VTFE_INDEX 16
++#define MAC_PFR_VTFE_WIDTH 1
++#define MAC_PMTCSR_MGKPKTEN_INDEX 1
++#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
++#define MAC_PMTCSR_PWRDWN_INDEX 0
++#define MAC_PMTCSR_PWRDWN_WIDTH 1
++#define MAC_PMTCSR_RWKFILTRST_INDEX 31
++#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
++#define MAC_PMTCSR_RWKPKTEN_INDEX 2
++#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
++#define MAC_Q0TFCR_PT_INDEX 16
++#define MAC_Q0TFCR_PT_WIDTH 16
++#define MAC_Q0TFCR_TFE_INDEX 1
++#define MAC_Q0TFCR_TFE_WIDTH 1
++#define MAC_RCR_ACS_INDEX 1
++#define MAC_RCR_ACS_WIDTH 1
++#define MAC_RCR_CST_INDEX 2
++#define MAC_RCR_CST_WIDTH 1
++#define MAC_RCR_DCRCC_INDEX 3
++#define MAC_RCR_DCRCC_WIDTH 1
++#define MAC_RCR_HDSMS_INDEX 12
++#define MAC_RCR_HDSMS_WIDTH 3
++#define MAC_RCR_IPC_INDEX 9
++#define MAC_RCR_IPC_WIDTH 1
++#define MAC_RCR_JE_INDEX 8
++#define MAC_RCR_JE_WIDTH 1
++#define MAC_RCR_LM_INDEX 10
++#define MAC_RCR_LM_WIDTH 1
++#define MAC_RCR_RE_INDEX 0
++#define MAC_RCR_RE_WIDTH 1
++#define MAC_RFCR_PFCE_INDEX 8
++#define MAC_RFCR_PFCE_WIDTH 1
++#define MAC_RFCR_RFE_INDEX 0
++#define MAC_RFCR_RFE_WIDTH 1
++#define MAC_RFCR_UP_INDEX 1
++#define MAC_RFCR_UP_WIDTH 1
++#define MAC_RQC0R_RXQ0EN_INDEX 0
++#define MAC_RQC0R_RXQ0EN_WIDTH 2
++#define MAC_RSSAR_ADDRT_INDEX 2
++#define MAC_RSSAR_ADDRT_WIDTH 1
++#define MAC_RSSAR_CT_INDEX 1
++#define MAC_RSSAR_CT_WIDTH 1
++#define MAC_RSSAR_OB_INDEX 0
++#define MAC_RSSAR_OB_WIDTH 1
++#define MAC_RSSAR_RSSIA_INDEX 8
++#define MAC_RSSAR_RSSIA_WIDTH 8
++#define MAC_RSSCR_IP2TE_INDEX 1
++#define MAC_RSSCR_IP2TE_WIDTH 1
++#define MAC_RSSCR_RSSE_INDEX 0
++#define MAC_RSSCR_RSSE_WIDTH 1
++#define MAC_RSSCR_TCP4TE_INDEX 2
++#define MAC_RSSCR_TCP4TE_WIDTH 1
++#define MAC_RSSCR_UDP4TE_INDEX 3
++#define MAC_RSSCR_UDP4TE_WIDTH 1
++#define MAC_RSSDR_DMCH_INDEX 0
++#define MAC_RSSDR_DMCH_WIDTH 4
++#define MAC_SSIR_SNSINC_INDEX 8
++#define MAC_SSIR_SNSINC_WIDTH 8
++#define MAC_SSIR_SSINC_INDEX 16
++#define MAC_SSIR_SSINC_WIDTH 8
++#define MAC_TCR_SS_INDEX 29
++#define MAC_TCR_SS_WIDTH 2
++#define MAC_TCR_TE_INDEX 0
++#define MAC_TCR_TE_WIDTH 1
++#define MAC_TSCR_AV8021ASMEN_INDEX 28
++#define MAC_TSCR_AV8021ASMEN_WIDTH 1
++#define MAC_TSCR_SNAPTYPSEL_INDEX 16
++#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
++#define MAC_TSCR_TSADDREG_INDEX 5
++#define MAC_TSCR_TSADDREG_WIDTH 1
++#define MAC_TSCR_TSCFUPDT_INDEX 1
++#define MAC_TSCR_TSCFUPDT_WIDTH 1
++#define MAC_TSCR_TSCTRLSSR_INDEX 9
++#define MAC_TSCR_TSCTRLSSR_WIDTH 1
++#define MAC_TSCR_TSENA_INDEX 0
++#define MAC_TSCR_TSENA_WIDTH 1
++#define MAC_TSCR_TSENALL_INDEX 8
++#define MAC_TSCR_TSENALL_WIDTH 1
++#define MAC_TSCR_TSEVNTENA_INDEX 14
++#define MAC_TSCR_TSEVNTENA_WIDTH 1
++#define MAC_TSCR_TSINIT_INDEX 2
++#define MAC_TSCR_TSINIT_WIDTH 1
++#define MAC_TSCR_TSIPENA_INDEX 11
++#define MAC_TSCR_TSIPENA_WIDTH 1
++#define MAC_TSCR_TSIPV4ENA_INDEX 13
++#define MAC_TSCR_TSIPV4ENA_WIDTH 1
++#define MAC_TSCR_TSIPV6ENA_INDEX 12
++#define MAC_TSCR_TSIPV6ENA_WIDTH 1
++#define MAC_TSCR_TSMSTRENA_INDEX 15
++#define MAC_TSCR_TSMSTRENA_WIDTH 1
++#define MAC_TSCR_TSVER2ENA_INDEX 10
++#define MAC_TSCR_TSVER2ENA_WIDTH 1
++#define MAC_TSCR_TXTSSTSM_INDEX 24
++#define MAC_TSCR_TXTSSTSM_WIDTH 1
++#define MAC_TSSR_TXTSC_INDEX 15
++#define MAC_TSSR_TXTSC_WIDTH 1
++#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
++#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
++#define MAC_VLANHTR_VLHT_INDEX 0
++#define MAC_VLANHTR_VLHT_WIDTH 16
++#define MAC_VLANIR_VLTI_INDEX 20
++#define MAC_VLANIR_VLTI_WIDTH 1
++#define MAC_VLANIR_CSVL_INDEX 19
++#define MAC_VLANIR_CSVL_WIDTH 1
++#define MAC_VLANTR_DOVLTC_INDEX 20
++#define MAC_VLANTR_DOVLTC_WIDTH 1
++#define MAC_VLANTR_ERSVLM_INDEX 19
++#define MAC_VLANTR_ERSVLM_WIDTH 1
++#define MAC_VLANTR_ESVL_INDEX 18
++#define MAC_VLANTR_ESVL_WIDTH 1
++#define MAC_VLANTR_ETV_INDEX 16
++#define MAC_VLANTR_ETV_WIDTH 1
++#define MAC_VLANTR_EVLS_INDEX 21
++#define MAC_VLANTR_EVLS_WIDTH 2
++#define MAC_VLANTR_EVLRXS_INDEX 24
++#define MAC_VLANTR_EVLRXS_WIDTH 1
++#define MAC_VLANTR_VL_INDEX 0
++#define MAC_VLANTR_VL_WIDTH 16
++#define MAC_VLANTR_VTHM_INDEX 25
++#define MAC_VLANTR_VTHM_WIDTH 1
++#define MAC_VLANTR_VTIM_INDEX 17
++#define MAC_VLANTR_VTIM_WIDTH 1
++#define MAC_VR_DEVID_INDEX 8
++#define MAC_VR_DEVID_WIDTH 8
++#define MAC_VR_SNPSVER_INDEX 0
++#define MAC_VR_SNPSVER_WIDTH 8
++#define MAC_VR_USERVER_INDEX 16
++#define MAC_VR_USERVER_WIDTH 8
++
++/* MMC register offsets */
++#define MMC_CR 0x0800
++#define MMC_RISR 0x0804
++#define MMC_TISR 0x0808
++#define MMC_RIER 0x080c
++#define MMC_TIER 0x0810
++#define MMC_TXOCTETCOUNT_GB_LO 0x0814
++#define MMC_TXOCTETCOUNT_GB_HI 0x0818
++#define MMC_TXFRAMECOUNT_GB_LO 0x081c
++#define MMC_TXFRAMECOUNT_GB_HI 0x0820
++#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
++#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
++#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
++#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
++#define MMC_TX64OCTETS_GB_LO 0x0834
++#define MMC_TX64OCTETS_GB_HI 0x0838
++#define MMC_TX65TO127OCTETS_GB_LO 0x083c
++#define MMC_TX65TO127OCTETS_GB_HI 0x0840
++#define MMC_TX128TO255OCTETS_GB_LO 0x0844
++#define MMC_TX128TO255OCTETS_GB_HI 0x0848
++#define MMC_TX256TO511OCTETS_GB_LO 0x084c
++#define MMC_TX256TO511OCTETS_GB_HI 0x0850
++#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
++#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
++#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
++#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
++#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
++#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
++#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
++#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
++#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
++#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
++#define MMC_TXUNDERFLOWERROR_LO 0x087c
++#define MMC_TXUNDERFLOWERROR_HI 0x0880
++#define MMC_TXOCTETCOUNT_G_LO 0x0884
++#define MMC_TXOCTETCOUNT_G_HI 0x0888
++#define MMC_TXFRAMECOUNT_G_LO 0x088c
++#define MMC_TXFRAMECOUNT_G_HI 0x0890
++#define MMC_TXPAUSEFRAMES_LO 0x0894
++#define MMC_TXPAUSEFRAMES_HI 0x0898
++#define MMC_TXVLANFRAMES_G_LO 0x089c
++#define MMC_TXVLANFRAMES_G_HI 0x08a0
++#define MMC_RXFRAMECOUNT_GB_LO 0x0900
++#define MMC_RXFRAMECOUNT_GB_HI 0x0904
++#define MMC_RXOCTETCOUNT_GB_LO 0x0908
++#define MMC_RXOCTETCOUNT_GB_HI 0x090c
++#define MMC_RXOCTETCOUNT_G_LO 0x0910
++#define MMC_RXOCTETCOUNT_G_HI 0x0914
++#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
++#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
++#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
++#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
++#define MMC_RXCRCERROR_LO 0x0928
++#define MMC_RXCRCERROR_HI 0x092c
++#define MMC_RXRUNTERROR 0x0930
++#define MMC_RXJABBERERROR 0x0934
++#define MMC_RXUNDERSIZE_G 0x0938
++#define MMC_RXOVERSIZE_G 0x093c
++#define MMC_RX64OCTETS_GB_LO 0x0940
++#define MMC_RX64OCTETS_GB_HI 0x0944
++#define MMC_RX65TO127OCTETS_GB_LO 0x0948
++#define MMC_RX65TO127OCTETS_GB_HI 0x094c
++#define MMC_RX128TO255OCTETS_GB_LO 0x0950
++#define MMC_RX128TO255OCTETS_GB_HI 0x0954
++#define MMC_RX256TO511OCTETS_GB_LO 0x0958
++#define MMC_RX256TO511OCTETS_GB_HI 0x095c
++#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
++#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
++#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
++#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
++#define MMC_RXUNICASTFRAMES_G_LO 0x0970
++#define MMC_RXUNICASTFRAMES_G_HI 0x0974
++#define MMC_RXLENGTHERROR_LO 0x0978
++#define MMC_RXLENGTHERROR_HI 0x097c
++#define MMC_RXOUTOFRANGETYPE_LO 0x0980
++#define MMC_RXOUTOFRANGETYPE_HI 0x0984
++#define MMC_RXPAUSEFRAMES_LO 0x0988
++#define MMC_RXPAUSEFRAMES_HI 0x098c
++#define MMC_RXFIFOOVERFLOW_LO 0x0990
++#define MMC_RXFIFOOVERFLOW_HI 0x0994
++#define MMC_RXVLANFRAMES_GB_LO 0x0998
++#define MMC_RXVLANFRAMES_GB_HI 0x099c
++#define MMC_RXWATCHDOGERROR 0x09a0
++
++/* MMC register entry bit positions and sizes */
++#define MMC_CR_CR_INDEX 0
++#define MMC_CR_CR_WIDTH 1
++#define MMC_CR_CSR_INDEX 1
++#define MMC_CR_CSR_WIDTH 1
++#define MMC_CR_ROR_INDEX 2
++#define MMC_CR_ROR_WIDTH 1
++#define MMC_CR_MCF_INDEX 3
++#define MMC_CR_MCF_WIDTH 1
++#define MMC_CR_MCT_INDEX 4
++#define MMC_CR_MCT_WIDTH 2
++#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
++#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
++#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
++#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
++#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
++#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
++#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
++#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
++#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
++#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
++#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
++#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
++#define MMC_RISR_RXCRCERROR_INDEX 5
++#define MMC_RISR_RXCRCERROR_WIDTH 1
++#define MMC_RISR_RXRUNTERROR_INDEX 6
++#define MMC_RISR_RXRUNTERROR_WIDTH 1
++#define MMC_RISR_RXJABBERERROR_INDEX 7
++#define MMC_RISR_RXJABBERERROR_WIDTH 1
++#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
++#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
++#define MMC_RISR_RXOVERSIZE_G_INDEX 9
++#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
++#define MMC_RISR_RX64OCTETS_GB_INDEX 10
++#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
++#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
++#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
++#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
++#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
++#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
++#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
++#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
++#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
++#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
++#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
++#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
++#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
++#define MMC_RISR_RXLENGTHERROR_INDEX 17
++#define MMC_RISR_RXLENGTHERROR_WIDTH 1
++#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
++#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
++#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
++#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
++#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
++#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
++#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
++#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
++#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
++#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
++#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
++#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
++#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
++#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
++#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
++#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
++#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
++#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
++#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
++#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
++#define MMC_TISR_TX64OCTETS_GB_INDEX 4
++#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
++#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
++#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
++#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
++#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
++#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
++#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
++#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
++#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
++#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
++#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
++#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
++#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
++#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
++#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
++#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
++#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
++#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
++#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
++#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
++#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
++#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
++#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
++#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
++#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
++#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
++#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
++
++/* MTL register offsets */
++#define MTL_OMR 0x1000
++#define MTL_FDCR 0x1008
++#define MTL_FDSR 0x100c
++#define MTL_FDDR 0x1010
++#define MTL_ISR 0x1020
++#define MTL_RQDCM0R 0x1030
++#define MTL_TCPM0R 0x1040
++#define MTL_TCPM1R 0x1044
++
++#define MTL_RQDCM_INC 4
++#define MTL_RQDCM_Q_PER_REG 4
++#define MTL_TCPM_INC 4
++#define MTL_TCPM_TC_PER_REG 4
++
++/* MTL register entry bit positions and sizes */
++#define MTL_OMR_ETSALG_INDEX 5
++#define MTL_OMR_ETSALG_WIDTH 2
++#define MTL_OMR_RAA_INDEX 2
++#define MTL_OMR_RAA_WIDTH 1
++
++/* MTL queue register offsets
++ * Multiple queues can be active. The first queue has registers
++ * that begin at 0x1100. Each subsequent queue has registers that
++ * are accessed using an offset of 0x80 from the previous queue.
++ */
++#define MTL_Q_BASE 0x1100
++#define MTL_Q_INC 0x80
++
++#define MTL_Q_TQOMR 0x00
++#define MTL_Q_TQUR 0x04
++#define MTL_Q_TQDR 0x08
++#define MTL_Q_RQOMR 0x40
++#define MTL_Q_RQMPOCR 0x44
++#define MTL_Q_RQDR 0x48
++#define MTL_Q_RQFCR 0x50
++#define MTL_Q_IER 0x70
++#define MTL_Q_ISR 0x74
++
++/* MTL queue register entry bit positions and sizes */
++#define MTL_Q_RQDR_PRXQ_INDEX 16
++#define MTL_Q_RQDR_PRXQ_WIDTH 14
++#define MTL_Q_RQDR_RXQSTS_INDEX 4
++#define MTL_Q_RQDR_RXQSTS_WIDTH 2
++#define MTL_Q_RQFCR_RFA_INDEX 1
++#define MTL_Q_RQFCR_RFA_WIDTH 6
++#define MTL_Q_RQFCR_RFD_INDEX 17
++#define MTL_Q_RQFCR_RFD_WIDTH 6
++#define MTL_Q_RQOMR_EHFC_INDEX 7
++#define MTL_Q_RQOMR_EHFC_WIDTH 1
++#define MTL_Q_RQOMR_RQS_INDEX 16
++#define MTL_Q_RQOMR_RQS_WIDTH 9
++#define MTL_Q_RQOMR_RSF_INDEX 5
++#define MTL_Q_RQOMR_RSF_WIDTH 1
++#define MTL_Q_RQOMR_RTC_INDEX 0
++#define MTL_Q_RQOMR_RTC_WIDTH 2
++#define MTL_Q_TQDR_TRCSTS_INDEX 1
++#define MTL_Q_TQDR_TRCSTS_WIDTH 2
++#define MTL_Q_TQDR_TXQSTS_INDEX 4
++#define MTL_Q_TQDR_TXQSTS_WIDTH 1
++#define MTL_Q_TQOMR_FTQ_INDEX 0
++#define MTL_Q_TQOMR_FTQ_WIDTH 1
++#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
++#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
++#define MTL_Q_TQOMR_TQS_INDEX 16
++#define MTL_Q_TQOMR_TQS_WIDTH 10
++#define MTL_Q_TQOMR_TSF_INDEX 1
++#define MTL_Q_TQOMR_TSF_WIDTH 1
++#define MTL_Q_TQOMR_TTC_INDEX 4
++#define MTL_Q_TQOMR_TTC_WIDTH 3
++#define MTL_Q_TQOMR_TXQEN_INDEX 2
++#define MTL_Q_TQOMR_TXQEN_WIDTH 2
++
++/* MTL queue register value */
++#define MTL_RSF_DISABLE 0x00
++#define MTL_RSF_ENABLE 0x01
++#define MTL_TSF_DISABLE 0x00
++#define MTL_TSF_ENABLE 0x01
++
++#define MTL_RX_THRESHOLD_64 0x00
++#define MTL_RX_THRESHOLD_96 0x02
++#define MTL_RX_THRESHOLD_128 0x03
++#define MTL_TX_THRESHOLD_32 0x01
++#define MTL_TX_THRESHOLD_64 0x00
++#define MTL_TX_THRESHOLD_96 0x02
++#define MTL_TX_THRESHOLD_128 0x03
++#define MTL_TX_THRESHOLD_192 0x04
++#define MTL_TX_THRESHOLD_256 0x05
++#define MTL_TX_THRESHOLD_384 0x06
++#define MTL_TX_THRESHOLD_512 0x07
++
++#define MTL_ETSALG_WRR 0x00
++#define MTL_ETSALG_WFQ 0x01
++#define MTL_ETSALG_DWRR 0x02
++#define MTL_RAA_SP 0x00
++#define MTL_RAA_WSP 0x01
++
++#define MTL_Q_DISABLED 0x00
++#define MTL_Q_ENABLED 0x02
++
++/* MTL traffic class register offsets
++ * Multiple traffic classes can be active. The first traffic class has
++ * registers that begin at 0x1100. Each subsequent traffic class has
++ * registers that are accessed using an offset of 0x80 from the previous
++ * traffic class.
++ */
++#define MTL_TC_BASE MTL_Q_BASE
++#define MTL_TC_INC MTL_Q_INC
++
++#define MTL_TC_ETSCR 0x10
++#define MTL_TC_ETSSR 0x14
++#define MTL_TC_QWR 0x18
++
++/* MTL traffic class register entry bit positions and sizes */
++#define MTL_TC_ETSCR_TSA_INDEX 0
++#define MTL_TC_ETSCR_TSA_WIDTH 2
++#define MTL_TC_QWR_QW_INDEX 0
++#define MTL_TC_QWR_QW_WIDTH 21
++
++/* MTL traffic class register value */
++#define MTL_TSA_SP 0x00
++#define MTL_TSA_ETS 0x02
++
++/* PCS register offsets */
++#define PCS_V1_WINDOW_SELECT 0x03fc
++#define PCS_V2_WINDOW_DEF 0x9060
++#define PCS_V2_WINDOW_SELECT 0x9064
++
++/* PCS register entry bit positions and sizes */
++#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
++#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
++#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
++#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
++
++/* SerDes integration register offsets */
++#define SIR0_KR_RT_1 0x002c
++#define SIR0_STATUS 0x0040
++#define SIR1_SPEED 0x0000
++
++/* SerDes integration register entry bit positions and sizes */
++#define SIR0_KR_RT_1_RESET_INDEX 11
++#define SIR0_KR_RT_1_RESET_WIDTH 1
++#define SIR0_STATUS_RX_READY_INDEX 0
++#define SIR0_STATUS_RX_READY_WIDTH 1
++#define SIR0_STATUS_TX_READY_INDEX 8
++#define SIR0_STATUS_TX_READY_WIDTH 1
++#define SIR1_SPEED_CDR_RATE_INDEX 12
++#define SIR1_SPEED_CDR_RATE_WIDTH 4
++#define SIR1_SPEED_DATARATE_INDEX 4
++#define SIR1_SPEED_DATARATE_WIDTH 2
++#define SIR1_SPEED_PLLSEL_INDEX 3
++#define SIR1_SPEED_PLLSEL_WIDTH 1
++#define SIR1_SPEED_RATECHANGE_INDEX 6
++#define SIR1_SPEED_RATECHANGE_WIDTH 1
++#define SIR1_SPEED_TXAMP_INDEX 8
++#define SIR1_SPEED_TXAMP_WIDTH 4
++#define SIR1_SPEED_WORDMODE_INDEX 0
++#define SIR1_SPEED_WORDMODE_WIDTH 3
++
++/* SerDes RxTx register offsets */
++#define RXTX_REG6 0x0018
++#define RXTX_REG20 0x0050
++#define RXTX_REG22 0x0058
++#define RXTX_REG114 0x01c8
++#define RXTX_REG129 0x0204
++
++/* SerDes RxTx register entry bit positions and sizes */
++#define RXTX_REG6_RESETB_RXD_INDEX 8
++#define RXTX_REG6_RESETB_RXD_WIDTH 1
++#define RXTX_REG20_BLWC_ENA_INDEX 2
++#define RXTX_REG20_BLWC_ENA_WIDTH 1
++#define RXTX_REG114_PQ_REG_INDEX 9
++#define RXTX_REG114_PQ_REG_WIDTH 7
++#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
++#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
++
++/* MAC Control register offsets */
++#define XP_PROP_0 0x0000
++#define XP_PROP_1 0x0004
++#define XP_PROP_2 0x0008
++#define XP_PROP_3 0x000c
++#define XP_PROP_4 0x0010
++#define XP_PROP_5 0x0014
++#define XP_MAC_ADDR_LO 0x0020
++#define XP_MAC_ADDR_HI 0x0024
++#define XP_ECC_ISR 0x0030
++#define XP_ECC_IER 0x0034
++#define XP_ECC_CNT0 0x003c
++#define XP_ECC_CNT1 0x0040
++#define XP_DRIVER_INT_REQ 0x0060
++#define XP_DRIVER_INT_RO 0x0064
++#define XP_DRIVER_SCRATCH_0 0x0068
++#define XP_DRIVER_SCRATCH_1 0x006c
++#define XP_INT_EN 0x0078
++#define XP_I2C_MUTEX 0x0080
++#define XP_MDIO_MUTEX 0x0084
++
++/* MAC Control register entry bit positions and sizes */
++#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
++#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
++#define XP_DRIVER_INT_RO_STATUS_INDEX 0
++#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
++#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
++#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
++#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
++#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
++#define XP_ECC_CNT0_RX_DED_INDEX 24
++#define XP_ECC_CNT0_RX_DED_WIDTH 8
++#define XP_ECC_CNT0_RX_SEC_INDEX 16
++#define XP_ECC_CNT0_RX_SEC_WIDTH 8
++#define XP_ECC_CNT0_TX_DED_INDEX 8
++#define XP_ECC_CNT0_TX_DED_WIDTH 8
++#define XP_ECC_CNT0_TX_SEC_INDEX 0
++#define XP_ECC_CNT0_TX_SEC_WIDTH 8
++#define XP_ECC_CNT1_DESC_DED_INDEX 8
++#define XP_ECC_CNT1_DESC_DED_WIDTH 8
++#define XP_ECC_CNT1_DESC_SEC_INDEX 0
++#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
++#define XP_ECC_IER_DESC_DED_INDEX 0
++#define XP_ECC_IER_DESC_DED_WIDTH 1
++#define XP_ECC_IER_DESC_SEC_INDEX 1
++#define XP_ECC_IER_DESC_SEC_WIDTH 1
++#define XP_ECC_IER_RX_DED_INDEX 2
++#define XP_ECC_IER_RX_DED_WIDTH 1
++#define XP_ECC_IER_RX_SEC_INDEX 3
++#define XP_ECC_IER_RX_SEC_WIDTH 1
++#define XP_ECC_IER_TX_DED_INDEX 4
++#define XP_ECC_IER_TX_DED_WIDTH 1
++#define XP_ECC_IER_TX_SEC_INDEX 5
++#define XP_ECC_IER_TX_SEC_WIDTH 1
++#define XP_ECC_ISR_DESC_DED_INDEX 0
++#define XP_ECC_ISR_DESC_DED_WIDTH 1
++#define XP_ECC_ISR_DESC_SEC_INDEX 1
++#define XP_ECC_ISR_DESC_SEC_WIDTH 1
++#define XP_ECC_ISR_RX_DED_INDEX 2
++#define XP_ECC_ISR_RX_DED_WIDTH 1
++#define XP_ECC_ISR_RX_SEC_INDEX 3
++#define XP_ECC_ISR_RX_SEC_WIDTH 1
++#define XP_ECC_ISR_TX_DED_INDEX 4
++#define XP_ECC_ISR_TX_DED_WIDTH 1
++#define XP_ECC_ISR_TX_SEC_INDEX 5
++#define XP_ECC_ISR_TX_SEC_WIDTH 1
++#define XP_I2C_MUTEX_BUSY_INDEX 31
++#define XP_I2C_MUTEX_BUSY_WIDTH 1
++#define XP_I2C_MUTEX_ID_INDEX 29
++#define XP_I2C_MUTEX_ID_WIDTH 2
++#define XP_I2C_MUTEX_ACTIVE_INDEX 0
++#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
++#define XP_MAC_ADDR_HI_VALID_INDEX 31
++#define XP_MAC_ADDR_HI_VALID_WIDTH 1
++#define XP_PROP_0_CONN_TYPE_INDEX 28
++#define XP_PROP_0_CONN_TYPE_WIDTH 3
++#define XP_PROP_0_MDIO_ADDR_INDEX 16
++#define XP_PROP_0_MDIO_ADDR_WIDTH 5
++#define XP_PROP_0_PORT_ID_INDEX 0
++#define XP_PROP_0_PORT_ID_WIDTH 8
++#define XP_PROP_0_PORT_MODE_INDEX 8
++#define XP_PROP_0_PORT_MODE_WIDTH 4
++#define XP_PROP_0_PORT_SPEEDS_INDEX 23
++#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
++#define XP_PROP_1_MAX_RX_DMA_INDEX 24
++#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
++#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
++#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
++#define XP_PROP_1_MAX_TX_DMA_INDEX 16
++#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
++#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
++#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
++#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
++#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
++#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
++#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
++#define XP_PROP_3_GPIO_MASK_INDEX 28
++#define XP_PROP_3_GPIO_MASK_WIDTH 4
++#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
++#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
++#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
++#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
++#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
++#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
++#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
++#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
++#define XP_PROP_3_GPIO_ADDR_INDEX 8
++#define XP_PROP_3_GPIO_ADDR_WIDTH 3
++#define XP_PROP_3_MDIO_RESET_INDEX 0
++#define XP_PROP_3_MDIO_RESET_WIDTH 2
++#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
++#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
++#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
++#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
++#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
++#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
++#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
++#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
++#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
++#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
++#define XP_PROP_4_MUX_CHAN_INDEX 4
++#define XP_PROP_4_MUX_CHAN_WIDTH 3
++#define XP_PROP_4_REDRV_ADDR_INDEX 16
++#define XP_PROP_4_REDRV_ADDR_WIDTH 7
++#define XP_PROP_4_REDRV_IF_INDEX 23
++#define XP_PROP_4_REDRV_IF_WIDTH 1
++#define XP_PROP_4_REDRV_LANE_INDEX 24
++#define XP_PROP_4_REDRV_LANE_WIDTH 3
++#define XP_PROP_4_REDRV_MODEL_INDEX 28
++#define XP_PROP_4_REDRV_MODEL_WIDTH 3
++#define XP_PROP_4_REDRV_PRESENT_INDEX 31
++#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
++
++/* I2C Control register offsets */
++#define IC_CON 0x0000
++#define IC_TAR 0x0004
++#define IC_DATA_CMD 0x0010
++#define IC_INTR_STAT 0x002c
++#define IC_INTR_MASK 0x0030
++#define IC_RAW_INTR_STAT 0x0034
++#define IC_CLR_INTR 0x0040
++#define IC_CLR_TX_ABRT 0x0054
++#define IC_CLR_STOP_DET 0x0060
++#define IC_ENABLE 0x006c
++#define IC_TXFLR 0x0074
++#define IC_RXFLR 0x0078
++#define IC_TX_ABRT_SOURCE 0x0080
++#define IC_ENABLE_STATUS 0x009c
++#define IC_COMP_PARAM_1 0x00f4
++
++/* I2C Control register entry bit positions and sizes */
++#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
++#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
++#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
++#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
++#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
++#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
++#define IC_CON_MASTER_MODE_INDEX 0
++#define IC_CON_MASTER_MODE_WIDTH 1
++#define IC_CON_RESTART_EN_INDEX 5
++#define IC_CON_RESTART_EN_WIDTH 1
++#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
++#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
++#define IC_CON_SLAVE_DISABLE_INDEX 6
++#define IC_CON_SLAVE_DISABLE_WIDTH 1
++#define IC_CON_SPEED_INDEX 1
++#define IC_CON_SPEED_WIDTH 2
++#define IC_DATA_CMD_CMD_INDEX 8
++#define IC_DATA_CMD_CMD_WIDTH 1
++#define IC_DATA_CMD_STOP_INDEX 9
++#define IC_DATA_CMD_STOP_WIDTH 1
++#define IC_ENABLE_ABORT_INDEX 1
++#define IC_ENABLE_ABORT_WIDTH 1
++#define IC_ENABLE_EN_INDEX 0
++#define IC_ENABLE_EN_WIDTH 1
++#define IC_ENABLE_STATUS_EN_INDEX 0
++#define IC_ENABLE_STATUS_EN_WIDTH 1
++#define IC_INTR_MASK_TX_EMPTY_INDEX 4
++#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
++#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
++#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
++#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
++#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
++#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
++#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
++#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
++#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
++
++/* I2C Control register value */
++#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
++#define IC_TX_ABRT_ARB_LOST 0x1000
++
++/* Descriptor/Packet entry bit positions and sizes */
++#define RX_PACKET_ERRORS_CRC_INDEX 2
++#define RX_PACKET_ERRORS_CRC_WIDTH 1
++#define RX_PACKET_ERRORS_FRAME_INDEX 3
++#define RX_PACKET_ERRORS_FRAME_WIDTH 1
++#define RX_PACKET_ERRORS_LENGTH_INDEX 0
++#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
++#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
++#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
++
++#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
++#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
++#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
++#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
++#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
++#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
++#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
++#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
++
++#define RX_NORMAL_DESC0_OVT_INDEX 0
++#define RX_NORMAL_DESC0_OVT_WIDTH 16
++#define RX_NORMAL_DESC2_HL_INDEX 0
++#define RX_NORMAL_DESC2_HL_WIDTH 10
++#define RX_NORMAL_DESC3_CDA_INDEX 27
++#define RX_NORMAL_DESC3_CDA_WIDTH 1
++#define RX_NORMAL_DESC3_CTXT_INDEX 30
++#define RX_NORMAL_DESC3_CTXT_WIDTH 1
++#define RX_NORMAL_DESC3_ES_INDEX 15
++#define RX_NORMAL_DESC3_ES_WIDTH 1
++#define RX_NORMAL_DESC3_ETLT_INDEX 16
++#define RX_NORMAL_DESC3_ETLT_WIDTH 4
++#define RX_NORMAL_DESC3_FD_INDEX 29
++#define RX_NORMAL_DESC3_FD_WIDTH 1
++#define RX_NORMAL_DESC3_INTE_INDEX 30
++#define RX_NORMAL_DESC3_INTE_WIDTH 1
++#define RX_NORMAL_DESC3_L34T_INDEX 20
++#define RX_NORMAL_DESC3_L34T_WIDTH 4
++#define RX_NORMAL_DESC3_LD_INDEX 28
++#define RX_NORMAL_DESC3_LD_WIDTH 1
++#define RX_NORMAL_DESC3_OWN_INDEX 31
++#define RX_NORMAL_DESC3_OWN_WIDTH 1
++#define RX_NORMAL_DESC3_PL_INDEX 0
++#define RX_NORMAL_DESC3_PL_WIDTH 14
++#define RX_NORMAL_DESC3_RSV_INDEX 26
++#define RX_NORMAL_DESC3_RSV_WIDTH 1
++
++#define RX_DESC3_L34T_IPV4_TCP 1
++#define RX_DESC3_L34T_IPV4_UDP 2
++#define RX_DESC3_L34T_IPV4_ICMP 3
++#define RX_DESC3_L34T_IPV6_TCP 9
++#define RX_DESC3_L34T_IPV6_UDP 10
++#define RX_DESC3_L34T_IPV6_ICMP 11
++
++#define RX_CONTEXT_DESC3_TSA_INDEX 4
++#define RX_CONTEXT_DESC3_TSA_WIDTH 1
++#define RX_CONTEXT_DESC3_TSD_INDEX 6
++#define RX_CONTEXT_DESC3_TSD_WIDTH 1
++
++#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
++#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
++#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
++#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
++#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
++#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
++#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
++#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
++
++#define TX_CONTEXT_DESC2_MSS_INDEX 0
++#define TX_CONTEXT_DESC2_MSS_WIDTH 15
++#define TX_CONTEXT_DESC3_CTXT_INDEX 30
++#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
++#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
++#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
++#define TX_CONTEXT_DESC3_VLTV_INDEX 16
++#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
++#define TX_CONTEXT_DESC3_VT_INDEX 0
++#define TX_CONTEXT_DESC3_VT_WIDTH 16
++
++#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
++#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
++#define TX_NORMAL_DESC2_IC_INDEX 31
++#define TX_NORMAL_DESC2_IC_WIDTH 1
++#define TX_NORMAL_DESC2_TTSE_INDEX 30
++#define TX_NORMAL_DESC2_TTSE_WIDTH 1
++#define TX_NORMAL_DESC2_VTIR_INDEX 14
++#define TX_NORMAL_DESC2_VTIR_WIDTH 2
++#define TX_NORMAL_DESC3_CIC_INDEX 16
++#define TX_NORMAL_DESC3_CIC_WIDTH 2
++#define TX_NORMAL_DESC3_CPC_INDEX 26
++#define TX_NORMAL_DESC3_CPC_WIDTH 2
++#define TX_NORMAL_DESC3_CTXT_INDEX 30
++#define TX_NORMAL_DESC3_CTXT_WIDTH 1
++#define TX_NORMAL_DESC3_FD_INDEX 29
++#define TX_NORMAL_DESC3_FD_WIDTH 1
++#define TX_NORMAL_DESC3_FL_INDEX 0
++#define TX_NORMAL_DESC3_FL_WIDTH 15
++#define TX_NORMAL_DESC3_LD_INDEX 28
++#define TX_NORMAL_DESC3_LD_WIDTH 1
++#define TX_NORMAL_DESC3_OWN_INDEX 31
++#define TX_NORMAL_DESC3_OWN_WIDTH 1
++#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
++#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
++#define TX_NORMAL_DESC3_TCPPL_INDEX 0
++#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
++#define TX_NORMAL_DESC3_TSE_INDEX 18
++#define TX_NORMAL_DESC3_TSE_WIDTH 1
++
++#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
++
++/* MDIO undefined or vendor specific registers */
++#ifndef MDIO_PMA_10GBR_PMD_CTRL
++#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
++#endif
++
++#ifndef MDIO_PMA_10GBR_FECCTRL
++#define MDIO_PMA_10GBR_FECCTRL 0x00ab
++#endif
++
++#ifndef MDIO_PCS_DIG_CTRL
++#define MDIO_PCS_DIG_CTRL 0x8000
++#endif
++
++#ifndef MDIO_AN_XNP
++#define MDIO_AN_XNP 0x0016
++#endif
++
++#ifndef MDIO_AN_LPX
++#define MDIO_AN_LPX 0x0019
++#endif
++
++#ifndef MDIO_AN_COMP_STAT
++#define MDIO_AN_COMP_STAT 0x0030
++#endif
++
++#ifndef MDIO_AN_INTMASK
++#define MDIO_AN_INTMASK 0x8001
++#endif
++
++#ifndef MDIO_AN_INT
++#define MDIO_AN_INT 0x8002
++#endif
++
++#ifndef MDIO_VEND2_AN_ADVERTISE
++#define MDIO_VEND2_AN_ADVERTISE 0x0004
++#endif
++
++#ifndef MDIO_VEND2_AN_LP_ABILITY
++#define MDIO_VEND2_AN_LP_ABILITY 0x0005
++#endif
++
++#ifndef MDIO_VEND2_AN_CTRL
++#define MDIO_VEND2_AN_CTRL 0x8001
++#endif
++
++#ifndef MDIO_VEND2_AN_STAT
++#define MDIO_VEND2_AN_STAT 0x8002
++#endif
++
++#ifndef MDIO_CTRL1_SPEED1G
++#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
++#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_AN_RESTART
++#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_SS6
++#define MDIO_VEND2_CTRL1_SS6 BIT(6)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_SS13
++#define MDIO_VEND2_CTRL1_SS13 BIT(13)
++#endif
++
++/* MDIO mask values */
++#define AXGBE_AN_CL73_INT_CMPLT BIT(0)
++#define AXGBE_AN_CL73_INC_LINK BIT(1)
++#define AXGBE_AN_CL73_PG_RCV BIT(2)
++#define AXGBE_AN_CL73_INT_MASK 0x07
++
++#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001
++#define AXGBE_XNP_ACK_PROCESSED BIT(12)
++#define AXGBE_XNP_MP_FORMATTED BIT(13)
++#define AXGBE_XNP_NP_EXCHANGE BIT(15)
++
++#define AXGBE_KR_TRAINING_START BIT(0)
++#define AXGBE_KR_TRAINING_ENABLE BIT(1)
++
++#define AXGBE_PCS_CL37_BP BIT(12)
++
++#define AXGBE_AN_CL37_INT_CMPLT BIT(0)
++#define AXGBE_AN_CL37_INT_MASK 0x01
++
++#define AXGBE_AN_CL37_HD_MASK 0x40
++#define AXGBE_AN_CL37_FD_MASK 0x20
++
++#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06
++#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00
++#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04
++#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08
++
++/* Generic Linux-style types, barriers and helpers mapped to DPDK equivalents */
++#define __iomem
++
++#define rmb() rte_rmb() /* dpdk rte provided rmb */
++#define wmb() rte_wmb() /* dpdk rte provided wmb */
++
++#define __le16 u16
++#define __le32 u32
++#define __le64 u64
++
++typedef unsigned char u8;
++typedef unsigned short u16;
++typedef unsigned int u32;
++typedef unsigned long long u64;
++typedef unsigned long long dma_addr_t;
++
++static inline uint32_t low32_value(uint64_t addr)
++{
++ return (addr) & 0x0ffffffff;
++}
++
++static inline uint32_t high32_value(uint64_t addr)
++{
++ return (addr >> 32) & 0x0ffffffff;
++}
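++
++/* Usage sketch (illustrative): a 64-bit descriptor list base address is
++ * split with these helpers before being written to the HI/LO register pair
++ * of a DMA channel, e.g. DMA_CH_TDLR_HI/DMA_CH_TDLR_LO defined above. The
++ * AXGMAC_DMA_IOWRITE() macro used here is defined later in this file;
++ * "channel" and "ring->dma" stand in for the driver's channel and ring
++ * objects, which are introduced in a later patch.
++ *
++ *	AXGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI, high32_value(ring->dma));
++ *	AXGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO, low32_value(ring->dma));
++ */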
++
++/* End of generic compatibility definitions */
++
++/* Bit setting and getting macros
++ * The get macro will extract the current bit field value from within
++ * the variable
++ *
++ * The set macro will clear the current bit field value within the
++ * variable and then set the bit field of the variable to the
++ * specified value
++ */
++#define GET_BITS(_var, _index, _width) \
++ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
++
++#define SET_BITS(_var, _index, _width, _val) \
++do { \
++ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
++ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
++} while (0)
++
++#define GET_BITS_LE(_var, _index, _width) \
++ ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
++
++#define SET_BITS_LE(_var, _index, _width, _val) \
++do { \
++ (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
++ (_var) |= rte_cpu_to_le_32((((_val) & \
++ ((0x1 << (_width)) - 1)) << (_index))); \
++} while (0)
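++
++/* Minimal example of the raw accessors above: clear and then set a 2-bit
++ * field starting at bit 4 of a CPU-endian value; only the targeted bits
++ * change.
++ *
++ *	u32 val = 0;
++ *	SET_BITS(val, 4, 2, 0x3);	-> val is now 0x30
++ *	GET_BITS(val, 4, 2);		-> evaluates to 0x3
++ */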
++
++/* Bit setting and getting macros based on register fields
++ * The get macro uses the bit field definitions formed using the input
++ * names to extract the current bit field value from within the
++ * variable
++ *
++ * The set macro uses the bit field definitions formed using the input
++ * names to set the bit field of the variable to the specified value
++ */
++#define AXGMAC_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \
++ GET_BITS_LE((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
++ SET_BITS_LE((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
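++
++/* The _prefix/_field arguments are pasted onto the *_INDEX/*_WIDTH names
++ * defined earlier in this file; e.g. with the MAC_HWF0R definitions above:
++ *
++ *	AXGMAC_GET_BITS(mac_hwf0r, MAC_HWF0R, TXCOESEL)
++ *
++ * expands to GET_BITS(mac_hwf0r, 14, 1), i.e. the Tx checksum offload
++ * capability bit. "mac_hwf0r" is just a local u32 holding the register
++ * value in this sketch.
++ */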
++
++/* Macros for reading or writing registers
++ * The ioread macros will get bit fields or full values using the
++ * register definitions formed using the input names
++ *
++ * The iowrite macros will set bit fields or full values using the
++ * register definitions formed using the input names
++ */
++#define AXGMAC_IOREAD(_pdata, _reg) \
++ rte_read32((void *)((_pdata)->xgmac_regs + (_reg)))
++
++#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define AXGMAC_IOWRITE(_pdata, _reg, _val) \
++ rte_write32((_val), (void *)((_pdata)->xgmac_regs + (_reg)))
++
++#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ AXGMAC_IOWRITE((_pdata), _reg, reg_val); \
++} while (0)
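++
++/* Illustrative read-modify-write using the macros above and the MAC_RCR
++ * field definitions earlier in this file (jumbo frame enable). "pdata" is
++ * assumed to be the driver's port structure with xgmac_regs mapped, which
++ * is introduced in another patch.
++ *
++ *	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, 1);
++ *	if (AXGMAC_IOREAD_BITS(pdata, MAC_RCR, JE))
++ *		... jumbo frames are now accepted ...
++ */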
++
++/* Macros for reading or writing MTL queue or traffic class registers
++ * Similar to the standard read and write macros except that the
++ * base register value is calculated by the queue or traffic class number
++ */
++#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \
++ rte_read32((void *)((_pdata)->xgmac_regs + \
++ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)))
++
++#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
++ GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
++ rte_write32((_val), (void *)((_pdata)->xgmac_regs + \
++ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)))
++
++#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
++do { \
++ u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
++} while (0)
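++
++/* Example (sketch): enable Rx store-and-forward on MTL queue "i" using the
++ * MTL_Q_RQOMR field definitions above; the queue number selects the
++ * per-queue register block at MTL_Q_BASE + (i * MTL_Q_INC). "pdata" is
++ * assumed as in the earlier sketches.
++ *
++ *	AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, MTL_RSF_ENABLE);
++ */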
++
++/* Macros for reading or writing DMA channel registers
++ * Similar to the standard read and write macros except that the
++ * base register value is obtained from the ring
++ */
++#define AXGMAC_DMA_IOREAD(_channel, _reg) \
++ rte_read32((void *)((_channel)->dma_regs + (_reg)))
++
++#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
++ GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \
++ rte_write32((_val), (void *)((_channel)->dma_regs + (_reg)))
++
++#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
++do { \
++ u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
++} while (0)
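++
++/* Example (sketch): start the transmit DMA engine of one channel by setting
++ * the ST bit of DMA_CH_TCR. "channel" is the driver's per-channel object
++ * (added in a later patch) whose dma_regs member points at that channel's
++ * register block.
++ *
++ *	AXGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
++ */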
++
++/* Macros for building, reading or writing register values or bits
++ * within the register values of XPCS registers.
++ */
++#define XPCS_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XPCS32_IOWRITE(_pdata, _off, _val) \
++ rte_write32(_val, (void *)((_pdata)->xpcs_regs + (_off)))
++
++#define XPCS32_IOREAD(_pdata, _off) \
++ rte_read32((void *)((_pdata)->xpcs_regs + (_off)))
++
++#define XPCS16_IOWRITE(_pdata, _off, _val) \
++ rte_write16(_val, (void *)((_pdata)->xpcs_regs + (_off)))
++
++#define XPCS16_IOREAD(_pdata, _off) \
++ rte_read16((void *)((_pdata)->xpcs_regs + (_off)))
++
++/* Macros for building, reading or writing register values or bits
++ * within the register values of SerDes integration registers.
++ */
++#define XSIR_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XSIR0_IOREAD(_pdata, _reg) \
++ rte_read16((void *)((_pdata)->sir0_regs + (_reg)))
++
++#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XSIR0_IOWRITE(_pdata, _reg, _val) \
++ rte_write16((_val), (void *)((_pdata)->sir0_regs + (_reg)))
++
++#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
++} while (0)
++
++#define XSIR1_IOREAD(_pdata, _reg) \
++ rte_read16((void *)((_pdata)->sir1_regs + _reg))
++
++#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XSIR1_IOWRITE(_pdata, _reg, _val) \
++ rte_write16((_val), (void *)((_pdata)->sir1_regs + (_reg)))
++
++#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
++} while (0)
++
++/* Macros for building, reading or writing register values or bits
++ * within the register values of SerDes RxTx registers.
++ */
++#define XRXTX_IOREAD(_pdata, _reg) \
++ rte_read16((void *)((_pdata)->rxtx_regs + (_reg)))
++
++#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XRXTX_IOWRITE(_pdata, _reg, _val) \
++ rte_write16((_val), (void *)((_pdata)->rxtx_regs + (_reg)))
++
++#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
++} while (0)
++
++/* Macros for building, reading or writing register values or bits
++ * within the register values of MAC Control registers.
++ */
++#define XP_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XP_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XP_IOREAD(_pdata, _reg) \
++ rte_read32((void *)((_pdata)->xprop_regs + (_reg)))
++
++#define XP_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XP_IOWRITE(_pdata, _reg, _val) \
++ rte_write32((_val), (void *)((_pdata)->xprop_regs + (_reg)))
++
++#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XP_IOWRITE((_pdata), (_reg), reg_val); \
++} while (0)
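++
++/* Example (sketch): query the port property registers exposed through the
++ * MAC control block, e.g. the supported speed bitmap and port mode from
++ * XP_PROP_0 (field definitions above); pdata->xprop_regs is assumed to be
++ * mapped.
++ *
++ *	unsigned int speeds = XP_IOREAD_BITS(pdata, XP_PROP_0, PORT_SPEEDS);
++ *	unsigned int mode   = XP_IOREAD_BITS(pdata, XP_PROP_0, PORT_MODE);
++ */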
++
++/* Macros for building, reading or writing register values or bits
++ * within the register values of I2C Control registers.
++ */
++#define XI2C_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XI2C_IOREAD(_pdata, _reg) \
++ rte_read32((void *)((_pdata)->xi2c_regs + (_reg)))
++
++#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XI2C_IOWRITE(_pdata, _reg, _val) \
++ rte_write32((_val), (void *)((_pdata)->xi2c_regs + (_reg)))
++
++#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
++} while (0)
++
++/* Macros for building, reading or writing register values or bits
++ * using MDIO. These differ from the macros above in that they use the
++ * standardized Linux include values. No shifting is performed by the
++ * bit operations; everything works on mask values.
++ */
++#define XMDIO_READ(_pdata, _mmd, _reg) \
++ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
++ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff)))
++
++#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
++ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
++
++#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
++ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
++ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val)))
++
++#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
++do { \
++ u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \
++ mmd_val &= ~(_mask); \
++ mmd_val |= (_val); \
++ XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \
++} while (0)
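++
++/* Example (sketch): read and then clear the auto-negotiation interrupt
++ * status register defined below. MDIO_MMD_AN is the standard clause-45
++ * auto-negotiation device identifier; it is assumed to come from the
++ * driver's MDIO/PHY definitions in a later patch, not from this file.
++ *
++ *	unsigned int an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
++ *	XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ */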
++
++/*
++ * time_after(a,b) returns true if the time a is after time b.
++ *
++ * Do this with "<0" and ">=0" to only test the sign of the result. A
++ * good compiler would generate better code (and a really good compiler
++ * wouldn't care). Gcc is currently neither.
++ */
++#define time_after(a, b) ((long)((b) - (a)) < 0)
++#define time_before(a, b) time_after(b, a)
++
++#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
++#define time_before_eq(a, b) time_after_eq(b, a)
++
++/* Bitmap support APIs */
++static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
++{
++ int res;
++
++ rte_mb();
++ res = ((*addr) & (1UL << nr)) != 0;
++ rte_mb();
++ return res;
++}
++
++static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
++{
++ __sync_fetch_and_or(addr, (1UL << nr));
++}
++
++static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
++{
++ __sync_fetch_and_and(addr, ~(1UL << nr));
++}
++
++static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
++{
++ unsigned long mask = (1UL << nr);
++
++ return __sync_fetch_and_and(addr, ~mask) & mask;
++}
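++
++/* These helpers give atomic, word-sized bit flags; a typical use is a
++ * driver state bitmap (the actual state bits are defined in a later patch):
++ *
++ *	unsigned long dev_state = 0;
++ *
++ *	axgbe_set_bit(0, &dev_state);
++ *	if (axgbe_test_and_clear_bit(0, &dev_state))
++ *		... bit 0 was set and has now been cleared ...
++ */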
++
++static inline unsigned long msecs_to_timer_cycles(unsigned int m)
++{
++ return rte_get_timer_hz() * (m / 1000);
++}
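++
++/* Example (sketch): a wraparound-safe timeout built from the helpers above.
++ * Note that the millisecond argument is divided by 1000 first, so values
++ * below one second resolve to zero cycles.
++ *
++ *	unsigned long timeout = rte_get_timer_cycles() +
++ *				msecs_to_timer_cycles(2000);
++ *
++ *	while (!condition) {
++ *		if (time_after(rte_get_timer_cycles(), timeout))
++ *			return -ETIMEDOUT;
++ *	}
++ */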
++
+ #endif /* __AXGBE_COMMON_H__ */
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-03-18-net-axgbe-add-phy-register-map-and-helper-macros.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-03-18-net-axgbe-add-phy-register-map-and-helper-macros.patch
new file mode 100644
index 00000000..89e2af4d
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-03-18-net-axgbe-add-phy-register-map-and-helper-macros.patch
@@ -0,0 +1,341 @@
+From patchwork Fri Mar 9 08:42:19 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 03/18] net/axgbe: add phy register map and helper macros
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35822
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-3-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:19 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_phy.h | 314 ++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 314 insertions(+)
+ create mode 100644 drivers/net/axgbe/axgbe_phy.h
+
+diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h
+new file mode 100644
+index 0000000..f7c8a88
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_phy.h
+@@ -0,0 +1,314 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef __AXGBE_PHY_H__
++#define __AXGBE_PHY_H__
++
++#define SPEED_10 10
++#define SPEED_100 100
++#define SPEED_1000 1000
++#define SPEED_2500 2500
++#define SPEED_10000 10000
++
++
++/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
++ * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
++ */
++#define MII_ADDR_C45 (1 << 30)
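As an aside (not part of the patch itself), the clause-45 convention described above packs the 5-bit MMD number into bits 16-20 of the register number and sets MII_ADDR_C45 so the access layer can tell it apart from a clause-22 register. A minimal sketch, with a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper, for illustration only: build a clause-45
 * (MMD:REG) address in the form the MMD access routines added later in
 * this series expect, i.e. MII_ADDR_C45 | (mmd << 16) | reg.
 */
static inline uint32_t axgbe_c45_addr(uint32_t mmd, uint32_t reg)
{
	return MII_ADDR_C45 | (mmd << 16) | (reg & 0xffff);
}

/* Example: axgbe_c45_addr(MDIO_MMD_AN, MDIO_AN_ADVERTISE) */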
++
++/* Basic mode status register. */
++#define BMSR_LSTATUS 0x0004 /* Link status */
++
++/* Status register 1. */
++#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
++
++/* Generic MII registers. */
++#define MII_BMCR 0x00 /* Basic mode control register */
++#define MII_BMSR 0x01 /* Basic mode status register */
++#define MII_PHYSID1 0x02 /* PHYS ID 1 */
++#define MII_PHYSID2 0x03 /* PHYS ID 2 */
++#define MII_ADVERTISE 0x04 /* Advertisement control reg */
++#define MII_LPA 0x05 /* Link partner ability reg */
++#define MII_EXPANSION 0x06 /* Expansion register */
++#define MII_CTRL1000 0x09 /* 1000BASE-T control */
++#define MII_STAT1000 0x0a /* 1000BASE-T status */
++#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
++#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
++#define MII_ESTATUS 0x0f /* Extended Status */
++#define MII_DCOUNTER 0x12 /* Disconnect counter */
++#define MII_FCSCOUNTER 0x13 /* False carrier counter */
++#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
++#define MII_RERRCOUNTER 0x15 /* Receive error counter */
++#define MII_SREVISION 0x16 /* Silicon revision */
++#define MII_RESV1 0x17 /* Reserved... */
++#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
++#define MII_PHYADDR 0x19 /* PHY address */
++#define MII_RESV2 0x1a /* Reserved... */
++#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
++#define MII_NCONFIG 0x1c /* Network interface config */
++
++/* Basic mode control register. */
++#define BMCR_RESV 0x003f /* Unused... */
++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
++#define BMCR_CTST 0x0080 /* Collision test */
++#define BMCR_FULLDPLX 0x0100 /* Full duplex */
++#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
++#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */
++#define BMCR_PDOWN 0x0800 /* Enable low power state */
++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
++#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
++#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
++#define BMCR_RESET 0x8000 /* Reset to default state */
++#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
++
++
++/* MDIO Manageable Devices (MMDs). */
++#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment
++ * Physical Medium Dependent
++ */
++#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
++#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
++#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
++#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
++#define MDIO_MMD_TC 6 /* Transmission Convergence */
++#define MDIO_MMD_AN 7 /* Auto-Negotiation */
++#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
++#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
++#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
++
++/* Generic MDIO registers. */
++#define MDIO_CTRL1 MII_BMCR
++#define MDIO_STAT1 MII_BMSR
++#define MDIO_DEVID1 MII_PHYSID1
++#define MDIO_DEVID2 MII_PHYSID2
++#define MDIO_SPEED 4 /* Speed ability */
++#define MDIO_DEVS1 5 /* Devices in package */
++#define MDIO_DEVS2 6
++#define MDIO_CTRL2 7 /* 10G control 2 */
++#define MDIO_STAT2 8 /* 10G status 2 */
++#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
++#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
++#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
++#define MDIO_PKGID1 14 /* Package identifier */
++#define MDIO_PKGID2 15
++#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
++#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
++#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
++#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
++#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
++#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
++#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
++
++/* Media-dependent registers. */
++#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
++#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
++#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
++ * Lanes B-D are numbered 134-136.
++ */
++#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
++#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
++#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
++#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
++#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
++#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
++
++/* Control register 1. */
++/* Enable extended speed selection */
++#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
++/* All speed selection bits */
++#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
++#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
++#define MDIO_CTRL1_LPOWER BMCR_PDOWN
++#define MDIO_CTRL1_RESET BMCR_RESET
++#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
++#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
++#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
++#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
++#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
++#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
++#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
++#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
++#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
++
++
++
++
++
++/* PMA 10GBASE-R FEC ability register. */
++#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
++#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
++
++
++/* Autoneg related */
++#define ADVERTISED_Autoneg (1 << 6)
++#define SUPPORTED_Autoneg (1 << 6)
++#define AUTONEG_DISABLE 0x00
++#define AUTONEG_ENABLE 0x01
++
++#define ADVERTISED_Pause (1 << 13)
++#define ADVERTISED_Asym_Pause (1 << 14)
++
++#define SUPPORTED_Pause (1 << 13)
++#define SUPPORTED_Asym_Pause (1 << 14)
++
++#define SUPPORTED_Backplane (1 << 16)
++#define SUPPORTED_TP (1 << 7)
++
++#define ADVERTISED_10000baseR_FEC (1 << 20)
++
++#define SUPPORTED_10000baseR_FEC (1 << 20)
++
++#define SUPPORTED_FIBRE (1 << 10)
++
++#define ADVERTISED_10000baseKR_Full (1 << 19)
++#define ADVERTISED_10000baseT_Full (1 << 12)
++#define ADVERTISED_2500baseX_Full (1 << 15)
++#define ADVERTISED_1000baseKX_Full (1 << 17)
++#define ADVERTISED_1000baseT_Full (1 << 5)
++#define ADVERTISED_100baseT_Full (1 << 3)
++#define ADVERTISED_TP (1 << 7)
++#define ADVERTISED_FIBRE (1 << 10)
++#define ADVERTISED_Backplane (1 << 16)
++
++#define SUPPORTED_1000baseKX_Full (1 << 17)
++#define SUPPORTED_10000baseKR_Full (1 << 19)
++#define SUPPORTED_2500baseX_Full (1 << 15)
++#define SUPPORTED_100baseT_Full (1 << 2)
++#define SUPPORTED_1000baseT_Full (1 << 5)
++#define SUPPORTED_10000baseT_Full (1 << 12)
++#define SUPPORTED_2500baseX_Full (1 << 15)
++
++
++#define SPEED_UNKNOWN -1
++
++/* Duplex, half or full. */
++#define DUPLEX_HALF 0x00
++#define DUPLEX_FULL 0x01
++#define DUPLEX_UNKNOWN 0xff
++
++#endif
++/* PHY */
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-04-18-net-axgbe-add-structures-for-MAC-initialization-and-reset.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-04-18-net-axgbe-add-structures-for-MAC-initialization-and-reset.patch
new file mode 100644
index 00000000..052aeae3
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-04-18-net-axgbe-add-structures-for-MAC-initialization-and-reset.patch
@@ -0,0 +1,920 @@
+From patchwork Fri Mar 9 08:42:20 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 04/18] net/axgbe: add structures for MAC initialization and reset
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35823
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-4-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:20 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/Makefile | 1 +
+ drivers/net/axgbe/axgbe_dev.c | 167 +++++++++++++++++++
+ drivers/net/axgbe/axgbe_ethdev.c | 301 ++++++++++++++++++++++++++++++++-
+ drivers/net/axgbe/axgbe_ethdev.h | 349 +++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 816 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/net/axgbe/axgbe_dev.c
+
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+index c8a1e87..ce2485d 100644
+--- a/drivers/net/axgbe/Makefile
++++ b/drivers/net/axgbe/Makefile
+@@ -142,5 +142,6 @@ LIBABIVER := 1
+ # all source are stored in SRCS-y
+ #
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
+
+ include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
+new file mode 100644
+index 0000000..8bf3b82
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_dev.c
+@@ -0,0 +1,167 @@
++/*-
++ * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++#include "axgbe_common.h"
++#include "axgbe_phy.h"
++
++static int __axgbe_exit(struct axgbe_port *pdata)
++{
++ unsigned int count = 2000;
++
++ /* Issue a software reset */
++ AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
++ rte_delay_us(10);
++
++ /* Poll Until Poll Condition */
++ while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
++ rte_delay_us(500);
++
++ if (!count)
++ return -EBUSY;
++
++ return 0;
++}
++
++static int axgbe_exit(struct axgbe_port *pdata)
++{
++ int ret;
++
++ /* To guard against possible incorrectly generated interrupts,
++ * issue the software reset twice.
++ */
++ ret = __axgbe_exit(pdata);
++ if (ret)
++ return ret;
++
++ return __axgbe_exit(pdata);
++}
++
++void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
++{
++ hw_if->exit = axgbe_exit;
++}
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index 0b7894f..786d929 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -126,6 +126,8 @@
+ */
+
+ #include "axgbe_ethdev.h"
++#include "axgbe_common.h"
++#include "axgbe_phy.h"
+
+ static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+ static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+@@ -144,6 +146,190 @@ static const struct rte_pci_id pci_id_axgbe_map[] = {
+ { .vendor_id = 0, },
+ };
+
++static struct axgbe_version_data axgbe_v2a = {
++ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
++ .mmc_64bit = 1,
++ .tx_max_fifo_size = 229376,
++ .rx_max_fifo_size = 229376,
++ .tx_tstamp_workaround = 1,
++ .ecc_support = 1,
++ .i2c_support = 1,
++};
++
++static struct axgbe_version_data axgbe_v2b = {
++ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
++ .mmc_64bit = 1,
++ .tx_max_fifo_size = 65536,
++ .rx_max_fifo_size = 65536,
++ .tx_tstamp_workaround = 1,
++ .ecc_support = 1,
++ .i2c_support = 1,
++};
++
++static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
++{
++ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
++ struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
++
++ mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
++ mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
++ mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
++
++ memset(hw_feat, 0, sizeof(*hw_feat));
++
++ hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
++
++ /* Hardware feature register 0 */
++ hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
++ hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
++ hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
++ hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
++ hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
++ hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
++ hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
++ hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
++ hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
++ hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
++ hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
++ hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
++ ADDMACADRSEL);
++ hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
++ hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
++
++ /* Hardware feature register 1 */
++ hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
++ RXFIFOSIZE);
++ hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
++ TXFIFOSIZE);
++ hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
++ MAC_HWF1R, ADVTHWORD);
++ hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
++ hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
++ hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
++ hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
++ hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
++ hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
++ hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
++ hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
++ HASHTBLSZ);
++ hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
++ L3L4FNUM);
++
++ /* Hardware feature register 2 */
++ hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
++ hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
++ hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
++ hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
++ hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
++ hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
++ AUXSNAPNUM);
++
++ /* Translate the Hash Table size into actual number */
++ switch (hw_feat->hash_table_size) {
++ case 0:
++ break;
++ case 1:
++ hw_feat->hash_table_size = 64;
++ break;
++ case 2:
++ hw_feat->hash_table_size = 128;
++ break;
++ case 3:
++ hw_feat->hash_table_size = 256;
++ break;
++ }
++
++ /* Translate the address width setting into actual number */
++ switch (hw_feat->dma_width) {
++ case 0:
++ hw_feat->dma_width = 32;
++ break;
++ case 1:
++ hw_feat->dma_width = 40;
++ break;
++ case 2:
++ hw_feat->dma_width = 48;
++ break;
++ default:
++ hw_feat->dma_width = 32;
++ }
++
++ /* The Queue, Channel and TC counts are zero based so increment them
++ * to get the actual number
++ */
++ hw_feat->rx_q_cnt++;
++ hw_feat->tx_q_cnt++;
++ hw_feat->rx_ch_cnt++;
++ hw_feat->tx_ch_cnt++;
++ hw_feat->tc_cnt++;
++
++ /* Translate the fifo sizes into actual numbers */
++ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
++ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
++}
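Concretely (illustrative numbers, not from the patch): a raw RXFIFOSIZE field of 9 decodes to 1 << (9 + 7) = 65536 bytes, a hash_table_size code of 3 becomes 256 entries, and the queue, channel and TC counts are simply bumped by one because the hardware reports them zero-based.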
++
++static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
++{
++ axgbe_init_function_ptrs_dev(&pdata->hw_if);
++}
++
++static void axgbe_set_counts(struct axgbe_port *pdata)
++{
++ /* Set all the function pointers */
++ axgbe_init_all_fptrs(pdata);
++
++ /* Populate the hardware features */
++ axgbe_get_all_hw_features(pdata);
++
++ /* Set default max values if not provided */
++ if (!pdata->tx_max_channel_count)
++ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
++ if (!pdata->rx_max_channel_count)
++ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
++
++ if (!pdata->tx_max_q_count)
++ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
++ if (!pdata->rx_max_q_count)
++ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
++
++ /* Calculate the number of Tx and Rx rings to be created
++ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
++ * the number of Tx queues to the number of Tx channels
++ * enabled
++ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
++ * number of Rx queues or maximum allowed
++ */
++ pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
++ pdata->tx_max_channel_count);
++ pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
++ pdata->tx_max_q_count);
++
++ pdata->tx_q_count = pdata->tx_ring_count;
++
++ pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
++ pdata->rx_max_channel_count);
++
++ pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
++ pdata->rx_max_q_count);
++}
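For example, if the hardware feature registers report 8 Tx DMA channels and 8 Tx queues while the per-port maximums allow only 4 of each, tx_ring_count resolves to RTE_MIN(8, 4) = 4 and tx_q_count follows it; rx_q_count is clamped against the Rx queue maximum independently of the Rx channel count (numbers here are illustrative only).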
++
++static void axgbe_default_config(struct axgbe_port *pdata)
++{
++ pdata->pblx8 = DMA_PBL_X8_ENABLE;
++ pdata->tx_sf_mode = MTL_TSF_ENABLE;
++ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
++ pdata->tx_pbl = DMA_PBL_32;
++ pdata->tx_osp_mode = DMA_OSP_ENABLE;
++ pdata->rx_sf_mode = MTL_RSF_ENABLE;
++ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
++ pdata->rx_pbl = DMA_PBL_32;
++ pdata->pause_autoneg = 1;
++ pdata->tx_pause = 0;
++ pdata->rx_pause = 0;
++ pdata->phy_speed = SPEED_UNKNOWN;
++ pdata->power_down = 0;
++}
++
+ /*
+ * It returns 0 on success.
+ */
+@@ -153,8 +339,13 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata;
+ struct rte_pci_device *pci_dev;
++ uint32_t reg, mac_lo, mac_hi;
++ int ret;
+
+ pdata = (struct axgbe_port *)eth_dev->data->dev_private;
++ /* initial state */
++ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
++ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
+
+ /*
+@@ -167,6 +358,106 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdata->pci_dev = pci_dev;
+
++ pdata->xgmac_regs =
++ (uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
++ pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
++ pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
++ pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
++
++ /* version specific driver data*/
++ if (pci_dev->id.device_id == 0x1458)
++ pdata->vdata = &axgbe_v2a;
++ else
++ pdata->vdata = &axgbe_v2b;
++
++ /* Configure the PCS indirect addressing support */
++ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
++ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
++ pdata->xpcs_window <<= 6;
++ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
++ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
++ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
++ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
++ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
++ PMD_INIT_LOG(DEBUG,
++ "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
++ pdata->xpcs_window_size, pdata->xpcs_window_mask);
++ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
++
++ /* Retrieve the MAC address */
++ mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
++ mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
++ pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
++ pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
++ pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
++ pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
++ pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
++ pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
++
++ eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
++ ETHER_ADDR_LEN, 0);
++ if (!eth_dev->data->mac_addrs) {
++ PMD_INIT_LOG(ERR,
++ "Failed to alloc %u bytes needed to store MAC addr tbl",
++ ETHER_ADDR_LEN);
++ return -ENOMEM;
++ }
++
++ if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
++ eth_random_addr(pdata->mac_addr.addr_bytes);
++
++ /* Copy the permanent MAC address */
++ ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
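As a quick check of the byte ordering above: if XP_MAC_ADDR_LO read back 0xddccbbaa and XP_MAC_ADDR_HI read back 0x0000ffee, the resulting station address would be aa:bb:cc:dd:ee:ff (values chosen purely for illustration).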
++
++ /* Clock settings */
++ pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
++ pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
++
++ /* Set the DMA coherency values */
++ pdata->coherent = 1;
++ pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
++ pdata->arcache = AXGBE_DMA_OS_ARCACHE;
++ pdata->awcache = AXGBE_DMA_OS_AWCACHE;
++
++ /* Set the maximum channels and queues */
++ reg = XP_IOREAD(pdata, XP_PROP_1);
++ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
++ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
++ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
++ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
++
++ /* Set the hardware channel and queue counts */
++ axgbe_set_counts(pdata);
++
++ /* Set the maximum fifo amounts */
++ reg = XP_IOREAD(pdata, XP_PROP_2);
++ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
++ pdata->tx_max_fifo_size *= 16384;
++ pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
++ pdata->vdata->tx_max_fifo_size);
++ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
++ pdata->rx_max_fifo_size *= 16384;
++ pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
++ pdata->vdata->rx_max_fifo_size);
++ /* Issue software reset to DMA */
++ ret = pdata->hw_if.exit(pdata);
++ if (ret)
++ PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error");
++
++ /* Set default configuration data */
++ axgbe_default_config(pdata);
++
++ /* Set default max values if not provided */
++ if (!pdata->tx_max_fifo_size)
++ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
++ if (!pdata->rx_max_fifo_size)
++ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
++
++ pthread_mutex_init(&pdata->xpcs_mutex, NULL);
++ pthread_mutex_init(&pdata->i2c_mutex, NULL);
++ pthread_mutex_init(&pdata->an_mutex, NULL);
++ pthread_mutex_init(&pdata->phy_mutex, NULL);
++
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+@@ -175,11 +466,17 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ }
+
+ static int
+-eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev __rte_unused)
++eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+ {
+- /* stub function */
+ PMD_INIT_FUNC_TRACE();
+
++ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
++ return 0;
++
++	/* Free MAC addresses */
++ rte_free(eth_dev->data->mac_addrs);
++ eth_dev->data->mac_addrs = NULL;
++
+ return 0;
+ }
+
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+index 5f8931f..5d20a1f 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.h
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -132,6 +132,263 @@
+ #include <rte_lcore.h>
+ #include "axgbe_common.h"
+
++#define AXGBE_MAX_DMA_CHANNELS 16
++#define AXGBE_MAX_QUEUES 16
++#define AXGBE_PRIORITY_QUEUES 8
++#define AXGBE_DMA_STOP_TIMEOUT 1
++
++/* DMA cache settings - Outer sharable, write-back, write-allocate */
++#define AXGBE_DMA_OS_AXDOMAIN 0x2
++#define AXGBE_DMA_OS_ARCACHE 0xb
++#define AXGBE_DMA_OS_AWCACHE 0xf
++
++/* DMA cache settings - System, no caches used */
++#define AXGBE_DMA_SYS_AXDOMAIN 0x3
++#define AXGBE_DMA_SYS_ARCACHE 0x0
++#define AXGBE_DMA_SYS_AWCACHE 0x0
++
++/* PCI BAR mapping */
++#define AXGBE_AXGMAC_BAR 0
++#define AXGBE_XPCS_BAR 1
++#define AXGBE_MAC_PROP_OFFSET 0x1d000
++#define AXGBE_I2C_CTRL_OFFSET 0x1e000
++
++/* PCI clock frequencies */
++#define AXGBE_V2_DMA_CLOCK_FREQ 500000000
++#define AXGBE_V2_PTP_CLOCK_FREQ 125000000
++
++#define AXGMAC_FIFO_MIN_ALLOC 2048
++#define AXGMAC_FIFO_UNIT 256
++#define AXGMAC_FIFO_ALIGN(_x) \
++ (((_x) + AXGMAC_FIFO_UNIT - 1) & ~(AXGMAC_FIFO_UNIT - 1))
++#define AXGMAC_FIFO_FC_OFF 2048
++#define AXGMAC_FIFO_FC_MIN 4096
++
++#define AXGBE_TC_MIN_QUANTUM 10
++
++/* Flow control queue count */
++#define AXGMAC_MAX_FLOW_CONTROL_QUEUES 8
++
++/* Flow control threshold units */
++#define AXGMAC_FLOW_CONTROL_UNIT 512
++#define AXGMAC_FLOW_CONTROL_ALIGN(_x) \
++ (((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) & \
++ ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
++#define AXGMAC_FLOW_CONTROL_VALUE(_x) \
++ (((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
++#define AXGMAC_FLOW_CONTROL_MAX 33280
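Worked example for the threshold macros above: AXGMAC_FLOW_CONTROL_ALIGN(1000) rounds up to 1024, AXGMAC_FLOW_CONTROL_VALUE(4096) evaluates to 4096 / 512 - 2 = 6, and any threshold below 1024 maps to 0.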
++
++/* Maximum MAC address hash table size (256 bits = 8 bytes) */
++#define AXGBE_MAC_HASH_TABLE_SIZE 8
++
++/* Receive Side Scaling */
++#define AXGBE_RSS_OFFLOAD ( \
++ ETH_RSS_IPV4 | \
++ ETH_RSS_NONFRAG_IPV4_TCP | \
++ ETH_RSS_NONFRAG_IPV4_UDP | \
++ ETH_RSS_IPV6 | \
++ ETH_RSS_NONFRAG_IPV6_TCP | \
++ ETH_RSS_NONFRAG_IPV6_UDP)
++
++#define AXGBE_RSS_HASH_KEY_SIZE 40
++#define AXGBE_RSS_MAX_TABLE_SIZE 256
++#define AXGBE_RSS_LOOKUP_TABLE_TYPE 0
++#define AXGBE_RSS_HASH_KEY_TYPE 1
++
++/* Auto-negotiation */
++#define AXGBE_AN_MS_TIMEOUT 500
++#define AXGBE_LINK_TIMEOUT 5
++
++#define AXGBE_SGMII_AN_LINK_STATUS BIT(1)
++#define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
++#define AXGBE_SGMII_AN_LINK_SPEED_100 0x04
++#define AXGBE_SGMII_AN_LINK_SPEED_1000 0x08
++#define AXGBE_SGMII_AN_LINK_DUPLEX BIT(4)
++
++/* ECC correctable error notification window (seconds) */
++#define AXGBE_ECC_LIMIT 60
++
++/* MDIO port types */
++#define AXGMAC_MAX_C22_PORT 3
++
++/* Helper macro for descriptor handling
++ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
++ * since the index is free-running and needs to be and-ed
++ * with the descriptor count value of the ring to index to
++ * the proper descriptor data.
++ */
++#define AXGBE_GET_DESC_DATA(_ring, _idx) \
++ ((_ring)->rdata + \
++ ((_idx) & ((_ring)->rdesc_count - 1)))
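The free-running index convention can be illustrated with a stand-in ring type (names below are hypothetical; the real ring and descriptor-data structures are introduced later in this series):

/* Sketch only: rdesc_count must be a power of two for the
 * "& (rdesc_count - 1)" mask to wrap the free-running index correctly.
 */
struct demo_ring {
	unsigned int rdesc_count;	/* power of two, e.g. 512 */
	int *rdata;			/* stand-in for per-descriptor data */
};

static inline int *demo_get_desc_data(struct demo_ring *ring, unsigned int idx)
{
	/* same pattern as AXGBE_GET_DESC_DATA */
	return ring->rdata + (idx & (ring->rdesc_count - 1));
}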
++
++struct axgbe_port;
++
++enum axgbe_state {
++ AXGBE_DOWN,
++ AXGBE_LINK_INIT,
++ AXGBE_LINK_ERR,
++ AXGBE_STOPPED,
++};
++
++enum axgbe_int {
++ AXGMAC_INT_DMA_CH_SR_TI,
++ AXGMAC_INT_DMA_CH_SR_TPS,
++ AXGMAC_INT_DMA_CH_SR_TBU,
++ AXGMAC_INT_DMA_CH_SR_RI,
++ AXGMAC_INT_DMA_CH_SR_RBU,
++ AXGMAC_INT_DMA_CH_SR_RPS,
++ AXGMAC_INT_DMA_CH_SR_TI_RI,
++ AXGMAC_INT_DMA_CH_SR_FBE,
++ AXGMAC_INT_DMA_ALL,
++};
++
++enum axgbe_int_state {
++ AXGMAC_INT_STATE_SAVE,
++ AXGMAC_INT_STATE_RESTORE,
++};
++
++enum axgbe_ecc_sec {
++ AXGBE_ECC_SEC_TX,
++ AXGBE_ECC_SEC_RX,
++ AXGBE_ECC_SEC_DESC,
++};
++
++enum axgbe_speed {
++ AXGBE_SPEED_1000 = 0,
++ AXGBE_SPEED_2500,
++ AXGBE_SPEED_10000,
++ AXGBE_SPEEDS,
++};
++
++enum axgbe_xpcs_access {
++ AXGBE_XPCS_ACCESS_V1 = 0,
++ AXGBE_XPCS_ACCESS_V2,
++};
++
++enum axgbe_an_mode {
++ AXGBE_AN_MODE_CL73 = 0,
++ AXGBE_AN_MODE_CL73_REDRV,
++ AXGBE_AN_MODE_CL37,
++ AXGBE_AN_MODE_CL37_SGMII,
++ AXGBE_AN_MODE_NONE,
++};
++
++enum axgbe_an {
++ AXGBE_AN_READY = 0,
++ AXGBE_AN_PAGE_RECEIVED,
++ AXGBE_AN_INCOMPAT_LINK,
++ AXGBE_AN_COMPLETE,
++ AXGBE_AN_NO_LINK,
++ AXGBE_AN_ERROR,
++};
++
++enum axgbe_rx {
++ AXGBE_RX_BPA = 0,
++ AXGBE_RX_XNP,
++ AXGBE_RX_COMPLETE,
++ AXGBE_RX_ERROR,
++};
++
++enum axgbe_mode {
++ AXGBE_MODE_KX_1000 = 0,
++ AXGBE_MODE_KX_2500,
++ AXGBE_MODE_KR,
++ AXGBE_MODE_X,
++ AXGBE_MODE_SGMII_100,
++ AXGBE_MODE_SGMII_1000,
++ AXGBE_MODE_SFI,
++ AXGBE_MODE_UNKNOWN,
++};
++
++enum axgbe_speedset {
++ AXGBE_SPEEDSET_1000_10000 = 0,
++ AXGBE_SPEEDSET_2500_10000,
++};
++
++enum axgbe_mdio_mode {
++ AXGBE_MDIO_MODE_NONE = 0,
++ AXGBE_MDIO_MODE_CL22,
++ AXGBE_MDIO_MODE_CL45,
++};
++
++struct axgbe_hw_if {
++ void (*config_flow_control)(struct axgbe_port *);
++ int (*config_rx_mode)(struct axgbe_port *);
++
++ int (*init)(struct axgbe_port *);
++
++ int (*read_mmd_regs)(struct axgbe_port *, int, int);
++ void (*write_mmd_regs)(struct axgbe_port *, int, int, int);
++ int (*set_speed)(struct axgbe_port *, int);
++
++ int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int,
++ enum axgbe_mdio_mode);
++ int (*read_ext_mii_regs)(struct axgbe_port *, int, int);
++ int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t);
++
++ /* For FLOW ctrl */
++ int (*config_tx_flow_control)(struct axgbe_port *);
++ int (*config_rx_flow_control)(struct axgbe_port *);
++
++ int (*exit)(struct axgbe_port *);
++};
++
++/* This structure contains flags that indicate what hardware features
++ * or configurations are present in the device.
++ */
++struct axgbe_hw_features {
++ /* HW Version */
++ unsigned int version;
++
++ /* HW Feature Register0 */
++ unsigned int gmii; /* 1000 Mbps support */
++ unsigned int vlhash; /* VLAN Hash Filter */
++ unsigned int sma; /* SMA(MDIO) Interface */
++ unsigned int rwk; /* PMT remote wake-up packet */
++ unsigned int mgk; /* PMT magic packet */
++ unsigned int mmc; /* RMON module */
++ unsigned int aoe; /* ARP Offload */
++ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
++ unsigned int eee; /* Energy Efficient Ethernet */
++ unsigned int tx_coe; /* Tx Checksum Offload */
++ unsigned int rx_coe; /* Rx Checksum Offload */
++ unsigned int addn_mac; /* Additional MAC Addresses */
++ unsigned int ts_src; /* Timestamp Source */
++ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
++
++ /* HW Feature Register1 */
++ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
++ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
++ unsigned int adv_ts_hi; /* Advance Timestamping High Word */
++ unsigned int dma_width; /* DMA width */
++ unsigned int dcb; /* DCB Feature */
++ unsigned int sph; /* Split Header Feature */
++ unsigned int tso; /* TCP Segmentation Offload */
++ unsigned int dma_debug; /* DMA Debug Registers */
++ unsigned int rss; /* Receive Side Scaling */
++ unsigned int tc_cnt; /* Number of Traffic Classes */
++ unsigned int hash_table_size; /* Hash Table Size */
++ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
++
++ /* HW Feature Register2 */
++ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
++ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
++ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
++ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
++ unsigned int pps_out_num; /* Number of PPS outputs */
++ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
++};
++
++struct axgbe_version_data {
++ enum axgbe_xpcs_access xpcs_access;
++ unsigned int mmc_64bit;
++ unsigned int tx_max_fifo_size;
++ unsigned int rx_max_fifo_size;
++ unsigned int tx_tstamp_workaround;
++ unsigned int ecc_support;
++ unsigned int i2c_support;
++};
++
+ /*
+ * Structure to store private data for each port.
+ */
+@@ -140,6 +397,98 @@ struct axgbe_port {
+ struct rte_eth_dev *eth_dev;
+ /* Pci dev info */
+ const struct rte_pci_device *pci_dev;
++ /* Version related data */
++ struct axgbe_version_data *vdata;
++
++ /* AXGMAC/XPCS related mmio registers */
++ uint64_t xgmac_regs; /* AXGMAC CSRs */
++ uint64_t xpcs_regs; /* XPCS MMD registers */
++ uint64_t xprop_regs; /* AXGBE property registers */
++ uint64_t xi2c_regs; /* AXGBE I2C CSRs */
++
++ /* XPCS indirect addressing lock */
++ unsigned int xpcs_window_def_reg;
++ unsigned int xpcs_window_sel_reg;
++ unsigned int xpcs_window;
++ unsigned int xpcs_window_size;
++ unsigned int xpcs_window_mask;
++
++ /* Flags representing axgbe_state */
++ unsigned long dev_state;
++
++ struct axgbe_hw_if hw_if;
++
++ /* AXI DMA settings */
++ unsigned int coherent;
++ unsigned int axdomain;
++ unsigned int arcache;
++ unsigned int awcache;
++
++ unsigned int tx_max_channel_count;
++ unsigned int rx_max_channel_count;
++ unsigned int channel_count;
++ unsigned int tx_ring_count;
++ unsigned int tx_desc_count;
++ unsigned int rx_ring_count;
++ unsigned int rx_desc_count;
++
++ unsigned int tx_max_q_count;
++ unsigned int rx_max_q_count;
++ unsigned int tx_q_count;
++ unsigned int rx_q_count;
++
++ /* Tx/Rx common settings */
++ unsigned int pblx8;
++
++ /* Tx settings */
++ unsigned int tx_sf_mode;
++ unsigned int tx_threshold;
++ unsigned int tx_pbl;
++ unsigned int tx_osp_mode;
++ unsigned int tx_max_fifo_size;
++
++ /* Rx settings */
++ unsigned int rx_sf_mode;
++ unsigned int rx_threshold;
++ unsigned int rx_pbl;
++ unsigned int rx_max_fifo_size;
++ unsigned int rx_buf_size;
++
++ /* Device clocks */
++ unsigned long sysclk_rate;
++ unsigned long ptpclk_rate;
++
++ /* Keeps track of power mode */
++ unsigned int power_down;
++
++ /* Current PHY settings */
++ int phy_link;
++ int phy_speed;
++
++ pthread_mutex_t xpcs_mutex;
++ pthread_mutex_t i2c_mutex;
++ pthread_mutex_t an_mutex;
++ pthread_mutex_t phy_mutex;
++
++ /* Flow control settings */
++ unsigned int pause_autoneg;
++ unsigned int tx_pause;
++ unsigned int rx_pause;
++ unsigned int rx_rfa[AXGBE_MAX_QUEUES];
++ unsigned int rx_rfd[AXGBE_MAX_QUEUES];
++ unsigned int fifo;
++
++ /* Receive Side Scaling settings */
++ u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
++ uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
++ uint32_t rss_options;
++ int rss_enable;
++
++ /* Hardware features of the device */
++ struct axgbe_hw_features hw_feat;
++
++ struct ether_addr mac_addr;
+ };
+
++void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+ #endif /* RTE_ETH_AXGBE_H_ */
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-05-18-net-axgbe-add-phy-initialization-and-related-apis.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-05-18-net-axgbe-add-phy-initialization-and-related-apis.patch
new file mode 100644
index 00000000..e5389e4c
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-05-18-net-axgbe-add-phy-initialization-and-related-apis.patch
@@ -0,0 +1,1974 @@
+From patchwork Fri Mar 9 08:42:21 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 05/18] net/axgbe: add phy initialization and related apis
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35824
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-5-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:21 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/Makefile | 3 +
+ drivers/net/axgbe/axgbe_dev.c | 188 +++++++++
+ drivers/net/axgbe/axgbe_ethdev.c | 11 +
+ drivers/net/axgbe/axgbe_ethdev.h | 172 ++++++++
+ drivers/net/axgbe/axgbe_i2c.c | 453 +++++++++++++++++++++
+ drivers/net/axgbe/axgbe_mdio.c | 203 ++++++++++
+ drivers/net/axgbe/axgbe_phy_impl.c | 799 +++++++++++++++++++++++++++++++++++++
+ 7 files changed, 1829 insertions(+)
+ create mode 100644 drivers/net/axgbe/axgbe_i2c.c
+ create mode 100644 drivers/net/axgbe/axgbe_mdio.c
+ create mode 100644 drivers/net/axgbe/axgbe_phy_impl.c
+
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+index ce2485d..a8f3358 100644
+--- a/drivers/net/axgbe/Makefile
++++ b/drivers/net/axgbe/Makefile
+@@ -143,5 +143,8 @@ LIBABIVER := 1
+ #
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
+
+ include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
+index 8bf3b82..90a99c4 100644
+--- a/drivers/net/axgbe/axgbe_dev.c
++++ b/drivers/net/axgbe/axgbe_dev.c
+@@ -129,6 +129,187 @@
+ #include "axgbe_common.h"
+ #include "axgbe_phy.h"
+
++/* query busy bit */
++static int mdio_complete(struct axgbe_port *pdata)
++{
++ if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
++ return 1;
++
++ return 0;
++}
++
++static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
++ int reg, u16 val)
++{
++ unsigned int mdio_sca, mdio_sccd;
++ uint64_t timeout;
++
++ mdio_sca = 0;
++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
++
++ mdio_sccd = 0;
++ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
++ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
++ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
++ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
++
++ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
++ while (time_before(rte_get_timer_cycles(), timeout)) {
++ rte_delay_us(100);
++ if (mdio_complete(pdata))
++ return 0;
++ }
++
++ PMD_DRV_LOG(ERR, "Mdio write operation timed out");
++ return -ETIMEDOUT;
++}
++
++static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
++ int reg)
++{
++ unsigned int mdio_sca, mdio_sccd;
++ uint64_t timeout;
++
++ mdio_sca = 0;
++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
++ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
++
++ mdio_sccd = 0;
++ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
++ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
++ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
++
++ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
++
++ while (time_before(rte_get_timer_cycles(), timeout)) {
++ rte_delay_us(100);
++ if (mdio_complete(pdata))
++ goto success;
++ }
++
++ PMD_DRV_LOG(ERR, "Mdio read operation timed out");
++ return -ETIMEDOUT;
++
++success:
++ return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
++}
++
++static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
++ enum axgbe_mdio_mode mode)
++{
++ unsigned int reg_val = 0;
++
++ switch (mode) {
++ case AXGBE_MDIO_MODE_CL22:
++ if (port > AXGMAC_MAX_C22_PORT)
++ return -EINVAL;
++ reg_val |= (1 << port);
++ break;
++ case AXGBE_MDIO_MODE_CL45:
++ break;
++ default:
++ return -EINVAL;
++ }
++ AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
++
++ return 0;
++}
++
++static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
++ int prtad __rte_unused, int mmd_reg)
++{
++ unsigned int mmd_address, index, offset;
++ int mmd_data;
++
++ if (mmd_reg & MII_ADDR_C45)
++ mmd_address = mmd_reg & ~MII_ADDR_C45;
++ else
++ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
++
++ /* The PCS registers are accessed using mmio. The underlying
++ * management interface uses indirect addressing to access the MMD
++ * register sets. This requires accessing of the PCS register in two
++ * phases, an address phase and a data phase.
++ *
++ * The mmio interface is based on 16-bit offsets and values. All
++ * register offsets must therefore be adjusted by left shifting the
++ * offset 1 bit and reading 16 bits of data.
++ */
++ mmd_address <<= 1;
++ index = mmd_address & ~pdata->xpcs_window_mask;
++ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
++
++ pthread_mutex_lock(&pdata->xpcs_mutex);
++
++ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
++ mmd_data = XPCS16_IOREAD(pdata, offset);
++
++ pthread_mutex_unlock(&pdata->xpcs_mutex);
++
++ return mmd_data;
++}
++
++static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
++ int prtad __rte_unused,
++ int mmd_reg, int mmd_data)
++{
++ unsigned int mmd_address, index, offset;
++
++ if (mmd_reg & MII_ADDR_C45)
++ mmd_address = mmd_reg & ~MII_ADDR_C45;
++ else
++ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
++
++ /* The PCS registers are accessed using mmio. The underlying
++ * management interface uses indirect addressing to access the MMD
++ * register sets. This requires accessing of the PCS register in two
++ * phases, an address phase and a data phase.
++ *
++ * The mmio interface is based on 16-bit offsets and values. All
++ * register offsets must therefore be adjusted by left shifting the
++ * offset 1 bit and writing 16 bits of data.
++ */
++ mmd_address <<= 1;
++ index = mmd_address & ~pdata->xpcs_window_mask;
++ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
++
++ pthread_mutex_lock(&pdata->xpcs_mutex);
++
++ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
++ XPCS16_IOWRITE(pdata, offset, mmd_data);
++
++ pthread_mutex_unlock(&pdata->xpcs_mutex);
++}
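Working the indirect-addressing arithmetic above through one example (sizes are illustrative): reading register 0x0020 in the PCS MMD (3) gives mmd_address = 0x30020, or 0x60040 after the 1-bit shift; with a 4 KB indirection window (mask 0xfff) the driver writes index 0x60000 to the window-select register and then accesses 16 bits at window offset 0x40.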
++
++static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
++ int mmd_reg)
++{
++ switch (pdata->vdata->xpcs_access) {
++ case AXGBE_XPCS_ACCESS_V1:
++ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported ");
++ return -1;
++ case AXGBE_XPCS_ACCESS_V2:
++ default:
++ return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
++ }
++}
++
++static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
++ int mmd_reg, int mmd_data)
++{
++ switch (pdata->vdata->xpcs_access) {
++ case AXGBE_XPCS_ACCESS_V1:
++ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported ");
++ return;
++ case AXGBE_XPCS_ACCESS_V2:
++ default:
++ return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
++ }
++}
++
+ static int __axgbe_exit(struct axgbe_port *pdata)
+ {
+ unsigned int count = 2000;
+@@ -164,4 +345,11 @@ static int axgbe_exit(struct axgbe_port *pdata)
+ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+ {
+ hw_if->exit = axgbe_exit;
++
++ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
++ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
++
++ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
++ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
++ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
+ }
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index 786d929..0dc424d 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -147,6 +147,7 @@ static const struct rte_pci_id pci_id_axgbe_map[] = {
+ };
+
+ static struct axgbe_version_data axgbe_v2a = {
++ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+@@ -157,6 +158,7 @@ static struct axgbe_version_data axgbe_v2a = {
+ };
+
+ static struct axgbe_version_data axgbe_v2b = {
++ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+@@ -271,6 +273,9 @@ static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+ static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
+ {
+ axgbe_init_function_ptrs_dev(&pdata->hw_if);
++ axgbe_init_function_ptrs_phy(&pdata->phy_if);
++ axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
++ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+ }
+
+ static void axgbe_set_counts(struct axgbe_port *pdata)
+@@ -458,6 +463,12 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ pthread_mutex_init(&pdata->an_mutex, NULL);
+ pthread_mutex_init(&pdata->phy_mutex, NULL);
+
++ ret = pdata->phy_if.phy_init(pdata);
++ if (ret) {
++ rte_free(eth_dev->data->mac_addrs);
++ return ret;
++ }
++
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+index 5d20a1f..6cddb57 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.h
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -311,6 +311,61 @@ enum axgbe_mdio_mode {
+ AXGBE_MDIO_MODE_CL45,
+ };
+
++struct axgbe_phy {
++ uint32_t supported;
++ uint32_t advertising;
++ uint32_t lp_advertising;
++
++ int address;
++
++ int autoneg;
++ int speed;
++ int duplex;
++
++ int link;
++
++ int pause_autoneg;
++ int tx_pause;
++ int rx_pause;
++};
++
++enum axgbe_i2c_cmd {
++ AXGBE_I2C_CMD_READ = 0,
++ AXGBE_I2C_CMD_WRITE,
++};
++
++struct axgbe_i2c_op {
++ enum axgbe_i2c_cmd cmd;
++
++ unsigned int target;
++
++ uint8_t *buf;
++ unsigned int len;
++};
++
++struct axgbe_i2c_op_state {
++ struct axgbe_i2c_op *op;
++
++ unsigned int tx_len;
++ unsigned char *tx_buf;
++
++ unsigned int rx_len;
++ unsigned char *rx_buf;
++
++ unsigned int tx_abort_source;
++
++ int ret;
++};
++
++struct axgbe_i2c {
++ unsigned int started;
++ unsigned int max_speed_mode;
++ unsigned int rx_fifo_size;
++ unsigned int tx_fifo_size;
++
++ struct axgbe_i2c_op_state op_state;
++};
++
+ struct axgbe_hw_if {
+ void (*config_flow_control)(struct axgbe_port *);
+ int (*config_rx_mode)(struct axgbe_port *);
+@@ -333,6 +388,89 @@ struct axgbe_hw_if {
+ int (*exit)(struct axgbe_port *);
+ };
+
++/* This structure represents implementation specific routines for an
++ * implementation of a PHY. All routines are required unless noted below.
++ * Optional routines:
++ * kr_training_pre, kr_training_post
++ */
++struct axgbe_phy_impl_if {
++ /* Perform Setup/teardown actions */
++ int (*init)(struct axgbe_port *);
++ void (*exit)(struct axgbe_port *);
++
++ /* Perform start/stop specific actions */
++ int (*reset)(struct axgbe_port *);
++ int (*start)(struct axgbe_port *);
++ void (*stop)(struct axgbe_port *);
++
++ /* Return the link status */
++ int (*link_status)(struct axgbe_port *, int *);
++
++ /* Indicate if a particular speed is valid */
++ int (*valid_speed)(struct axgbe_port *, int);
++
++ /* Check if the specified mode can/should be used */
++ bool (*use_mode)(struct axgbe_port *, enum axgbe_mode);
++ /* Switch the PHY into various modes */
++ void (*set_mode)(struct axgbe_port *, enum axgbe_mode);
++ /* Retrieve mode needed for a specific speed */
++ enum axgbe_mode (*get_mode)(struct axgbe_port *, int);
++ /* Retrieve new/next mode when trying to auto-negotiate */
++ enum axgbe_mode (*switch_mode)(struct axgbe_port *);
++ /* Retrieve current mode */
++ enum axgbe_mode (*cur_mode)(struct axgbe_port *);
++
++ /* Retrieve current auto-negotiation mode */
++ enum axgbe_an_mode (*an_mode)(struct axgbe_port *);
++
++ /* Configure auto-negotiation settings */
++ int (*an_config)(struct axgbe_port *);
++
++ /* Set/override auto-negotiation advertisement settings */
++ unsigned int (*an_advertising)(struct axgbe_port *port);
++
++ /* Process results of auto-negotiation */
++ enum axgbe_mode (*an_outcome)(struct axgbe_port *);
++
++ /* Pre/Post KR training enablement support */
++ void (*kr_training_pre)(struct axgbe_port *);
++ void (*kr_training_post)(struct axgbe_port *);
++};
++
++struct axgbe_phy_if {
++ /* For PHY setup/teardown */
++ int (*phy_init)(struct axgbe_port *);
++ void (*phy_exit)(struct axgbe_port *);
++
++ /* For PHY support when setting device up/down */
++ int (*phy_reset)(struct axgbe_port *);
++ int (*phy_start)(struct axgbe_port *);
++ void (*phy_stop)(struct axgbe_port *);
++
++ /* For PHY support while device is up */
++ void (*phy_status)(struct axgbe_port *);
++ int (*phy_config_aneg)(struct axgbe_port *);
++
++ /* For PHY settings validation */
++ int (*phy_valid_speed)(struct axgbe_port *, int);
++ /* For single interrupt support */
++ void (*an_isr)(struct axgbe_port *);
++ /* PHY implementation specific services */
++ struct axgbe_phy_impl_if phy_impl;
++};
++
++struct axgbe_i2c_if {
++ /* For initial I2C setup */
++ int (*i2c_init)(struct axgbe_port *);
++
++ /* For I2C support when setting device up/down */
++ int (*i2c_start)(struct axgbe_port *);
++ void (*i2c_stop)(struct axgbe_port *);
++
++ /* For performing I2C operations */
++ int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *);
++};
++
+ /* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+@@ -380,6 +518,7 @@ struct axgbe_hw_features {
+ };
+
+ struct axgbe_version_data {
++ void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *);
+ enum axgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+@@ -417,6 +556,8 @@ struct axgbe_port {
+ unsigned long dev_state;
+
+ struct axgbe_hw_if hw_if;
++ struct axgbe_phy_if phy_if;
++ struct axgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+@@ -488,7 +629,38 @@ struct axgbe_port {
+ struct axgbe_hw_features hw_feat;
+
+ struct ether_addr mac_addr;
++
++ /* MDIO/PHY related settings */
++ unsigned int phy_started;
++ void *phy_data;
++ struct axgbe_phy phy;
++ int mdio_mmd;
++ unsigned long link_check;
++ volatile int mdio_completion;
++
++ unsigned int kr_redrv;
++
++ /* Auto-negotiation state machine support */
++ unsigned int an_int;
++ unsigned int an_status;
++ enum axgbe_an an_result;
++ enum axgbe_an an_state;
++ enum axgbe_rx kr_state;
++ enum axgbe_rx kx_state;
++ unsigned int an_supported;
++ unsigned int parallel_detect;
++ unsigned int fec_ability;
++ unsigned long an_start;
++ enum axgbe_an_mode an_mode;
++
++ /* I2C support */
++ struct axgbe_i2c i2c;
++ volatile int i2c_complete;
+ };
+
+ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
++void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if);
++void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if);
++void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if);
++
+ #endif /* RTE_ETH_AXGBE_H_ */
+diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c
+new file mode 100644
+index 0000000..468955e
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_i2c.c
+@@ -0,0 +1,453 @@
++/*-
++ * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++#include "axgbe_common.h"
++
++#define AXGBE_ABORT_COUNT 500
++#define AXGBE_DISABLE_COUNT 1000
++
++#define AXGBE_STD_SPEED 1
++
++#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
++#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
++#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
++#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
++#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \
++ AXGBE_INTR_TX_EMPTY | \
++ AXGBE_INTR_TX_ABRT | \
++ AXGBE_INTR_STOP_DET)
++
++#define AXGBE_I2C_READ BIT(8)
++#define AXGBE_I2C_STOP BIT(9)
++
++static int axgbe_i2c_abort(struct axgbe_port *pdata)
++{
++ unsigned int wait = AXGBE_ABORT_COUNT;
++
++ /* Must be enabled to recognize the abort request */
++ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
++
++ /* Issue the abort */
++ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
++
++ while (wait--) {
++ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
++ return 0;
++ rte_delay_us(500);
++ }
++
++ return -EBUSY;
++}
++
++static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable)
++{
++ unsigned int wait = AXGBE_DISABLE_COUNT;
++ unsigned int mode = enable ? 1 : 0;
++
++ while (wait--) {
++ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
++ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
++ return 0;
++
++ rte_delay_us(100);
++ }
++
++ return -EBUSY;
++}
++
++static int axgbe_i2c_disable(struct axgbe_port *pdata)
++{
++ unsigned int ret;
++
++ ret = axgbe_i2c_set_enable(pdata, false);
++ if (ret) {
++ /* Disable failed, try an abort */
++ ret = axgbe_i2c_abort(pdata);
++ if (ret)
++ return ret;
++
++ /* Abort succeeded, try to disable again */
++ ret = axgbe_i2c_set_enable(pdata, false);
++ }
++
++ return ret;
++}
++
++static int axgbe_i2c_enable(struct axgbe_port *pdata)
++{
++ return axgbe_i2c_set_enable(pdata, true);
++}
++
++static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata)
++{
++ XI2C_IOREAD(pdata, IC_CLR_INTR);
++}
++
++static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata)
++{
++ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
++}
++
++static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata)
++{
++ XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK);
++}
++
++static void axgbe_i2c_write(struct axgbe_port *pdata)
++{
++ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ unsigned int tx_slots;
++ unsigned int cmd;
++
++ /* Configured to never receive Rx overflows, so fill up Tx fifo */
++ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
++ while (tx_slots && state->tx_len) {
++ if (state->op->cmd == AXGBE_I2C_CMD_READ)
++ cmd = AXGBE_I2C_READ;
++ else
++ cmd = *state->tx_buf++;
++
++ if (state->tx_len == 1)
++ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
++
++ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
++
++ tx_slots--;
++ state->tx_len--;
++ }
++
++ /* No more Tx operations, so ignore TX_EMPTY and return */
++ if (!state->tx_len)
++ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
++}
++
++static void axgbe_i2c_read(struct axgbe_port *pdata)
++{
++ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ unsigned int rx_slots;
++
++ /* Anything to be read? */
++ if (state->op->cmd != AXGBE_I2C_CMD_READ)
++ return;
++
++ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
++ while (rx_slots && state->rx_len) {
++ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
++ state->rx_len--;
++ rx_slots--;
++ }
++}
++
++static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata,
++ unsigned int isr)
++{
++ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
++
++ if (isr & AXGBE_INTR_TX_ABRT) {
++ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
++ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
++ }
++
++ if (isr & AXGBE_INTR_STOP_DET)
++ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
++}
++
++static int axgbe_i2c_isr(struct axgbe_port *pdata)
++{
++ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ unsigned int isr;
++
++ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
++
++ axgbe_i2c_clear_isr_interrupts(pdata, isr);
++
++ if (isr & AXGBE_INTR_TX_ABRT) {
++ axgbe_i2c_disable_interrupts(pdata);
++
++ state->ret = -EIO;
++ goto out;
++ }
++
++ /* Check for data in the Rx fifo */
++ axgbe_i2c_read(pdata);
++
++ /* Fill up the Tx fifo next */
++ axgbe_i2c_write(pdata);
++
++out:
++ /* Complete on an error or STOP condition */
++ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
++ return 1;
++
++ return 0;
++}
++
++static void axgbe_i2c_set_mode(struct axgbe_port *pdata)
++{
++ unsigned int reg;
++
++ reg = XI2C_IOREAD(pdata, IC_CON);
++ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
++ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
++ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
++ XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED);
++ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
++ XI2C_IOWRITE(pdata, IC_CON, reg);
++}
++
++static void axgbe_i2c_get_features(struct axgbe_port *pdata)
++{
++ struct axgbe_i2c *i2c = &pdata->i2c;
++ unsigned int reg;
++
++ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
++ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
++ MAX_SPEED_MODE);
++ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
++ RX_BUFFER_DEPTH);
++ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
++ TX_BUFFER_DEPTH);
++}
++
++static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr)
++{
++ XI2C_IOWRITE(pdata, IC_TAR, addr);
++}
++
++static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
++{
++ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ int ret;
++ uint64_t timeout;
++
++ pthread_mutex_lock(&pdata->i2c_mutex);
++ ret = axgbe_i2c_disable(pdata);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "failed to disable i2c master");
++		goto unlock;
++ }
++
++ axgbe_i2c_set_target(pdata, op->target);
++
++ memset(state, 0, sizeof(*state));
++ state->op = op;
++ state->tx_len = op->len;
++ state->tx_buf = (unsigned char *)op->buf;
++ state->rx_len = op->len;
++ state->rx_buf = (unsigned char *)op->buf;
++
++ axgbe_i2c_clear_all_interrupts(pdata);
++ ret = axgbe_i2c_enable(pdata);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
++		goto unlock;
++ }
++
++ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
++ * fire and begin to process the command via the ISR.
++ */
++ axgbe_i2c_enable_interrupts(pdata);
++ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
++
++ while (time_before(rte_get_timer_cycles(), timeout)) {
++ rte_delay_us(100);
++ if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) {
++ if (axgbe_i2c_isr(pdata))
++ goto success;
++ }
++ }
++
++ PMD_DRV_LOG(ERR, "i2c operation timed out");
++ axgbe_i2c_disable_interrupts(pdata);
++ axgbe_i2c_disable(pdata);
++ ret = -ETIMEDOUT;
++ goto unlock;
++
++success:
++ ret = state->ret;
++ if (ret) {
++ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
++ ret = -ENOTCONN;
++ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
++ ret = -EAGAIN;
++ }
++
++unlock:
++ pthread_mutex_unlock(&pdata->i2c_mutex);
++ return ret;
++}
++
++static void axgbe_i2c_stop(struct axgbe_port *pdata)
++{
++ if (!pdata->i2c.started)
++ return;
++
++ pdata->i2c.started = 0;
++ axgbe_i2c_disable_interrupts(pdata);
++ axgbe_i2c_disable(pdata);
++ axgbe_i2c_clear_all_interrupts(pdata);
++}
++
++static int axgbe_i2c_start(struct axgbe_port *pdata)
++{
++ if (pdata->i2c.started)
++ return 0;
++
++ pdata->i2c.started = 1;
++
++ return 0;
++}
++
++static int axgbe_i2c_init(struct axgbe_port *pdata)
++{
++ int ret;
++
++ axgbe_i2c_disable_interrupts(pdata);
++
++ ret = axgbe_i2c_disable(pdata);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "failed to disable i2c master");
++ return ret;
++ }
++
++ axgbe_i2c_get_features(pdata);
++
++ axgbe_i2c_set_mode(pdata);
++
++ axgbe_i2c_clear_all_interrupts(pdata);
++
++ return 0;
++}
++
++void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if)
++{
++ i2c_if->i2c_init = axgbe_i2c_init;
++ i2c_if->i2c_start = axgbe_i2c_start;
++ i2c_if->i2c_stop = axgbe_i2c_stop;
++ i2c_if->i2c_xfer = axgbe_i2c_xfer;
++}
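
For context, a hedged sketch of how a consumer (such as the SFP detection code added later in this series) might drive these ops to pull bytes off a module EEPROM. The 0x50 serial-ID target address and the axgbe_i2c_op field names mirror their use elsewhere in the series; the real code also writes the register offset before reading, which is omitted here for brevity.

/* Illustrative use of the i2c_if ops above; simplified, not part of the patch. */
static int example_sfp_eeprom_read(struct axgbe_port *pdata,
				   unsigned char *buf, unsigned int len)
{
	struct axgbe_i2c_op op;
	int ret;

	ret = pdata->i2c_if.i2c_start(pdata);
	if (ret)
		return ret;

	op.cmd = AXGBE_I2C_CMD_READ;
	op.target = 0x50;	/* SFP serial ID EEPROM */
	op.buf = buf;
	op.len = len;

	return pdata->i2c_if.i2c_xfer(pdata, &op);
}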
+diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
+new file mode 100644
+index 0000000..4fbf5c3
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_mdio.c
+@@ -0,0 +1,203 @@
++/*-
++ * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++#include "axgbe_common.h"
++#include "axgbe_phy.h"
++
++static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata)
++{
++ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
++ return SPEED_10000;
++ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
++ return SPEED_10000;
++ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
++ return SPEED_2500;
++ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
++ return SPEED_1000;
++ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
++ return SPEED_1000;
++ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
++ return SPEED_100;
++
++ return SPEED_UNKNOWN;
++}
++
++static int axgbe_phy_init(struct axgbe_port *pdata)
++{
++ int ret;
++
++ pdata->mdio_mmd = MDIO_MMD_PCS;
++
++ /* Check for FEC support */
++ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
++ MDIO_PMA_10GBR_FECABLE);
++ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
++ MDIO_PMA_10GBR_FECABLE_ERRABLE);
++
++ /* Setup the phy (including supported features) */
++ ret = pdata->phy_if.phy_impl.init(pdata);
++ if (ret)
++ return ret;
++ pdata->phy.advertising = pdata->phy.supported;
++
++ pdata->phy.address = 0;
++
++ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
++ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ } else {
++ pdata->phy.autoneg = AUTONEG_DISABLE;
++ pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata);
++ pdata->phy.duplex = DUPLEX_FULL;
++ }
++
++ pdata->phy.link = 0;
++
++ pdata->phy.pause_autoneg = pdata->pause_autoneg;
++ pdata->phy.tx_pause = pdata->tx_pause;
++ pdata->phy.rx_pause = pdata->rx_pause;
++
++ /* Fix up Flow Control advertising */
++ pdata->phy.advertising &= ~ADVERTISED_Pause;
++ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
++
++ if (pdata->rx_pause) {
++ pdata->phy.advertising |= ADVERTISED_Pause;
++ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
++ }
++
++ if (pdata->tx_pause)
++ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
++ return 0;
++}
++
++void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if)
++{
++ phy_if->phy_init = axgbe_phy_init;
++}
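
The flow-control fixup at the end of axgbe_phy_init() above (clear both bits, set both when rx_pause is enabled, then XOR Asym_Pause when tx_pause is enabled) resolves to the usual IEEE 802.3 pause advertisement encoding:

	rx_pause  tx_pause   advertised bits
	   0         0       none
	   0         1       Asym_Pause
	   1         0       Pause | Asym_Pause
	   1         1       Pause

In other words, a port that can only send pause frames advertises asymmetric pause alone, while a port that can both send and honour them advertises symmetric pause.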
+diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
+new file mode 100644
+index 0000000..cea4266
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_phy_impl.c
+@@ -0,0 +1,799 @@
++/*-
++ * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++#include "axgbe_common.h"
++#include "axgbe_phy.h"
++
++#define AXGBE_PHY_PORT_SPEED_100 BIT(0)
++#define AXGBE_PHY_PORT_SPEED_1000 BIT(1)
++#define AXGBE_PHY_PORT_SPEED_2500 BIT(2)
++#define AXGBE_PHY_PORT_SPEED_10000 BIT(3)
++
++#define AXGBE_MUTEX_RELEASE 0x80000000
++
++#define AXGBE_SFP_DIRECT 7
++
++/* I2C target addresses */
++#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50
++#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51
++#define AXGBE_SFP_PHY_ADDRESS 0x56
++#define AXGBE_GPIO_ADDRESS_PCA9555 0x20
++
++/* SFP sideband signal indicators */
++#define AXGBE_GPIO_NO_TX_FAULT BIT(0)
++#define AXGBE_GPIO_NO_RATE_SELECT BIT(1)
++#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2)
++#define AXGBE_GPIO_NO_RX_LOS BIT(3)
++
++/* Rate-change complete wait/retry count */
++#define AXGBE_RATECHANGE_COUNT 500
++
++enum axgbe_port_mode {
++ AXGBE_PORT_MODE_RSVD = 0,
++ AXGBE_PORT_MODE_BACKPLANE,
++ AXGBE_PORT_MODE_BACKPLANE_2500,
++ AXGBE_PORT_MODE_1000BASE_T,
++ AXGBE_PORT_MODE_1000BASE_X,
++ AXGBE_PORT_MODE_NBASE_T,
++ AXGBE_PORT_MODE_10GBASE_T,
++ AXGBE_PORT_MODE_10GBASE_R,
++ AXGBE_PORT_MODE_SFP,
++ AXGBE_PORT_MODE_MAX,
++};
++
++enum axgbe_conn_type {
++ AXGBE_CONN_TYPE_NONE = 0,
++ AXGBE_CONN_TYPE_SFP,
++ AXGBE_CONN_TYPE_MDIO,
++ AXGBE_CONN_TYPE_RSVD1,
++ AXGBE_CONN_TYPE_BACKPLANE,
++ AXGBE_CONN_TYPE_MAX,
++};
++
++/* SFP/SFP+ related definitions */
++enum axgbe_sfp_comm {
++ AXGBE_SFP_COMM_DIRECT = 0,
++ AXGBE_SFP_COMM_PCA9545,
++};
++
++enum axgbe_sfp_cable {
++ AXGBE_SFP_CABLE_UNKNOWN = 0,
++ AXGBE_SFP_CABLE_ACTIVE,
++ AXGBE_SFP_CABLE_PASSIVE,
++};
++
++enum axgbe_sfp_base {
++ AXGBE_SFP_BASE_UNKNOWN = 0,
++ AXGBE_SFP_BASE_1000_T,
++ AXGBE_SFP_BASE_1000_SX,
++ AXGBE_SFP_BASE_1000_LX,
++ AXGBE_SFP_BASE_1000_CX,
++ AXGBE_SFP_BASE_10000_SR,
++ AXGBE_SFP_BASE_10000_LR,
++ AXGBE_SFP_BASE_10000_LRM,
++ AXGBE_SFP_BASE_10000_ER,
++ AXGBE_SFP_BASE_10000_CR,
++};
++
++enum axgbe_sfp_speed {
++ AXGBE_SFP_SPEED_UNKNOWN = 0,
++ AXGBE_SFP_SPEED_100_1000,
++ AXGBE_SFP_SPEED_1000,
++ AXGBE_SFP_SPEED_10000,
++};
++
++/* SFP Serial ID Base ID values relative to an offset of 0 */
++#define AXGBE_SFP_BASE_ID 0
++#define AXGBE_SFP_ID_SFP 0x03
++
++#define AXGBE_SFP_BASE_EXT_ID 1
++#define AXGBE_SFP_EXT_ID_SFP 0x04
++
++#define AXGBE_SFP_BASE_10GBE_CC 3
++#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4)
++#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5)
++#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
++#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7)
++
++#define AXGBE_SFP_BASE_1GBE_CC 6
++#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0)
++#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1)
++#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2)
++#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3)
++
++#define AXGBE_SFP_BASE_CABLE 8
++#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
++#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
++
++#define AXGBE_SFP_BASE_BR 12
++#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a
++#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d
++#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64
++#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68
++
++#define AXGBE_SFP_BASE_CU_CABLE_LEN 18
++
++#define AXGBE_SFP_BASE_VENDOR_NAME 20
++#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16
++#define AXGBE_SFP_BASE_VENDOR_PN 40
++#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16
++#define AXGBE_SFP_BASE_VENDOR_REV 56
++#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4
++
++#define AXGBE_SFP_BASE_CC 63
++
++/* SFP Serial ID Extended ID values relative to an offset of 64 */
++#define AXGBE_SFP_BASE_VENDOR_SN 4
++#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16
++
++#define AXGBE_SFP_EXTD_DIAG 28
++#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
++
++#define AXGBE_SFP_EXTD_SFF_8472 30
++
++#define AXGBE_SFP_EXTD_CC 31
++
++struct axgbe_sfp_eeprom {
++ u8 base[64];
++ u8 extd[32];
++ u8 vendor[32];
++};
++
++#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE"
++#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06"
++
++struct axgbe_sfp_ascii {
++ union {
++ char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
++ char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1];
++ char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1];
++ char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1];
++ } u;
++};
++
++/* MDIO PHY reset types */
++enum axgbe_mdio_reset {
++ AXGBE_MDIO_RESET_NONE = 0,
++ AXGBE_MDIO_RESET_I2C_GPIO,
++ AXGBE_MDIO_RESET_INT_GPIO,
++ AXGBE_MDIO_RESET_MAX,
++};
++
++/* Re-driver related definitions */
++enum axgbe_phy_redrv_if {
++ AXGBE_PHY_REDRV_IF_MDIO = 0,
++ AXGBE_PHY_REDRV_IF_I2C,
++ AXGBE_PHY_REDRV_IF_MAX,
++};
++
++enum axgbe_phy_redrv_model {
++ AXGBE_PHY_REDRV_MODEL_4223 = 0,
++ AXGBE_PHY_REDRV_MODEL_4227,
++ AXGBE_PHY_REDRV_MODEL_MAX,
++};
++
++enum axgbe_phy_redrv_mode {
++ AXGBE_PHY_REDRV_MODE_CX = 5,
++ AXGBE_PHY_REDRV_MODE_SR = 9,
++};
++
++#define AXGBE_PHY_REDRV_MODE_REG 0x12b0
++
++/* PHY related configuration information */
++struct axgbe_phy_data {
++ enum axgbe_port_mode port_mode;
++
++ unsigned int port_id;
++
++ unsigned int port_speeds;
++
++ enum axgbe_conn_type conn_type;
++
++ enum axgbe_mode cur_mode;
++ enum axgbe_mode start_mode;
++
++ unsigned int rrc_count;
++
++ unsigned int mdio_addr;
++
++ unsigned int comm_owned;
++
++ /* SFP Support */
++ enum axgbe_sfp_comm sfp_comm;
++ unsigned int sfp_mux_address;
++ unsigned int sfp_mux_channel;
++
++ unsigned int sfp_gpio_address;
++ unsigned int sfp_gpio_mask;
++ unsigned int sfp_gpio_rx_los;
++ unsigned int sfp_gpio_tx_fault;
++ unsigned int sfp_gpio_mod_absent;
++ unsigned int sfp_gpio_rate_select;
++
++ unsigned int sfp_rx_los;
++ unsigned int sfp_tx_fault;
++ unsigned int sfp_mod_absent;
++ unsigned int sfp_diags;
++ unsigned int sfp_changed;
++ unsigned int sfp_phy_avail;
++ unsigned int sfp_cable_len;
++ enum axgbe_sfp_base sfp_base;
++ enum axgbe_sfp_cable sfp_cable;
++ enum axgbe_sfp_speed sfp_speed;
++ struct axgbe_sfp_eeprom sfp_eeprom;
++
++ /* External PHY support */
++ enum axgbe_mdio_mode phydev_mode;
++ enum axgbe_mdio_reset mdio_reset;
++ unsigned int mdio_reset_addr;
++ unsigned int mdio_reset_gpio;
++
++ /* Re-driver support */
++ unsigned int redrv;
++ unsigned int redrv_if;
++ unsigned int redrv_addr;
++ unsigned int redrv_lane;
++ unsigned int redrv_model;
++};
++
++static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ reg = XP_IOREAD(pdata, XP_PROP_3);
++
++ phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 +
++ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
++
++ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
++
++ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_RX_LOS);
++ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_TX_FAULT);
++ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_MOD_ABS);
++ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_RATE_SELECT);
++}
++
++static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg, mux_addr_hi, mux_addr_lo;
++
++ reg = XP_IOREAD(pdata, XP_PROP_4);
++
++ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
++ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
++ if (mux_addr_lo == AXGBE_SFP_DIRECT)
++ return;
++
++ phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545;
++ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
++ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
++}
++
++static void axgbe_phy_sfp_setup(struct axgbe_port *pdata)
++{
++ axgbe_phy_sfp_comm_setup(pdata);
++ axgbe_phy_sfp_gpio_setup(pdata);
++}
++
++static bool axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data)
++{
++ if (!phy_data->redrv)
++ return false;
++
++ if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX)
++ return true;
++
++ switch (phy_data->redrv_model) {
++ case AXGBE_PHY_REDRV_MODEL_4223:
++ if (phy_data->redrv_lane > 3)
++ return true;
++ break;
++ case AXGBE_PHY_REDRV_MODEL_4227:
++ if (phy_data->redrv_lane > 1)
++ return true;
++ break;
++ default:
++ return true;
++ }
++
++ return false;
++}
++
++static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO)
++ return 0;
++ reg = XP_IOREAD(pdata, XP_PROP_3);
++ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
++ switch (phy_data->mdio_reset) {
++ case AXGBE_MDIO_RESET_NONE:
++ case AXGBE_MDIO_RESET_I2C_GPIO:
++ case AXGBE_MDIO_RESET_INT_GPIO:
++ break;
++ default:
++ PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n",
++ phy_data->mdio_reset);
++ return -EINVAL;
++ }
++ if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) {
++ phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 +
++ XP_GET_BITS(reg, XP_PROP_3,
++ MDIO_RESET_I2C_ADDR);
++ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
++ MDIO_RESET_I2C_GPIO);
++ } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) {
++ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
++ MDIO_RESET_INT_GPIO);
++ }
++
++ return 0;
++}
++
++static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
++ return false;
++ break;
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)
++ return false;
++ break;
++ case AXGBE_PORT_MODE_1000BASE_T:
++ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000))
++ return false;
++ break;
++ case AXGBE_PORT_MODE_1000BASE_X:
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
++ return false;
++ break;
++ case AXGBE_PORT_MODE_NBASE_T:
++ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500))
++ return false;
++ break;
++ case AXGBE_PORT_MODE_10GBASE_T:
++ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
++ return false;
++ break;
++ case AXGBE_PORT_MODE_10GBASE_R:
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
++ return false;
++ break;
++ case AXGBE_PORT_MODE_SFP:
++ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
++ return false;
++ break;
++ default:
++ break;
++ }
++
++ return true;
++}
++
++static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE)
++ return false;
++ break;
++ case AXGBE_PORT_MODE_1000BASE_T:
++ case AXGBE_PORT_MODE_1000BASE_X:
++ case AXGBE_PORT_MODE_NBASE_T:
++ case AXGBE_PORT_MODE_10GBASE_T:
++ case AXGBE_PORT_MODE_10GBASE_R:
++ if (phy_data->conn_type == AXGBE_CONN_TYPE_MDIO)
++ return false;
++ break;
++ case AXGBE_PORT_MODE_SFP:
++ if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP)
++ return false;
++ break;
++ default:
++ break;
++ }
++
++ return true;
++}
++
++static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
++{
++ unsigned int reg;
++
++ reg = XP_IOREAD(pdata, XP_PROP_0);
++ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
++ return false;
++ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
++ return false;
++
++ return true;
++}
++
++static int axgbe_phy_init(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data;
++ unsigned int reg;
++ int ret;
++
++ /* Check if enabled */
++ if (!axgbe_phy_port_enabled(pdata)) {
++ PMD_DRV_LOG(ERR, "device is not enabled");
++ return -ENODEV;
++ }
++
++ /* Initialize the I2C controller */
++ ret = pdata->i2c_if.i2c_init(pdata);
++ if (ret)
++ return ret;
++
++ phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
++ if (!phy_data) {
++ PMD_DRV_LOG(ERR, "phy_data allocation failed");
++ return -ENOMEM;
++ }
++ pdata->phy_data = phy_data;
++
++ reg = XP_IOREAD(pdata, XP_PROP_0);
++ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
++ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
++ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
++ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
++ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
++
++ reg = XP_IOREAD(pdata, XP_PROP_4);
++ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
++ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
++ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
++ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
++ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
++
++ /* Validate the connection requested */
++ if (axgbe_phy_conn_type_mismatch(pdata)) {
++ PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)",
++ phy_data->port_mode, phy_data->conn_type);
++ return -EINVAL;
++ }
++
++ /* Validate the mode requested */
++ if (axgbe_phy_port_mode_mismatch(pdata)) {
++ PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)",
++ phy_data->port_mode, phy_data->port_speeds);
++ return -EINVAL;
++ }
++
++ /* Check for and validate MDIO reset support */
++ ret = axgbe_phy_mdio_reset_setup(pdata);
++ if (ret)
++ return ret;
++
++ /* Validate the re-driver information */
++ if (axgbe_phy_redrv_error(phy_data)) {
++ PMD_DRV_LOG(ERR, "phy re-driver settings error");
++ return -EINVAL;
++ }
++ pdata->kr_redrv = phy_data->redrv;
++
++ /* Indicate current mode is unknown */
++ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
++
++ /* Initialize supported features */
++ pdata->phy.supported = 0;
++
++ switch (phy_data->port_mode) {
++ /* Backplane support */
++ case AXGBE_PORT_MODE_BACKPLANE:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_Backplane;
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
++ phy_data->start_mode = AXGBE_MODE_KX_1000;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
++ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |=
++ SUPPORTED_10000baseR_FEC;
++ phy_data->start_mode = AXGBE_MODE_KR;
++ }
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
++ break;
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_Backplane;
++ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ phy_data->start_mode = AXGBE_MODE_KX_2500;
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
++ break;
++
++ /* MDIO 1GBase-T support */
++ case AXGBE_PORT_MODE_1000BASE_T:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
++ }
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
++ break;
++
++ /* MDIO Base-X support */
++ case AXGBE_PORT_MODE_1000BASE_X:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_FIBRE;
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_X;
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
++ break;
++
++ /* MDIO NBase-T support */
++ case AXGBE_PORT_MODE_NBASE_T:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) {
++ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ phy_data->start_mode = AXGBE_MODE_KX_2500;
++ }
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45;
++ break;
++
++ /* 10GBase-T support */
++ case AXGBE_PORT_MODE_10GBASE_T:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_KR;
++ }
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
++ break;
++
++ /* 10GBase-R support */
++ case AXGBE_PORT_MODE_10GBASE_R:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
++ phy_data->start_mode = AXGBE_MODE_SFI;
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
++ break;
++
++ /* SFP support */
++ case AXGBE_PORT_MODE_SFP:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ pdata->phy.supported |= SUPPORTED_FIBRE;
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
++ }
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ phy_data->start_mode = AXGBE_MODE_SFI;
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |=
++ SUPPORTED_10000baseR_FEC;
++ }
++
++ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
++
++ axgbe_phy_sfp_setup(pdata);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) &&
++ (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) {
++ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
++ phy_data->phydev_mode);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)",
++ phy_data->mdio_addr, phy_data->phydev_mode);
++ return -EINVAL;
++ }
++ }
++
++ if (phy_data->redrv && !phy_data->redrv_if) {
++ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
++ AXGBE_MDIO_MODE_CL22);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)",
++ phy_data->redrv_addr);
++ return -EINVAL;
++ }
++ }
++ return 0;
++}
++
++void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
++{
++ struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
++
++ phy_impl->init = axgbe_phy_init;
++}
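
To make the port-property decode above concrete, the sketch below mirrors the SFP branch of the switch statement for a hypothetical port strapped for multiple speeds; because the speed bits are tested in ascending order, the highest supported speed's branch runs last and determines start_mode (AXGBE_MODE_SFI when 10G is present).

/* Illustrative helper, not part of the patch: expected start mode for an
 * SFP port, mirroring the SFP case of axgbe_phy_init() above.
 */
static enum axgbe_mode example_sfp_start_mode(unsigned int port_speeds)
{
	enum axgbe_mode mode = AXGBE_MODE_UNKNOWN;

	if (port_speeds & AXGBE_PHY_PORT_SPEED_100)
		mode = AXGBE_MODE_SGMII_100;
	if (port_speeds & AXGBE_PHY_PORT_SPEED_1000)
		mode = AXGBE_MODE_SGMII_1000;
	if (port_speeds & AXGBE_PHY_PORT_SPEED_10000)
		mode = AXGBE_MODE_SFI;	/* last match wins when several bits are set */

	return mode;
}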
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-06-18-net-axgbe-add-phy-programming-apis.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-06-18-net-axgbe-add-phy-programming-apis.patch
new file mode 100644
index 00000000..3202f23a
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-06-18-net-axgbe-add-phy-programming-apis.patch
@@ -0,0 +1,2468 @@
+From patchwork Fri Mar 9 08:42:22 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,06/18] net/axgbe: add phy programming apis
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35825
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-6-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:22 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_dev.c | 27 +
+ drivers/net/axgbe/axgbe_mdio.c | 963 +++++++++++++++++++++++++
+ drivers/net/axgbe/axgbe_phy_impl.c | 1397 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 2387 insertions(+)
+
+diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
+index 90a99c4..528241e 100644
+--- a/drivers/net/axgbe/axgbe_dev.c
++++ b/drivers/net/axgbe/axgbe_dev.c
+@@ -310,6 +310,30 @@ static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
+ }
+ }
+
++static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
++{
++ unsigned int ss;
++
++ switch (speed) {
++ case SPEED_1000:
++ ss = 0x03;
++ break;
++ case SPEED_2500:
++ ss = 0x02;
++ break;
++ case SPEED_10000:
++ ss = 0x00;
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
++ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
++
++ return 0;
++}
++
+ static int __axgbe_exit(struct axgbe_port *pdata)
+ {
+ unsigned int count = 2000;
+@@ -346,9 +370,12 @@ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+ {
+ hw_if->exit = axgbe_exit;
+
++
+ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
+
++ hw_if->set_speed = axgbe_set_speed;
++
+ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
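+
The new set_speed hook maps SPEED_1000, SPEED_2500 and SPEED_10000 to MAC_TCR.SS values 0x03, 0x02 and 0x00 respectively, rejects any other speed with -EINVAL, and only writes the register when the value actually changes. A minimal sketch of a caller (mirroring the KR rate-change helper added to axgbe_mdio.c below, and assuming the phy_impl.set_mode callback from the previous patch):

/* Illustrative caller, not part of the patch: switch the MAC to 10G and
 * let the PHY implementation finish the rate change.
 */
static void example_rate_change_to_kr(struct axgbe_port *pdata)
{
	if (pdata->hw_if.set_speed(pdata, SPEED_10000))
		return;	/* speed not supported by the MAC */

	pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR);
}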
+diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
+index 4fbf5c3..753dde9 100644
+--- a/drivers/net/axgbe/axgbe_mdio.c
++++ b/drivers/net/axgbe/axgbe_mdio.c
+@@ -129,6 +129,963 @@
+ #include "axgbe_common.h"
+ #include "axgbe_phy.h"
+
++static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata)
++{
++ int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
++ reg &= ~AXGBE_AN_CL37_INT_MASK;
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
++}
++
++static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata)
++{
++ int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
++ reg &= ~AXGBE_AN_CL37_INT_MASK;
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
++ reg &= ~AXGBE_PCS_CL37_BP;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
++}
++
++static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata)
++{
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
++}
++
++static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata)
++{
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
++}
++
++static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata)
++{
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
++ AXGBE_AN_CL73_INT_MASK);
++}
++
++static void axgbe_an_enable_interrupts(struct axgbe_port *pdata)
++{
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ axgbe_an73_enable_interrupts(pdata);
++ break;
++ case AXGBE_AN_MODE_CL37:
++ case AXGBE_AN_MODE_CL37_SGMII:
++		PMD_DRV_LOG(ERR, "Unsupported CL37 AN mode");
++ break;
++ default:
++ break;
++ }
++}
++
++static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata)
++{
++ axgbe_an73_clear_interrupts(pdata);
++ axgbe_an37_clear_interrupts(pdata);
++}
++
++static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata)
++{
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++
++ reg |= AXGBE_KR_TRAINING_ENABLE;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++}
++
++static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata)
++{
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++
++ reg &= ~AXGBE_KR_TRAINING_ENABLE;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++}
++
++static void axgbe_kr_mode(struct axgbe_port *pdata)
++{
++ /* Enable KR training */
++ axgbe_an73_enable_kr_training(pdata);
++
++ /* Set MAC to 10G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_10000);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR);
++}
++
++static void axgbe_kx_2500_mode(struct axgbe_port *pdata)
++{
++ /* Disable KR training */
++ axgbe_an73_disable_kr_training(pdata);
++
++ /* Set MAC to 2.5G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_2500);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500);
++}
++
++static void axgbe_kx_1000_mode(struct axgbe_port *pdata)
++{
++ /* Disable KR training */
++ axgbe_an73_disable_kr_training(pdata);
++
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000);
++}
++
++static void axgbe_sfi_mode(struct axgbe_port *pdata)
++{
++ /* If a KR re-driver is present, change to KR mode instead */
++ if (pdata->kr_redrv)
++ return axgbe_kr_mode(pdata);
++
++ /* Disable KR training */
++ axgbe_an73_disable_kr_training(pdata);
++
++ /* Set MAC to 10G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_10000);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI);
++}
++
++static void axgbe_x_mode(struct axgbe_port *pdata)
++{
++ /* Disable KR training */
++ axgbe_an73_disable_kr_training(pdata);
++
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X);
++}
++
++static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata)
++{
++ /* Disable KR training */
++ axgbe_an73_disable_kr_training(pdata);
++
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000);
++}
++
++static void axgbe_sgmii_100_mode(struct axgbe_port *pdata)
++{
++ /* Disable KR training */
++ axgbe_an73_disable_kr_training(pdata);
++
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
++
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100);
++}
++
++static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata)
++{
++ return pdata->phy_if.phy_impl.cur_mode(pdata);
++}
++
++static bool axgbe_in_kr_mode(struct axgbe_port *pdata)
++{
++ return axgbe_cur_mode(pdata) == AXGBE_MODE_KR;
++}
++
++static void axgbe_change_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ switch (mode) {
++ case AXGBE_MODE_KX_1000:
++ axgbe_kx_1000_mode(pdata);
++ break;
++ case AXGBE_MODE_KX_2500:
++ axgbe_kx_2500_mode(pdata);
++ break;
++ case AXGBE_MODE_KR:
++ axgbe_kr_mode(pdata);
++ break;
++ case AXGBE_MODE_SGMII_100:
++ axgbe_sgmii_100_mode(pdata);
++ break;
++ case AXGBE_MODE_SGMII_1000:
++ axgbe_sgmii_1000_mode(pdata);
++ break;
++ case AXGBE_MODE_X:
++ axgbe_x_mode(pdata);
++ break;
++ case AXGBE_MODE_SFI:
++ axgbe_sfi_mode(pdata);
++ break;
++ case AXGBE_MODE_UNKNOWN:
++ break;
++ default:
++ PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)", mode);
++ }
++}
++
++static void axgbe_switch_mode(struct axgbe_port *pdata)
++{
++ axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
++}
++
++static void axgbe_set_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ if (mode == axgbe_cur_mode(pdata))
++ return;
++
++ axgbe_change_mode(pdata, mode);
++}
++
++static bool axgbe_use_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
++}
++
++static void axgbe_an37_set(struct axgbe_port *pdata, bool enable,
++ bool restart)
++{
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
++ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
++
++ if (enable)
++ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
++
++ if (restart)
++ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
++}
++
++static void axgbe_an37_disable(struct axgbe_port *pdata)
++{
++ axgbe_an37_set(pdata, false, false);
++ axgbe_an37_disable_interrupts(pdata);
++}
++
++static void axgbe_an73_set(struct axgbe_port *pdata, bool enable,
++ bool restart)
++{
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
++ reg &= ~MDIO_AN_CTRL1_ENABLE;
++
++ if (enable)
++ reg |= MDIO_AN_CTRL1_ENABLE;
++
++ if (restart)
++ reg |= MDIO_AN_CTRL1_RESTART;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
++}
++
++static void axgbe_an73_restart(struct axgbe_port *pdata)
++{
++ axgbe_an73_enable_interrupts(pdata);
++ axgbe_an73_set(pdata, true, true);
++}
++
++static void axgbe_an73_disable(struct axgbe_port *pdata)
++{
++ axgbe_an73_set(pdata, false, false);
++ axgbe_an73_disable_interrupts(pdata);
++}
++
++static void axgbe_an_restart(struct axgbe_port *pdata)
++{
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ axgbe_an73_restart(pdata);
++ break;
++ case AXGBE_AN_MODE_CL37:
++ case AXGBE_AN_MODE_CL37_SGMII:
++ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37");
++ break;
++ default:
++ break;
++ }
++}
++
++static void axgbe_an_disable(struct axgbe_port *pdata)
++{
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ axgbe_an73_disable(pdata);
++ break;
++ case AXGBE_AN_MODE_CL37:
++ case AXGBE_AN_MODE_CL37_SGMII:
++ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37");
++ break;
++ default:
++ break;
++ }
++}
++
++static void axgbe_an_disable_all(struct axgbe_port *pdata)
++{
++ axgbe_an73_disable(pdata);
++ axgbe_an37_disable(pdata);
++}
++
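++/* Resolve the FEC advertisement against the link partner and, if KR
++ * training is enabled, start the training sequence (bracketed by the
++ * optional PHY-specific pre/post hooks).
++ */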
++static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
++ enum axgbe_rx *state)
++{
++ unsigned int ad_reg, lp_reg, reg;
++
++ *state = AXGBE_RX_COMPLETE;
++
++ /* If we're not in KR mode then we're done */
++ if (!axgbe_in_kr_mode(pdata))
++ return AXGBE_AN_PAGE_RECEIVED;
++
++ /* Enable/Disable FEC */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
++ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
++ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
++ reg |= pdata->fec_ability;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
++
++ /* Start KR training */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++ if (reg & AXGBE_KR_TRAINING_ENABLE) {
++ if (pdata->phy_if.phy_impl.kr_training_pre)
++ pdata->phy_if.phy_impl.kr_training_pre(pdata);
++
++ reg |= AXGBE_KR_TRAINING_START;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
++ reg);
++
++ if (pdata->phy_if.phy_impl.kr_training_post)
++ pdata->phy_if.phy_impl.kr_training_post(pdata);
++ }
++
++ return AXGBE_AN_PAGE_RECEIVED;
++}
++
++static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata,
++ enum axgbe_rx *state)
++{
++ u16 msg;
++
++ *state = AXGBE_RX_XNP;
++
++ msg = AXGBE_XNP_MCF_NULL_MESSAGE;
++ msg |= AXGBE_XNP_MP_FORMATTED;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
++
++ return AXGBE_AN_PAGE_RECEIVED;
++}
++
++static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata,
++ enum axgbe_rx *state)
++{
++ unsigned int link_support;
++ unsigned int reg, ad_reg, lp_reg;
++
++ /* Read Base Ability register 2 first */
++ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
++
++ /* Check for a supported mode, otherwise restart in a different one */
++ link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
++ if (!(reg & link_support))
++ return AXGBE_AN_INCOMPAT_LINK;
++
++ /* Check Extended Next Page support */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
++
++ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
++ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
++ ? axgbe_an73_tx_xnp(pdata, state)
++ : axgbe_an73_tx_training(pdata, state);
++}
++
++static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata,
++ enum axgbe_rx *state)
++{
++ unsigned int ad_reg, lp_reg;
++
++ /* Check Extended Next Page support */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
++
++ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
++ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
++ ? axgbe_an73_tx_xnp(pdata, state)
++ : axgbe_an73_tx_training(pdata, state);
++}
++
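++/* Handle a received auto-negotiation page: reset the RX state tracking
++ * if the negotiation window has timed out, then process the page based
++ * on the current base-page/XNP receive state.
++ */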
++static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
++{
++ enum axgbe_rx *state;
++ unsigned long an_timeout;
++ enum axgbe_an ret;
++ unsigned long ticks;
++
++ if (!pdata->an_start) {
++ pdata->an_start = rte_get_timer_cycles();
++ } else {
++ an_timeout = pdata->an_start +
++ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
++ ticks = rte_get_timer_cycles();
++ if (time_after(ticks, an_timeout)) {
++ /* Auto-negotiation timed out, reset state */
++ pdata->kr_state = AXGBE_RX_BPA;
++ pdata->kx_state = AXGBE_RX_BPA;
++
++ pdata->an_start = rte_get_timer_cycles();
++ }
++ }
++
++ state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state
++ : &pdata->kx_state;
++
++ switch (*state) {
++ case AXGBE_RX_BPA:
++ ret = axgbe_an73_rx_bpa(pdata, state);
++ break;
++ case AXGBE_RX_XNP:
++ ret = axgbe_an73_rx_xnp(pdata, state);
++ break;
++ default:
++ ret = AXGBE_AN_ERROR;
++ }
++
++ return ret;
++}
++
++static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
++{
++ /* Be sure we aren't looping trying to negotiate */
++ if (axgbe_in_kr_mode(pdata)) {
++ pdata->kr_state = AXGBE_RX_ERROR;
++
++ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
++ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
++ return AXGBE_AN_NO_LINK;
++
++ if (pdata->kx_state != AXGBE_RX_BPA)
++ return AXGBE_AN_NO_LINK;
++ } else {
++ pdata->kx_state = AXGBE_RX_ERROR;
++
++ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
++ return AXGBE_AN_NO_LINK;
++
++ if (pdata->kr_state != AXGBE_RX_BPA)
++ return AXGBE_AN_NO_LINK;
++ }
++
++ axgbe_an73_disable(pdata);
++ axgbe_switch_mode(pdata);
++ axgbe_an73_restart(pdata);
++
++ return AXGBE_AN_INCOMPAT_LINK;
++}
++
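++/* Clause 73 auto-negotiation state machine, driven by the interrupt
++ * causes latched in pdata->an_int and iterated until the state settles
++ * and all pending causes are consumed.
++ */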
++static void axgbe_an73_state_machine(struct axgbe_port *pdata)
++{
++ enum axgbe_an cur_state = pdata->an_state;
++
++ if (!pdata->an_int)
++ return;
++
++next_int:
++ if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) {
++ pdata->an_state = AXGBE_AN_PAGE_RECEIVED;
++ pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV;
++ } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) {
++ pdata->an_state = AXGBE_AN_INCOMPAT_LINK;
++ pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK;
++ } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) {
++ pdata->an_state = AXGBE_AN_COMPLETE;
++ pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT;
++ } else {
++ pdata->an_state = AXGBE_AN_ERROR;
++ }
++
++again:
++ cur_state = pdata->an_state;
++
++ switch (pdata->an_state) {
++ case AXGBE_AN_READY:
++ pdata->an_supported = 0;
++ break;
++ case AXGBE_AN_PAGE_RECEIVED:
++ pdata->an_state = axgbe_an73_page_received(pdata);
++ pdata->an_supported++;
++ break;
++ case AXGBE_AN_INCOMPAT_LINK:
++ pdata->an_supported = 0;
++ pdata->parallel_detect = 0;
++ pdata->an_state = axgbe_an73_incompat_link(pdata);
++ break;
++ case AXGBE_AN_COMPLETE:
++ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
++ break;
++ case AXGBE_AN_NO_LINK:
++ break;
++ default:
++ pdata->an_state = AXGBE_AN_ERROR;
++ }
++
++ if (pdata->an_state == AXGBE_AN_NO_LINK) {
++ pdata->an_int = 0;
++ axgbe_an73_clear_interrupts(pdata);
++ pdata->eth_dev->data->dev_link.link_status =
++ ETH_LINK_DOWN;
++ } else if (pdata->an_state == AXGBE_AN_ERROR) {
++ PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
++ cur_state);
++ pdata->an_int = 0;
++ axgbe_an73_clear_interrupts(pdata);
++ }
++
++ if (pdata->an_state >= AXGBE_AN_COMPLETE) {
++ pdata->an_result = pdata->an_state;
++ pdata->an_state = AXGBE_AN_READY;
++ pdata->kr_state = AXGBE_RX_BPA;
++ pdata->kx_state = AXGBE_RX_BPA;
++ pdata->an_start = 0;
++ }
++
++ if (cur_state != pdata->an_state)
++ goto again;
++
++ if (pdata->an_int)
++ goto next_int;
++
++ axgbe_an73_enable_interrupts(pdata);
++}
++
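++/* Clause 73 AN interrupt service routine: mask AN interrupts, latch and
++ * clear the pending causes, then run the state machine under the AN
++ * mutex.
++ */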
++static void axgbe_an73_isr(struct axgbe_port *pdata)
++{
++ /* Disable AN interrupts */
++ axgbe_an73_disable_interrupts(pdata);
++
++ /* Save the interrupt(s) that fired */
++ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
++
++ if (pdata->an_int) {
++ /* Clear the interrupt(s) that fired and process them */
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
++ pthread_mutex_lock(&pdata->an_mutex);
++ axgbe_an73_state_machine(pdata);
++ pthread_mutex_unlock(&pdata->an_mutex);
++ } else {
++ /* Enable AN interrupts */
++ axgbe_an73_enable_interrupts(pdata);
++ }
++}
++
++static void axgbe_an_isr(struct axgbe_port *pdata)
++{
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ axgbe_an73_isr(pdata);
++ break;
++ case AXGBE_AN_MODE_CL37:
++ case AXGBE_AN_MODE_CL37_SGMII:
++ PMD_DRV_LOG(ERR, "AN_MODE_37 not supported");
++ break;
++ default:
++ break;
++ }
++}
++
++static void axgbe_an_combined_isr(struct axgbe_port *pdata)
++{
++ axgbe_an_isr(pdata);
++}
++
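++/* Program the Clause 73 advertisement registers (FEC, 10GBase-KR,
++ * 1000Base-KX/2.5GBase-KX and pause bits) from the PHY implementation's
++ * advertising mask; extended next pages are not advertised.
++ */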
++static void axgbe_an73_init(struct axgbe_port *pdata)
++{
++ unsigned int advertising, reg;
++
++ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
++
++ /* Set up Advertisement register 3 first */
++ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ if (advertising & ADVERTISED_10000baseR_FEC)
++ reg |= 0xc000;
++ else
++ reg &= ~0xc000;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
++
++ /* Set up Advertisement register 2 next */
++ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ if (advertising & ADVERTISED_10000baseKR_Full)
++ reg |= 0x80;
++ else
++ reg &= ~0x80;
++
++ if ((advertising & ADVERTISED_1000baseKX_Full) ||
++ (advertising & ADVERTISED_2500baseX_Full))
++ reg |= 0x20;
++ else
++ reg &= ~0x20;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
++
++ /* Set up Advertisement register 1 last */
++ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ if (advertising & ADVERTISED_Pause)
++ reg |= 0x400;
++ else
++ reg &= ~0x400;
++
++ if (advertising & ADVERTISED_Asym_Pause)
++ reg |= 0x800;
++ else
++ reg &= ~0x800;
++
++ /* We don't intend to perform XNP */
++ reg &= ~AXGBE_XNP_NP_EXCHANGE;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
++}
++
++static void axgbe_an_init(struct axgbe_port *pdata)
++{
++ /* Set up advertisement registers based on current settings */
++ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ axgbe_an73_init(pdata);
++ break;
++ case AXGBE_AN_MODE_CL37:
++ case AXGBE_AN_MODE_CL37_SGMII:
++ PMD_DRV_LOG(ERR, "Unsupported AN_CL37");
++ break;
++ default:
++ break;
++ }
++}
++
++static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
++{
++ if (pdata->phy.link) {
++ /* Speed support */
++ if (pdata->phy_speed != pdata->phy.speed)
++ pdata->phy_speed = pdata->phy.speed;
++ if (pdata->phy_link != pdata->phy.link)
++ pdata->phy_link = pdata->phy.link;
++ } else if (pdata->phy_link) {
++ pdata->phy_link = 0;
++ pdata->phy_speed = SPEED_UNKNOWN;
++ }
++}
++
++static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
++{
++ enum axgbe_mode mode;
++
++ /* Disable auto-negotiation */
++ axgbe_an_disable(pdata);
++
++ /* Set specified mode for specified speed */
++ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
++ switch (mode) {
++ case AXGBE_MODE_KX_1000:
++ case AXGBE_MODE_KX_2500:
++ case AXGBE_MODE_KR:
++ case AXGBE_MODE_SGMII_100:
++ case AXGBE_MODE_SGMII_1000:
++ case AXGBE_MODE_X:
++ case AXGBE_MODE_SFI:
++ break;
++ case AXGBE_MODE_UNKNOWN:
++ default:
++ return -EINVAL;
++ }
++
++ /* Validate duplex mode */
++ if (pdata->phy.duplex != DUPLEX_FULL)
++ return -EINVAL;
++
++ axgbe_set_mode(pdata, mode);
++
++ return 0;
++}
++
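++/* Core auto-negotiation setup: use a fixed mode when auto-negotiation
++ * is disabled, otherwise select the fastest usable mode, reset the AN
++ * state and restart negotiation with the AN interrupt re-enabled.
++ */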
++static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
++{
++ int ret;
++
++ axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
++ pdata->link_check = rte_get_timer_cycles();
++
++ ret = pdata->phy_if.phy_impl.an_config(pdata);
++ if (ret)
++ return ret;
++
++ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
++ ret = axgbe_phy_config_fixed(pdata);
++ if (ret || !pdata->kr_redrv)
++ return ret;
++ }
++
++ /* Disable auto-negotiation interrupt */
++ rte_intr_disable(&pdata->pci_dev->intr_handle);
++
++ /* Start auto-negotiation in a supported mode */
++ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_KR);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_SFI);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_X);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
++ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
++ } else {
++ rte_intr_enable(&pdata->pci_dev->intr_handle);
++ return -EINVAL;
++ }
++
++ /* Disable and stop any in progress auto-negotiation */
++ axgbe_an_disable_all(pdata);
++
++ /* Clear any auto-negotiation interrupts */
++ axgbe_an_clear_interrupts_all(pdata);
++
++ pdata->an_result = AXGBE_AN_READY;
++ pdata->an_state = AXGBE_AN_READY;
++ pdata->kr_state = AXGBE_RX_BPA;
++ pdata->kx_state = AXGBE_RX_BPA;
++
++ /* Re-enable auto-negotiation interrupt */
++ rte_intr_enable(&pdata->pci_dev->intr_handle);
++
++ axgbe_an_init(pdata);
++ axgbe_an_restart(pdata);
++
++ return 0;
++}
++
++static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
++{
++ int ret;
++
++ pthread_mutex_lock(&pdata->an_mutex);
++
++ ret = __axgbe_phy_config_aneg(pdata);
++ if (ret)
++ axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state);
++ else
++ axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state);
++
++ pthread_mutex_unlock(&pdata->an_mutex);
++
++ return ret;
++}
++
++static bool axgbe_phy_aneg_done(struct axgbe_port *pdata)
++{
++ return pdata->an_result == AXGBE_AN_COMPLETE;
++}
++
++static void axgbe_check_link_timeout(struct axgbe_port *pdata)
++{
++ unsigned long link_timeout;
++ unsigned long ticks;
++
++ link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
++ 2 * rte_get_timer_hz());
++ ticks = rte_get_timer_cycles();
++ if (time_after(ticks, link_timeout))
++ axgbe_phy_config_aneg(pdata);
++}
++
++static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
++{
++ return pdata->phy_if.phy_impl.an_outcome(pdata);
++}
++
++static void axgbe_phy_status_result(struct axgbe_port *pdata)
++{
++ enum axgbe_mode mode;
++
++ pdata->phy.lp_advertising = 0;
++
++ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
++ mode = axgbe_cur_mode(pdata);
++ else
++ mode = axgbe_phy_status_aneg(pdata);
++
++ switch (mode) {
++ case AXGBE_MODE_SGMII_100:
++ pdata->phy.speed = SPEED_100;
++ break;
++ case AXGBE_MODE_X:
++ case AXGBE_MODE_KX_1000:
++ case AXGBE_MODE_SGMII_1000:
++ pdata->phy.speed = SPEED_1000;
++ break;
++ case AXGBE_MODE_KX_2500:
++ pdata->phy.speed = SPEED_2500;
++ break;
++ case AXGBE_MODE_KR:
++ case AXGBE_MODE_SFI:
++ pdata->phy.speed = SPEED_10000;
++ break;
++ case AXGBE_MODE_UNKNOWN:
++ default:
++ pdata->phy.speed = SPEED_UNKNOWN;
++ }
++
++ pdata->phy.duplex = DUPLEX_FULL;
++
++ axgbe_set_mode(pdata, mode);
++}
++
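++/* Periodic PHY status poll: refresh the link state from the PHY
++ * implementation, restart auto-negotiation when the PHY requests it and
++ * apply the resulting operating mode via axgbe_phy_status_result().
++ */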
++static void axgbe_phy_status(struct axgbe_port *pdata)
++{
++ unsigned int link_aneg;
++ int an_restart;
++
++ if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) {
++ pdata->phy.link = 0;
++ goto adjust_link;
++ }
++
++ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
++
++ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
++ &an_restart);
++ if (an_restart) {
++ axgbe_phy_config_aneg(pdata);
++ return;
++ }
++
++ if (pdata->phy.link) {
++ if (link_aneg && !axgbe_phy_aneg_done(pdata)) {
++ axgbe_check_link_timeout(pdata);
++ return;
++ }
++ axgbe_phy_status_result(pdata);
++ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state))
++ axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state);
++ } else {
++ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) {
++ axgbe_check_link_timeout(pdata);
++
++ if (link_aneg)
++ return;
++ }
++ axgbe_phy_status_result(pdata);
++ }
++
++adjust_link:
++ axgbe_phy_adjust_link(pdata);
++}
++
++static void axgbe_phy_stop(struct axgbe_port *pdata)
++{
++ if (!pdata->phy_started)
++ return;
++ /* Indicate the PHY is down */
++ pdata->phy_started = 0;
++ /* Disable auto-negotiation */
++ axgbe_an_disable_all(pdata);
++ pdata->phy_if.phy_impl.stop(pdata);
++ pdata->phy.link = 0;
++ axgbe_phy_adjust_link(pdata);
++}
++
++static int axgbe_phy_start(struct axgbe_port *pdata)
++{
++ int ret;
++
++ ret = pdata->phy_if.phy_impl.start(pdata);
++ if (ret)
++ return ret;
++ /* Set initial mode - call the mode setting routines
++ * directly to ensure we are properly configured
++ */
++ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
++ axgbe_kr_mode(pdata);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
++ axgbe_kx_2500_mode(pdata);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
++ axgbe_kx_1000_mode(pdata);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
++ axgbe_sfi_mode(pdata);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
++ axgbe_x_mode(pdata);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
++ axgbe_sgmii_1000_mode(pdata);
++ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
++ axgbe_sgmii_100_mode(pdata);
++ } else {
++ ret = -EINVAL;
++ goto err_stop;
++ }
++ /* Indicate the PHY is up and running */
++ pdata->phy_started = 1;
++ axgbe_an_init(pdata);
++ axgbe_an_enable_interrupts(pdata);
++ return axgbe_phy_config_aneg(pdata);
++
++err_stop:
++ pdata->phy_if.phy_impl.stop(pdata);
++
++ return ret;
++}
++
++static int axgbe_phy_reset(struct axgbe_port *pdata)
++{
++ int ret;
++
++ ret = pdata->phy_if.phy_impl.reset(pdata);
++ if (ret)
++ return ret;
++
++ /* Disable auto-negotiation for now */
++ axgbe_an_disable_all(pdata);
++
++ /* Clear auto-negotiation interrupts */
++ axgbe_an_clear_interrupts_all(pdata);
++
++ return 0;
++}
++
+ static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata)
+ {
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+@@ -200,4 +1157,10 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
+ void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if)
+ {
+ phy_if->phy_init = axgbe_phy_init;
++ phy_if->phy_reset = axgbe_phy_reset;
++ phy_if->phy_start = axgbe_phy_start;
++ phy_if->phy_stop = axgbe_phy_stop;
++ phy_if->phy_status = axgbe_phy_status;
++ phy_if->phy_config_aneg = axgbe_phy_config_aneg;
++ phy_if->an_isr = axgbe_an_combined_isr;
+ }
+diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
+index cea4266..5f69651 100644
+--- a/drivers/net/axgbe/axgbe_phy_impl.c
++++ b/drivers/net/axgbe/axgbe_phy_impl.c
+@@ -361,6 +361,1337 @@ struct axgbe_phy_data {
+ unsigned int redrv_model;
+ };
+
++static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
++
++static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
++ struct axgbe_i2c_op *i2c_op)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* Be sure we own the bus */
++ if (!phy_data->comm_owned)
++ return -EIO;
++
++ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
++}
++
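++/* Write a re-driver register over I2C: the 5-byte frame carries the
++ * register (with read/write indicator), the big-endian value and a
++ * checksum byte, and the one-byte read-back must return 0xff.
++ */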
++static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
++ unsigned int val)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ struct axgbe_i2c_op i2c_op;
++ uint16_t *redrv_val;
++ u8 redrv_data[5], csum;
++ unsigned int i, retry;
++ int ret;
++
++ /* High byte of register contains read/write indicator */
++ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
++ redrv_data[1] = reg & 0xff;
++ redrv_val = (uint16_t *)&redrv_data[2];
++ *redrv_val = rte_cpu_to_be_16(val);
++
++ /* Calculate 1 byte checksum */
++ csum = 0;
++ for (i = 0; i < 4; i++) {
++ csum += redrv_data[i];
++ if (redrv_data[i] > csum)
++ csum++;
++ }
++ redrv_data[4] = ~csum;
++
++ retry = 1;
++again1:
++ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
++ i2c_op.target = phy_data->redrv_addr;
++ i2c_op.len = sizeof(redrv_data);
++ i2c_op.buf = redrv_data;
++ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if (ret) {
++ if ((ret == -EAGAIN) && retry--)
++ goto again1;
++
++ return ret;
++ }
++
++ retry = 1;
++again2:
++ i2c_op.cmd = AXGBE_I2C_CMD_READ;
++ i2c_op.target = phy_data->redrv_addr;
++ i2c_op.len = 1;
++ i2c_op.buf = redrv_data;
++ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if (ret) {
++ if ((ret == -EAGAIN) && retry--)
++ goto again2;
++
++ return ret;
++ }
++
++ if (redrv_data[0] != 0xff) {
++ PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
++ ret = -EIO;
++ }
++
++ return ret;
++}
++
++static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
++ void *reg, unsigned int reg_len,
++ void *val, unsigned int val_len)
++{
++ struct axgbe_i2c_op i2c_op;
++ int retry, ret;
++
++ retry = 1;
++again1:
++ /* Set the specified register to read */
++ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
++ i2c_op.target = target;
++ i2c_op.len = reg_len;
++ i2c_op.buf = reg;
++ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if (ret) {
++ if ((ret == -EAGAIN) && retry--)
++ goto again1;
++
++ return ret;
++ }
++
++ retry = 1;
++again2:
++ /* Read the specified register */
++ i2c_op.cmd = AXGBE_I2C_CMD_READ;
++ i2c_op.target = target;
++ i2c_op.len = val_len;
++ i2c_op.buf = val;
++ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if ((ret == -EAGAIN) && retry--)
++ goto again2;
++
++ return ret;
++}
++
++static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ struct axgbe_i2c_op i2c_op;
++ uint8_t mux_channel;
++
++ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
++ return 0;
++
++ /* Select no mux channels */
++ mux_channel = 0;
++ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
++ i2c_op.target = phy_data->sfp_mux_address;
++ i2c_op.len = sizeof(mux_channel);
++ i2c_op.buf = &mux_channel;
++
++ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
++}
++
++static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ struct axgbe_i2c_op i2c_op;
++ u8 mux_channel;
++
++ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
++ return 0;
++
++ /* Select desired mux channel */
++ mux_channel = 1 << phy_data->sfp_mux_channel;
++ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
++ i2c_op.target = phy_data->sfp_mux_address;
++ i2c_op.len = sizeof(mux_channel);
++ i2c_op.buf = &mux_channel;
++
++ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
++}
++
++static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ phy_data->comm_owned = 0;
++
++ pthread_mutex_unlock(&pdata->phy_mutex);
++}
++
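++/* Acquire the shared communication path: take the software mutex and
++ * then claim the I2C and MDIO hardware mutex registers, retrying for up
++ * to five seconds before giving up.
++ */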
++static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ uint64_t timeout;
++ unsigned int mutex_id;
++
++ if (phy_data->comm_owned)
++ return 0;
++
++ /* The I2C and MDIO/GPIO buses are multiplexed between multiple devices,
++ * so the driver needs to take the software mutex and then the hardware
++ * mutexes before it can use either bus.
++ */
++ pthread_mutex_lock(&pdata->phy_mutex);
++
++ /* Clear the mutexes */
++ XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE);
++ XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE);
++
++ /* Mutex formats are the same for I2C and MDIO/GPIO */
++ mutex_id = 0;
++ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
++ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
++
++ timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5);
++ while (time_before(rte_get_timer_cycles(), timeout)) {
++ /* Must be all zeroes in order to obtain the mutex */
++ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
++ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
++ rte_delay_us(100);
++ continue;
++ }
++
++ /* Obtain the mutex */
++ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
++ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
++
++ phy_data->comm_owned = 1;
++ return 0;
++ }
++
++ pthread_mutex_unlock(&pdata->phy_mutex);
++
++ PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes");
++
++ return -ETIMEDOUT;
++}
++
++static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (phy_data->sfp_mod_absent) {
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.advertising = pdata->phy.supported;
++ }
++
++ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
++ pdata->phy.advertising &= ~ADVERTISED_TP;
++ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
++ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
++ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
++ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
++ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
++
++ switch (phy_data->sfp_base) {
++ case AXGBE_SFP_BASE_1000_T:
++ case AXGBE_SFP_BASE_1000_SX:
++ case AXGBE_SFP_BASE_1000_LX:
++ case AXGBE_SFP_BASE_1000_CX:
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.advertising |= ADVERTISED_Autoneg;
++ break;
++ case AXGBE_SFP_BASE_10000_SR:
++ case AXGBE_SFP_BASE_10000_LR:
++ case AXGBE_SFP_BASE_10000_LRM:
++ case AXGBE_SFP_BASE_10000_ER:
++ case AXGBE_SFP_BASE_10000_CR:
++ default:
++ pdata->phy.speed = SPEED_10000;
++ pdata->phy.duplex = DUPLEX_FULL;
++ pdata->phy.autoneg = AUTONEG_DISABLE;
++ break;
++ }
++
++ switch (phy_data->sfp_base) {
++ case AXGBE_SFP_BASE_1000_T:
++ case AXGBE_SFP_BASE_1000_CX:
++ case AXGBE_SFP_BASE_10000_CR:
++ pdata->phy.advertising |= ADVERTISED_TP;
++ break;
++ default:
++ pdata->phy.advertising |= ADVERTISED_FIBRE;
++ }
++
++ switch (phy_data->sfp_speed) {
++ case AXGBE_SFP_SPEED_100_1000:
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
++ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ break;
++ case AXGBE_SFP_SPEED_1000:
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ break;
++ case AXGBE_SFP_SPEED_10000:
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
++ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
++ break;
++ default:
++ /* Choose the fastest supported speed */
++ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
++ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
++ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
++ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
++ }
++}
++
++static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom,
++ enum axgbe_sfp_speed sfp_speed)
++{
++ u8 *sfp_base, min, max;
++
++ sfp_base = sfp_eeprom->base;
++
++ switch (sfp_speed) {
++ case AXGBE_SFP_SPEED_1000:
++ min = AXGBE_SFP_BASE_BR_1GBE_MIN;
++ max = AXGBE_SFP_BASE_BR_1GBE_MAX;
++ break;
++ case AXGBE_SFP_SPEED_10000:
++ min = AXGBE_SFP_BASE_BR_10GBE_MIN;
++ max = AXGBE_SFP_BASE_BR_10GBE_MAX;
++ break;
++ default:
++ return false;
++ }
++
++ return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) &&
++ (sfp_base[AXGBE_SFP_BASE_BR] <= max));
++}
++
++static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (!phy_data->sfp_changed)
++ return;
++
++ phy_data->sfp_phy_avail = 0;
++
++ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
++ return;
++}
++
++static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
++
++ if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME],
++ AXGBE_BEL_FUSE_VENDOR, AXGBE_SFP_BASE_VENDOR_NAME_LEN))
++ return false;
++
++ if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN],
++ AXGBE_BEL_FUSE_PARTNO, AXGBE_SFP_BASE_VENDOR_PN_LEN)) {
++ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
++ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
++ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
++ return true;
++ }
++
++ return false;
++}
++
++static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata)
++{
++ if (axgbe_phy_belfuse_parse_quirks(pdata))
++ return true;
++
++ return false;
++}
++
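++/* Derive the cable type, SFP base type and supported speed from the
++ * serial ID EEPROM compliance codes, after applying any vendor-specific
++ * quirks.
++ */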
++static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
++ uint8_t *sfp_base;
++
++ sfp_base = sfp_eeprom->base;
++
++ if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP)
++ return;
++
++ if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP)
++ return;
++
++ if (axgbe_phy_sfp_parse_quirks(pdata))
++ return;
++
++ /* Assume ACTIVE cable unless told it is PASSIVE */
++ if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) {
++ phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE;
++ phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN];
++ } else {
++ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
++ }
++
++ /* Determine the type of SFP */
++ if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR;
++ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR)
++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR;
++ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] &
++ AXGBE_SFP_BASE_10GBE_CC_LRM)
++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM;
++ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER)
++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER;
++ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX)
++ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
++ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX)
++ phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX;
++ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX)
++ phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX;
++ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T)
++ phy_data->sfp_base = AXGBE_SFP_BASE_1000_T;
++ else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) &&
++ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
++ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
++
++ switch (phy_data->sfp_base) {
++ case AXGBE_SFP_BASE_1000_T:
++ phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000;
++ break;
++ case AXGBE_SFP_BASE_1000_SX:
++ case AXGBE_SFP_BASE_1000_LX:
++ case AXGBE_SFP_BASE_1000_CX:
++ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
++ break;
++ case AXGBE_SFP_BASE_10000_SR:
++ case AXGBE_SFP_BASE_10000_LR:
++ case AXGBE_SFP_BASE_10000_LRM:
++ case AXGBE_SFP_BASE_10000_ER:
++ case AXGBE_SFP_BASE_10000_CR:
++ phy_data->sfp_speed = AXGBE_SFP_SPEED_10000;
++ break;
++ default:
++ break;
++ }
++}
++
++static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf,
++ unsigned int len)
++{
++ uint8_t cc;
++
++ for (cc = 0; len; buf++, len--)
++ cc += *buf;
++
++ return (cc == cc_in) ? true : false;
++}
++
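++/* Read the SFP serial ID EEPROM through the (optional) I2C mux and
++ * validate the base and extended checksums before caching the data and
++ * flagging a new or changed module.
++ */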
++static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ struct axgbe_sfp_eeprom sfp_eeprom;
++ uint8_t eeprom_addr;
++ int ret;
++
++ ret = axgbe_phy_sfp_get_mux(pdata);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "I2C error setting SFP MUX");
++ return ret;
++ }
++
++ /* Read the SFP serial ID eeprom */
++ eeprom_addr = 0;
++ ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS,
++ &eeprom_addr, sizeof(eeprom_addr),
++ &sfp_eeprom, sizeof(sfp_eeprom));
++ if (ret) {
++ PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM");
++ goto put;
++ }
++
++ /* Validate the contents read */
++ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC],
++ sfp_eeprom.base,
++ sizeof(sfp_eeprom.base) - 1)) {
++ ret = -EINVAL;
++ goto put;
++ }
++
++ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC],
++ sfp_eeprom.extd,
++ sizeof(sfp_eeprom.extd) - 1)) {
++ ret = -EINVAL;
++ goto put;
++ }
++
++ /* Check for an added or changed SFP */
++ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
++ phy_data->sfp_changed = 1;
++ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
++
++ if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) {
++ uint8_t diag_type;
++ diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG];
++
++ if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
++ phy_data->sfp_diags = 1;
++ }
++ } else {
++ phy_data->sfp_changed = 0;
++ }
++
++put:
++ axgbe_phy_sfp_put_mux(pdata);
++
++ return ret;
++}
++
++static void axgbe_phy_sfp_signals(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int gpio_input;
++ u8 gpio_reg, gpio_ports[2];
++ int ret;
++
++ /* Read the input port registers */
++ gpio_reg = 0;
++ ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
++ &gpio_reg, sizeof(gpio_reg),
++ gpio_ports, sizeof(gpio_ports));
++ if (ret) {
++ PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs");
++ return;
++ }
++
++ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
++
++ if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) {
++ /* No GPIO, just assume the module is present for now */
++ phy_data->sfp_mod_absent = 0;
++ } else {
++ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
++ phy_data->sfp_mod_absent = 0;
++ }
++
++ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) &&
++ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
++ phy_data->sfp_rx_los = 1;
++
++ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) &&
++ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
++ phy_data->sfp_tx_fault = 1;
++}
++
++static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ phy_data->sfp_mod_absent = 1;
++ phy_data->sfp_phy_avail = 0;
++ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
++}
++
++static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data)
++{
++ phy_data->sfp_rx_los = 0;
++ phy_data->sfp_tx_fault = 0;
++ phy_data->sfp_mod_absent = 1;
++ phy_data->sfp_diags = 0;
++ phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN;
++ phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN;
++ phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN;
++}
++
++static void axgbe_phy_sfp_detect(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ /* Reset the SFP signals and info */
++ axgbe_phy_sfp_reset(phy_data);
++
++ ret = axgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return;
++
++ /* Read the SFP signals and check for module presence */
++ axgbe_phy_sfp_signals(pdata);
++ if (phy_data->sfp_mod_absent) {
++ axgbe_phy_sfp_mod_absent(pdata);
++ goto put;
++ }
++
++ ret = axgbe_phy_sfp_read_eeprom(pdata);
++ if (ret) {
++ /* Treat any error as if there isn't an SFP plugged in */
++ axgbe_phy_sfp_reset(phy_data);
++ axgbe_phy_sfp_mod_absent(pdata);
++ goto put;
++ }
++
++ axgbe_phy_sfp_parse_eeprom(pdata);
++ axgbe_phy_sfp_external_phy(pdata);
++
++put:
++ axgbe_phy_sfp_phy_settings(pdata);
++ axgbe_phy_put_comm_ownership(pdata);
++}
++
++static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata)
++{
++ pdata->phy.tx_pause = 0;
++ pdata->phy.rx_pause = 0;
++}
++
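++/* Resolve the auto-negotiation outcome when a KR re-driver is present:
++ * compare the local advertisement with the link partner ability
++ * registers and map the result onto the port and SFP configuration.
++ */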
++static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ enum axgbe_mode mode;
++ unsigned int ad_reg, lp_reg;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++
++ /* Use external PHY to determine flow control */
++ if (pdata->phy.pause_autoneg)
++ axgbe_phy_phydev_flowctrl(pdata);
++
++ /* Compare Advertisement and Link Partner register 2 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
++ if (lp_reg & 0x80)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ if (lp_reg & 0x20)
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++
++ ad_reg &= lp_reg;
++ if (ad_reg & 0x80) {
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ mode = AXGBE_MODE_KR;
++ break;
++ default:
++ mode = AXGBE_MODE_SFI;
++ break;
++ }
++ } else if (ad_reg & 0x20) {
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ mode = AXGBE_MODE_KX_1000;
++ break;
++ case AXGBE_PORT_MODE_1000BASE_X:
++ mode = AXGBE_MODE_X;
++ break;
++ case AXGBE_PORT_MODE_SFP:
++ switch (phy_data->sfp_base) {
++ case AXGBE_SFP_BASE_1000_T:
++ mode = AXGBE_MODE_SGMII_1000;
++ break;
++ case AXGBE_SFP_BASE_1000_SX:
++ case AXGBE_SFP_BASE_1000_LX:
++ case AXGBE_SFP_BASE_1000_CX:
++ default:
++ mode = AXGBE_MODE_X;
++ break;
++ }
++ break;
++ default:
++ mode = AXGBE_MODE_SGMII_1000;
++ break;
++ }
++ } else {
++ mode = AXGBE_MODE_UNKNOWN;
++ }
++
++ /* Compare Advertisement and Link Partner register 3 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
++ if (lp_reg & 0xc000)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++
++ return mode;
++}
++
++static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata)
++{
++ enum axgbe_mode mode;
++ unsigned int ad_reg, lp_reg;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++
++ /* Compare Advertisement and Link Partner register 1 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
++ if (lp_reg & 0x400)
++ pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ if (lp_reg & 0x800)
++ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++
++ if (pdata->phy.pause_autoneg) {
++ /* Set flow control based on auto-negotiation result */
++ pdata->phy.tx_pause = 0;
++ pdata->phy.rx_pause = 0;
++
++ if (ad_reg & lp_reg & 0x400) {
++ pdata->phy.tx_pause = 1;
++ pdata->phy.rx_pause = 1;
++ } else if (ad_reg & lp_reg & 0x800) {
++ if (ad_reg & 0x400)
++ pdata->phy.rx_pause = 1;
++ else if (lp_reg & 0x400)
++ pdata->phy.tx_pause = 1;
++ }
++ }
++
++ /* Compare Advertisement and Link Partner register 2 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
++ if (lp_reg & 0x80)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ if (lp_reg & 0x20)
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++
++ ad_reg &= lp_reg;
++ if (ad_reg & 0x80)
++ mode = AXGBE_MODE_KR;
++ else if (ad_reg & 0x20)
++ mode = AXGBE_MODE_KX_1000;
++ else
++ mode = AXGBE_MODE_UNKNOWN;
++
++ /* Compare Advertisement and Link Partner register 3 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
++ if (lp_reg & 0xc000)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++
++ return mode;
++}
++
++static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata)
++{
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ return axgbe_phy_an73_outcome(pdata);
++ case AXGBE_AN_MODE_CL73_REDRV:
++ return axgbe_phy_an73_redrv_outcome(pdata);
++ case AXGBE_AN_MODE_CL37:
++ case AXGBE_AN_MODE_CL37_SGMII:
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int advertising;
++
++ /* Without a re-driver, just return current advertising */
++ if (!phy_data->redrv)
++ return pdata->phy.advertising;
++
++ /* With the KR re-driver we need to advertise a single speed */
++ advertising = pdata->phy.advertising;
++ advertising &= ~ADVERTISED_1000baseKX_Full;
++ advertising &= ~ADVERTISED_10000baseKR_Full;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ case AXGBE_PORT_MODE_1000BASE_T:
++ case AXGBE_PORT_MODE_1000BASE_X:
++ case AXGBE_PORT_MODE_NBASE_T:
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ case AXGBE_PORT_MODE_10GBASE_T:
++ PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported");
++ break;
++ case AXGBE_PORT_MODE_10GBASE_R:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ case AXGBE_PORT_MODE_SFP:
++ switch (phy_data->sfp_base) {
++ case AXGBE_SFP_BASE_1000_T:
++ case AXGBE_SFP_BASE_1000_SX:
++ case AXGBE_SFP_BASE_1000_LX:
++ case AXGBE_SFP_BASE_1000_CX:
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ default:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ }
++ break;
++ default:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ }
++
++ return advertising;
++}
++
++static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
++{
++ /* Dummy implementation: there is no case to support external PHY
++ * devices registered through kernel APIs.
++ */
++ return 0;
++}
++
++static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
++{
++ switch (phy_data->sfp_base) {
++ case AXGBE_SFP_BASE_1000_T:
++ return AXGBE_AN_MODE_CL37_SGMII;
++ case AXGBE_SFP_BASE_1000_SX:
++ case AXGBE_SFP_BASE_1000_LX:
++ case AXGBE_SFP_BASE_1000_CX:
++ return AXGBE_AN_MODE_CL37;
++ default:
++ return AXGBE_AN_MODE_NONE;
++ }
++}
++
++static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* A KR re-driver will always require CL73 AN */
++ if (phy_data->redrv)
++ return AXGBE_AN_MODE_CL73_REDRV;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ return AXGBE_AN_MODE_CL73;
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ return AXGBE_AN_MODE_NONE;
++ case AXGBE_PORT_MODE_1000BASE_T:
++ return AXGBE_AN_MODE_CL37_SGMII;
++ case AXGBE_PORT_MODE_1000BASE_X:
++ return AXGBE_AN_MODE_CL37;
++ case AXGBE_PORT_MODE_NBASE_T:
++ return AXGBE_AN_MODE_CL37_SGMII;
++ case AXGBE_PORT_MODE_10GBASE_T:
++ return AXGBE_AN_MODE_CL73;
++ case AXGBE_PORT_MODE_10GBASE_R:
++ return AXGBE_AN_MODE_NONE;
++ case AXGBE_PORT_MODE_SFP:
++ return axgbe_phy_an_sfp_mode(phy_data);
++ default:
++ return AXGBE_AN_MODE_NONE;
++ }
++}
++
++static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata,
++ enum axgbe_phy_redrv_mode mode)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ u16 redrv_reg, redrv_val;
++
++ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
++ redrv_val = (u16)mode;
++
++ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
++ redrv_reg, redrv_val);
++}
++
++static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata,
++ enum axgbe_phy_redrv_mode mode)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int redrv_reg;
++ int ret;
++
++ /* Calculate the register to write */
++ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
++
++ ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode);
++
++ return ret;
++}
++
++static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ enum axgbe_phy_redrv_mode mode;
++ int ret;
++
++ if (!phy_data->redrv)
++ return;
++
++ mode = AXGBE_PHY_REDRV_MODE_CX;
++ if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) &&
++ (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) &&
++ (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR))
++ mode = AXGBE_PHY_REDRV_MODE_SR;
++
++ ret = axgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return;
++
++ if (phy_data->redrv_if)
++ axgbe_phy_set_redrv_mode_i2c(pdata, mode);
++ else
++ axgbe_phy_set_redrv_mode_mdio(pdata, mode);
++
++ axgbe_phy_put_comm_ownership(pdata);
++}
++
++static void axgbe_phy_start_ratechange(struct axgbe_port *pdata)
++{
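++ /* A set status bit means a previous firmware command has not yet
++ * completed; either way there is nothing more to do here.
++ */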
++ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ return;
++}
++
++static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata)
++{
++ unsigned int wait;
++
++ /* Wait for command to complete */
++ wait = AXGBE_RATECHANGE_COUNT;
++ while (wait--) {
++ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ return;
++
++ rte_delay_us(1500);
++ }
++}
++
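++/* Request a receiver reset cycle from the firmware through the
++ * XP_DRIVER_SCRATCH mailbox registers and wait for it to complete.
++ */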
++static void axgbe_phy_rrc(struct axgbe_port *pdata)
++{
++ unsigned int s0;
++
++ axgbe_phy_start_ratechange(pdata);
++
++ /* Receiver Reset Cycle */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ axgbe_phy_complete_ratechange(pdata);
++}
++
++static void axgbe_phy_power_off(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ axgbe_phy_start_ratechange(pdata);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++ axgbe_phy_complete_ratechange(pdata);
++ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
++}
++
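++/* Switch the PHY to 10G/SFI operation, selecting the firmware
++ * sub-command from the cable type and, for passive cables, the cable
++ * length.
++ */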
++static void axgbe_phy_sfi_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ axgbe_phy_set_redrv_mode(pdata);
++
++ axgbe_phy_start_ratechange(pdata);
++
++ /* 10G/SFI */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
++ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) {
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++ } else {
++ if (phy_data->sfp_cable_len <= 1)
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
++ else if (phy_data->sfp_cable_len <= 3)
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
++ else
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
++ }
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++ axgbe_phy_complete_ratechange(pdata);
++ phy_data->cur_mode = AXGBE_MODE_SFI;
++}
++
++static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ axgbe_phy_set_redrv_mode(pdata);
++
++ axgbe_phy_start_ratechange(pdata);
++
++ /* 10G/KR */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++ axgbe_phy_complete_ratechange(pdata);
++ phy_data->cur_mode = AXGBE_MODE_KR;
++}
++
++static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ return phy_data->cur_mode;
++}
++
++static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* No switching if not 10GBase-T */
++ if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T)
++ return axgbe_phy_cur_mode(pdata);
++
++ switch (axgbe_phy_cur_mode(pdata)) {
++ case AXGBE_MODE_SGMII_100:
++ case AXGBE_MODE_SGMII_1000:
++ return AXGBE_MODE_KR;
++ case AXGBE_MODE_KR:
++ default:
++ return AXGBE_MODE_SGMII_1000;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata
++ __rte_unused)
++{
++ return AXGBE_MODE_KX_2500;
++}
++
++static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata)
++{
++ /* If we are in KR switch to KX, and vice-versa */
++ switch (axgbe_phy_cur_mode(pdata)) {
++ case AXGBE_MODE_KX_1000:
++ return AXGBE_MODE_KR;
++ case AXGBE_MODE_KR:
++ default:
++ return AXGBE_MODE_KX_1000;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ return axgbe_phy_switch_bp_mode(pdata);
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ return axgbe_phy_switch_bp_2500_mode(pdata);
++ case AXGBE_PORT_MODE_1000BASE_T:
++ case AXGBE_PORT_MODE_NBASE_T:
++ case AXGBE_PORT_MODE_10GBASE_T:
++ return axgbe_phy_switch_baset_mode(pdata);
++ case AXGBE_PORT_MODE_1000BASE_X:
++ case AXGBE_PORT_MODE_10GBASE_R:
++ case AXGBE_PORT_MODE_SFP:
++ /* No switching, so just return current mode */
++ return axgbe_phy_cur_mode(pdata);
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data
++ __rte_unused,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_1000:
++ return AXGBE_MODE_X;
++ case SPEED_10000:
++ return AXGBE_MODE_KR;
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data
++ __rte_unused,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_100:
++ return AXGBE_MODE_SGMII_100;
++ case SPEED_1000:
++ return AXGBE_MODE_SGMII_1000;
++ case SPEED_10000:
++ return AXGBE_MODE_KR;
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_100:
++ return AXGBE_MODE_SGMII_100;
++ case SPEED_1000:
++ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
++ return AXGBE_MODE_SGMII_1000;
++ else
++ return AXGBE_MODE_X;
++ case SPEED_10000:
++ case SPEED_UNKNOWN:
++ return AXGBE_MODE_SFI;
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed)
++{
++ switch (speed) {
++ case SPEED_2500:
++ return AXGBE_MODE_KX_2500;
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_get_bp_mode(int speed)
++{
++ switch (speed) {
++ case SPEED_1000:
++ return AXGBE_MODE_KX_1000;
++ case SPEED_10000:
++ return AXGBE_MODE_KR;
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata,
++ int speed)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ return axgbe_phy_get_bp_mode(speed);
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ return axgbe_phy_get_bp_2500_mode(speed);
++ case AXGBE_PORT_MODE_1000BASE_T:
++ case AXGBE_PORT_MODE_NBASE_T:
++ case AXGBE_PORT_MODE_10GBASE_T:
++ return axgbe_phy_get_baset_mode(phy_data, speed);
++ case AXGBE_PORT_MODE_1000BASE_X:
++ case AXGBE_PORT_MODE_10GBASE_R:
++ return axgbe_phy_get_basex_mode(phy_data, speed);
++ case AXGBE_PORT_MODE_SFP:
++ return axgbe_phy_get_sfp_mode(phy_data, speed);
++ default:
++ return AXGBE_MODE_UNKNOWN;
++ }
++}
++
++static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
++{
++ switch (mode) {
++ case AXGBE_MODE_KR:
++ axgbe_phy_kr_mode(pdata);
++ break;
++ case AXGBE_MODE_SFI:
++ axgbe_phy_sfi_mode(pdata);
++ break;
++ default:
++ break;
++ }
++}
++
++static bool axgbe_phy_check_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode, u32 advert)
++{
++ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
++ if (pdata->phy.advertising & advert)
++ return true;
++ } else {
++ enum axgbe_mode cur_mode;
++
++ cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed);
++ if (cur_mode == mode)
++ return true;
++ }
++
++ return false;
++}
++
++static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ switch (mode) {
++ case AXGBE_MODE_X:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case AXGBE_MODE_KR:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseT_Full);
++ default:
++ return false;
++ }
++}
++
++static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ switch (mode) {
++ case AXGBE_MODE_SGMII_100:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_100baseT_Full);
++ case AXGBE_MODE_SGMII_1000:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case AXGBE_MODE_KR:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseT_Full);
++ default:
++ return false;
++ }
++}
++
++static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (mode) {
++ case AXGBE_MODE_X:
++ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
++ return false;
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case AXGBE_MODE_SGMII_100:
++ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
++ return false;
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_100baseT_Full);
++ case AXGBE_MODE_SGMII_1000:
++ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
++ return false;
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case AXGBE_MODE_SFI:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseT_Full);
++ default:
++ return false;
++ }
++}
++
++static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ switch (mode) {
++ case AXGBE_MODE_KX_2500:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_2500baseX_Full);
++ default:
++ return false;
++ }
++}
++
++static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata,
++ enum axgbe_mode mode)
++{
++ switch (mode) {
++ case AXGBE_MODE_KX_1000:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseKX_Full);
++ case AXGBE_MODE_KR:
++ return axgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseKR_Full);
++ default:
++ return false;
++ }
++}
++
++static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_BACKPLANE:
++ return axgbe_phy_use_bp_mode(pdata, mode);
++ case AXGBE_PORT_MODE_BACKPLANE_2500:
++ return axgbe_phy_use_bp_2500_mode(pdata, mode);
++ case AXGBE_PORT_MODE_1000BASE_T:
++ case AXGBE_PORT_MODE_NBASE_T:
++ case AXGBE_PORT_MODE_10GBASE_T:
++ return axgbe_phy_use_baset_mode(pdata, mode);
++ case AXGBE_PORT_MODE_1000BASE_X:
++ case AXGBE_PORT_MODE_10GBASE_R:
++ return axgbe_phy_use_basex_mode(pdata, mode);
++ case AXGBE_PORT_MODE_SFP:
++ return axgbe_phy_use_sfp_mode(pdata, mode);
++ default:
++ return false;
++ }
++}
++
++static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ *an_restart = 0;
++
++ if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) {
++ /* Check SFP signals */
++ axgbe_phy_sfp_detect(pdata);
++
++ if (phy_data->sfp_changed) {
++ *an_restart = 1;
++ return 0;
++ }
++
++ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
++ return 0;
++ }
++
++ /* Link status is latched low, so read once to clear
++ * and then read again to get current state
++ */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++ if (reg & MDIO_STAT1_LSTATUS)
++ return 1;
++
++ /* No link, attempt a receiver reset cycle */
++ if (phy_data->rrc_count++) {
++ phy_data->rrc_count = 0;
++ axgbe_phy_rrc(pdata);
++ }
++
++ return 0;
++}
++
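The double XMDIO read in axgbe_phy_link_status() above is the essential detail: the MDIO_STAT1 link bit is latched low, so the first read returns (and clears) the stale value and only the second read reflects the current link state. A minimal standalone sketch of the same pattern, reusing the driver's XMDIO_READ macro and MDIO_* constants already referenced above (the helper name is hypothetical, not part of the patch):

        /* Hypothetical helper: returns 1 if the link is up right now. */
        static int axgbe_read_latched_link(struct axgbe_port *pdata)
        {
                unsigned int reg;

                (void)XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); /* clears the latch */
                reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); /* current state */
                return !!(reg & MDIO_STAT1_LSTATUS);
        }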
+ static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata)
+ {
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+@@ -557,6 +1888,59 @@ static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
+ return true;
+ }
+
++static void axgbe_phy_stop(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* Reset SFP data */
++ axgbe_phy_sfp_reset(phy_data);
++ axgbe_phy_sfp_mod_absent(pdata);
++
++ /* Power off the PHY */
++ axgbe_phy_power_off(pdata);
++
++ /* Stop the I2C controller */
++ pdata->i2c_if.i2c_stop(pdata);
++}
++
++static int axgbe_phy_start(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ /* Start the I2C controller */
++ ret = pdata->i2c_if.i2c_start(pdata);
++ if (ret)
++ return ret;
++
++ /* Start in highest supported mode */
++ axgbe_phy_set_mode(pdata, phy_data->start_mode);
++
++ /* After starting the I2C controller, we can check for an SFP */
++ switch (phy_data->port_mode) {
++ case AXGBE_PORT_MODE_SFP:
++ axgbe_phy_sfp_detect(pdata);
++ break;
++ default:
++ break;
++ }
++
++ return ret;
++}
++
++static int axgbe_phy_reset(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++ enum axgbe_mode cur_mode;
++
++ /* Reset by power cycling the PHY */
++ cur_mode = phy_data->cur_mode;
++ axgbe_phy_power_off(pdata);
++ /* On the very first reset, cur_mode is still AXGBE_MODE_UNKNOWN */
++ axgbe_phy_set_mode(pdata, cur_mode);
++ return 0;
++}
++
+ static int axgbe_phy_init(struct axgbe_port *pdata)
+ {
+ struct axgbe_phy_data *phy_data;
+@@ -796,4 +2180,17 @@ void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+ struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = axgbe_phy_init;
++ phy_impl->reset = axgbe_phy_reset;
++ phy_impl->start = axgbe_phy_start;
++ phy_impl->stop = axgbe_phy_stop;
++ phy_impl->link_status = axgbe_phy_link_status;
++ phy_impl->use_mode = axgbe_phy_use_mode;
++ phy_impl->set_mode = axgbe_phy_set_mode;
++ phy_impl->get_mode = axgbe_phy_get_mode;
++ phy_impl->switch_mode = axgbe_phy_switch_mode;
++ phy_impl->cur_mode = axgbe_phy_cur_mode;
++ phy_impl->an_mode = axgbe_phy_an_mode;
++ phy_impl->an_config = axgbe_phy_an_config;
++ phy_impl->an_advertising = axgbe_phy_an_advertising;
++ phy_impl->an_outcome = axgbe_phy_an_outcome;
+ }
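The table above wires the v2 PHY implementation into the generic PHY layer through function pointers. A short sketch of how a caller dispatches through it (illustrative only; it assumes the port keeps its axgbe_phy_if instance in pdata->phy_if, which is not shown in this hunk):

        /* Hypothetical call site: query link state via the registered op. */
        static int axgbe_check_link(struct axgbe_port *pdata)
        {
                int an_restart = 0;

                /* dispatches to axgbe_phy_link_status() registered above */
                return pdata->phy_if.phy_impl.link_status(pdata, &an_restart);
        }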
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-07-18-net-axgbe-add-interrupt-handler-for-autonegotiation.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-07-18-net-axgbe-add-interrupt-handler-for-autonegotiation.patch
new file mode 100644
index 00000000..c327b7ba
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-07-18-net-axgbe-add-interrupt-handler-for-autonegotiation.patch
@@ -0,0 +1,98 @@
+From patchwork Fri Mar 9 08:42:23 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 07/18] net/axgbe: add interrupt handler for autonegotiation
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35826
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-7-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:23 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_ethdev.c | 37 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 37 insertions(+)
+
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index 0dc424d..daea0b2 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -131,6 +131,7 @@
+
+ static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+ static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
++static void axgbe_dev_interrupt_handler(void *param);
+
+ /* The set of PCI devices this driver supports */
+ #define AMD_PCI_VENDOR_ID 0x1022
+@@ -168,6 +169,30 @@ static struct axgbe_version_data axgbe_v2b = {
+ .i2c_support = 1,
+ };
+
++/*
++ * Interrupt handler triggered by the NIC for handling
++ * device-specific interrupts.
++ *
++ * @param param
++ *  The address of the parameter (struct rte_eth_dev *)
++ *  registered with the interrupt callback.
++ *
++ * @return
++ *  void
++ */
++static void
++axgbe_dev_interrupt_handler(void *param)
++{
++ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ pdata->phy_if.an_isr(pdata);
++
++ /* Re-enable interrupts; they are disabled after an interrupt is generated */
++ rte_intr_enable(&pdata->pci_dev->intr_handle);
++}
++
+ static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+ {
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+@@ -469,6 +494,9 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ return ret;
+ }
+
++ rte_intr_callback_register(&pci_dev->intr_handle,
++ axgbe_dev_interrupt_handler,
++ (void *)eth_dev);
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+@@ -479,15 +507,24 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ static int
+ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+ {
++ struct rte_pci_device *pci_dev;
++
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
++ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ /* Free MAC addresses */
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+
++ /* disable uio intr before callback unregister */
++ rte_intr_disable(&pci_dev->intr_handle);
++ rte_intr_callback_unregister(&pci_dev->intr_handle,
++ axgbe_dev_interrupt_handler,
++ (void *)eth_dev);
++
+ return 0;
+ }
+
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-08-18-net-axgbe-add-transmit-and-receive-queue-setup-apis.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-08-18-net-axgbe-add-transmit-and-receive-queue-setup-apis.patch
new file mode 100644
index 00000000..5f103985
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-08-18-net-axgbe-add-transmit-and-receive-queue-setup-apis.patch
@@ -0,0 +1,903 @@
+From patchwork Fri Mar 9 08:42:24 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 08/18] net/axgbe: add transmit and receive queue setup apis
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35827
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-8-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:24 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/Makefile | 1 +
+ drivers/net/axgbe/axgbe_ethdev.c | 81 +++++++++
+ drivers/net/axgbe/axgbe_ethdev.h | 36 ++++
+ drivers/net/axgbe/axgbe_rxtx.c | 363 +++++++++++++++++++++++++++++++++++++++
+ drivers/net/axgbe/axgbe_rxtx.h | 289 +++++++++++++++++++++++++++++++
+ 5 files changed, 770 insertions(+)
+ create mode 100644 drivers/net/axgbe/axgbe_rxtx.c
+ create mode 100644 drivers/net/axgbe/axgbe_rxtx.h
+
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+index a8f3358..d030530 100644
+--- a/drivers/net/axgbe/Makefile
++++ b/drivers/net/axgbe/Makefile
+@@ -146,5 +146,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c
+
+ include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index daea0b2..ee1e48d 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -125,6 +125,7 @@
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include "axgbe_rxtx.h"
+ #include "axgbe_ethdev.h"
+ #include "axgbe_common.h"
+ #include "axgbe_phy.h"
+@@ -132,6 +133,9 @@
+ static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+ static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+ static void axgbe_dev_interrupt_handler(void *param);
++static void axgbe_dev_close(struct rte_eth_dev *dev);
++static void axgbe_dev_info_get(struct rte_eth_dev *dev,
++ struct rte_eth_dev_info *dev_info);
+
+ /* The set of PCI devices this driver supports */
+ #define AMD_PCI_VENDOR_ID 0x1022
+@@ -169,6 +173,27 @@ static struct axgbe_version_data axgbe_v2b = {
+ .i2c_support = 1,
+ };
+
++static const struct rte_eth_desc_lim rx_desc_lim = {
++ .nb_max = AXGBE_MAX_RING_DESC,
++ .nb_min = AXGBE_MIN_RING_DESC,
++ .nb_align = 8,
++};
++
++static const struct rte_eth_desc_lim tx_desc_lim = {
++ .nb_max = AXGBE_MAX_RING_DESC,
++ .nb_min = AXGBE_MIN_RING_DESC,
++ .nb_align = 8,
++};
++
++static const struct eth_dev_ops axgbe_eth_dev_ops = {
++ .dev_close = axgbe_dev_close,
++ .dev_infos_get = axgbe_dev_info_get,
++ .rx_queue_setup = axgbe_dev_rx_queue_setup,
++ .rx_queue_release = axgbe_dev_rx_queue_release,
++ .tx_queue_setup = axgbe_dev_tx_queue_setup,
++ .tx_queue_release = axgbe_dev_tx_queue_release,
++};
++
+ /*
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+@@ -193,6 +218,57 @@ axgbe_dev_interrupt_handler(void *param)
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ }
+
++/* Clear all resources like TX/RX queues. */
++static void
++axgbe_dev_close(struct rte_eth_dev *dev)
++{
++ axgbe_dev_clear_queues(dev);
++}
++
++static void
++axgbe_dev_info_get(struct rte_eth_dev *dev,
++ struct rte_eth_dev_info *dev_info)
++{
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
++ dev_info->max_rx_queues = pdata->rx_ring_count;
++ dev_info->max_tx_queues = pdata->tx_ring_count;
++ dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
++ dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
++ dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
++ dev_info->speed_capa = ETH_LINK_SPEED_10G;
++
++ dev_info->rx_offload_capa =
++ DEV_RX_OFFLOAD_IPV4_CKSUM |
++ DEV_RX_OFFLOAD_UDP_CKSUM |
++ DEV_RX_OFFLOAD_TCP_CKSUM;
++
++ dev_info->tx_offload_capa =
++ DEV_TX_OFFLOAD_IPV4_CKSUM |
++ DEV_TX_OFFLOAD_UDP_CKSUM |
++ DEV_TX_OFFLOAD_TCP_CKSUM;
++
++ if (pdata->hw_feat.rss) {
++ dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
++ dev_info->reta_size = pdata->hw_feat.hash_table_size;
++ dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
++ }
++
++ dev_info->rx_desc_lim = rx_desc_lim;
++ dev_info->tx_desc_lim = tx_desc_lim;
++
++ dev_info->default_rxconf = (struct rte_eth_rxconf) {
++ .rx_free_thresh = AXGBE_RX_FREE_THRESH,
++ };
++
++ dev_info->default_txconf = (struct rte_eth_txconf) {
++ .tx_free_thresh = AXGBE_TX_FREE_THRESH,
++ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
++ ETH_TXQ_FLAGS_NOOFFLOADS,
++ };
++}
++
+ static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+ {
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+@@ -377,6 +453,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
++ eth_dev->dev_ops = &axgbe_eth_dev_ops;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+@@ -483,6 +560,8 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
++ pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
++ pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+ pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+ pthread_mutex_init(&pdata->i2c_mutex, NULL);
+ pthread_mutex_init(&pdata->an_mutex, NULL);
+@@ -518,6 +597,8 @@ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+ /* Free MAC addresses */
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
++ eth_dev->dev_ops = NULL;
++ axgbe_dev_clear_queues(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+index 6cddb57..4091d1a 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.h
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -132,6 +132,16 @@
+ #include <rte_lcore.h>
+ #include "axgbe_common.h"
+
++#define IRQ 0xff
++#define VLAN_HLEN 4
++
++#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
++#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
++#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
++#define AXGBE_MAX_MAC_ADDRS 1
++
++#define AXGBE_RX_BUF_ALIGN 64
++
+ #define AXGBE_MAX_DMA_CHANNELS 16
+ #define AXGBE_MAX_QUEUES 16
+ #define AXGBE_PRIORITY_QUEUES 8
+@@ -147,6 +157,23 @@
+ #define AXGBE_DMA_SYS_ARCACHE 0x0
+ #define AXGBE_DMA_SYS_AWCACHE 0x0
+
++/* DMA channel interrupt modes */
++#define AXGBE_IRQ_MODE_EDGE 0
++#define AXGBE_IRQ_MODE_LEVEL 1
++
++#define AXGBE_DMA_INTERRUPT_MASK 0x31c7
++
++#define AXGMAC_MIN_PACKET 60
++#define AXGMAC_STD_PACKET_MTU 1500
++#define AXGMAC_MAX_STD_PACKET 1518
++#define AXGMAC_JUMBO_PACKET_MTU 9000
++#define AXGMAC_MAX_JUMBO_PACKET 9018
++/* Inter-frame gap + preamble */
++#define AXGMAC_ETH_PREAMBLE (12 + 8)
++
++#define AXGMAC_PFC_DATA_LEN 46
++#define AXGMAC_PFC_DELAYS 14000
++
+ /* PCI BAR mapping */
+ #define AXGBE_AXGMAC_BAR 0
+ #define AXGBE_XPCS_BAR 1
+@@ -630,6 +657,10 @@ struct axgbe_port {
+
+ struct ether_addr mac_addr;
+
++ /* Software Tx/Rx structure pointers*/
++ void **rx_queues;
++ void **tx_queues;
++
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+@@ -656,6 +687,11 @@ struct axgbe_port {
+ /* I2C support */
+ struct axgbe_i2c i2c;
+ volatile int i2c_complete;
++
++ /* CRC stripping by h/w for Rx packets */
++ int crc_strip_enable;
++ /* Rx checksum offload enabled in hardware */
++ uint32_t rx_csum_enable;
+ };
+
+ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
+new file mode 100644
+index 0000000..64065e8
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_rxtx.c
+@@ -0,0 +1,363 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++#include "axgbe_rxtx.h"
++#include "axgbe_phy.h"
++
++#include <rte_time.h>
++#include <rte_mempool.h>
++#include <rte_mbuf.h>
++
++static void
++axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
++{
++ uint16_t i;
++ struct rte_mbuf **sw_ring;
++
++ if (rx_queue) {
++ sw_ring = rx_queue->sw_ring;
++ if (sw_ring) {
++ for (i = 0; i < rx_queue->nb_desc; i++) {
++ if (sw_ring[i])
++ rte_pktmbuf_free(sw_ring[i]);
++ }
++ rte_free(sw_ring);
++ }
++ rte_free(rx_queue);
++ }
++}
++
++void axgbe_dev_rx_queue_release(void *rxq)
++{
++ axgbe_rx_queue_release(rxq);
++}
++
++int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
++ uint16_t nb_desc, unsigned int socket_id,
++ const struct rte_eth_rxconf *rx_conf,
++ struct rte_mempool *mp)
++{
++ PMD_INIT_FUNC_TRACE();
++ uint32_t size;
++ const struct rte_memzone *dma;
++ struct axgbe_rx_queue *rxq;
++ uint32_t rx_desc = nb_desc;
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ /*
++ * Validate the Rx descriptor count: it must be a power of 2
++ * and must not exceed the h/w supported maximum.
++ */
++ if ((!rte_is_power_of_2(rx_desc)) ||
++ rx_desc > pdata->rx_desc_count)
++ return -EINVAL;
++ /* First allocate the rx queue data structure */
++ rxq = rte_zmalloc_socket("ethdev RX queue",
++ sizeof(struct axgbe_rx_queue),
++ RTE_CACHE_LINE_SIZE, socket_id);
++ if (!rxq) {
++ PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
++ return -ENOMEM;
++ }
++
++ rxq->cur = 0;
++ rxq->dirty = 0;
++ rxq->pdata = pdata;
++ rxq->mb_pool = mp;
++ rxq->queue_id = queue_idx;
++ rxq->port_id = dev->data->port_id;
++ rxq->nb_desc = rx_desc;
++ rxq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
++ (DMA_CH_INC * rxq->queue_id);
++ rxq->dma_tail_reg = (volatile uint32_t *)(rxq->dma_regs +
++ DMA_CH_RDTR_LO);
++
++ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
++ ETHER_CRC_LEN);
++ /* CRC stripping in AXGBE is configured per port, not per queue */
++ pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
++ rxq->free_thresh = rx_conf->rx_free_thresh ?
++ rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
++ if (rxq->free_thresh > rxq->nb_desc)
++ rxq->free_thresh = rxq->nb_desc >> 3;
++
++ /* Allocate RX ring hardware descriptors */
++ size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
++ dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
++ socket_id);
++ if (!dma) {
++ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
++ axgbe_rx_queue_release(rxq);
++ return -ENOMEM;
++ }
++ rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
++ rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
++ memset((void *)rxq->desc, 0, size);
++ /* Allocate software ring */
++ size = rxq->nb_desc * sizeof(struct rte_mbuf *);
++ rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
++ RTE_CACHE_LINE_SIZE,
++ socket_id);
++ if (!rxq->sw_ring) {
++ PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed!");
++ axgbe_rx_queue_release(rxq);
++ return -ENOMEM;
++ }
++ dev->data->rx_queues[queue_idx] = rxq;
++ if (!pdata->rx_queues)
++ pdata->rx_queues = dev->data->rx_queues;
++
++ return 0;
++}
++
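For context, axgbe_dev_rx_queue_setup() above is reached through the generic ethdev API and the dev_ops table registered earlier in this series; applications never call the PMD op directly. A minimal usage sketch (pool sizes and the 512 descriptor count are illustrative; 512 is a power of two within the AXGBE_MIN/MAX_RING_DESC limits):

        #include <rte_ethdev.h>
        #include <rte_mbuf.h>

        static int setup_one_rx_queue(uint16_t port_id)
        {
                struct rte_mempool *mb_pool;

                /* mbuf pool backing the Rx ring */
                mb_pool = rte_pktmbuf_pool_create("axgbe_rx_pool", 8192, 256, 0,
                                                  RTE_MBUF_DEFAULT_BUF_SIZE,
                                                  rte_socket_id());
                if (mb_pool == NULL)
                        return -1;

                /* dispatches to axgbe_dev_rx_queue_setup() via dev_ops */
                return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                                              NULL /* default rxconf */, mb_pool);
        }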
++/* Tx APIs */
++static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
++{
++ uint16_t i;
++ struct rte_mbuf **sw_ring;
++
++ if (tx_queue) {
++ sw_ring = tx_queue->sw_ring;
++ if (sw_ring) {
++ for (i = 0; i < tx_queue->nb_desc; i++) {
++ if (sw_ring[i])
++ rte_pktmbuf_free(sw_ring[i]);
++ }
++ rte_free(sw_ring);
++ }
++ rte_free(tx_queue);
++ }
++}
++
++void axgbe_dev_tx_queue_release(void *txq)
++{
++ axgbe_tx_queue_release(txq);
++}
++
++int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
++ uint16_t nb_desc, unsigned int socket_id,
++ const struct rte_eth_txconf *tx_conf)
++{
++ PMD_INIT_FUNC_TRACE();
++ uint32_t tx_desc;
++ struct axgbe_port *pdata;
++ struct axgbe_tx_queue *txq;
++ unsigned int tsize;
++ const struct rte_memzone *tz;
++
++ tx_desc = nb_desc;
++ pdata = (struct axgbe_port *)dev->data->dev_private;
++
++ /*
++ * Validate the Tx descriptor count: it must be a power of 2, at least
++ * AXGBE_MIN_RING_DESC, and must not exceed the h/w supported maximum.
++ */
++ if ((!rte_is_power_of_2(tx_desc)) ||
++ tx_desc > pdata->tx_desc_count ||
++ tx_desc < AXGBE_MIN_RING_DESC)
++ return -EINVAL;
++
++ /* First allocate the tx queue data structure */
++ txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
++ RTE_CACHE_LINE_SIZE);
++ if (!txq)
++ return -ENOMEM;
++ txq->pdata = pdata;
++
++ txq->nb_desc = tx_desc;
++ txq->free_thresh = tx_conf->tx_free_thresh ?
++ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
++ if (txq->free_thresh > txq->nb_desc)
++ txq->free_thresh = (txq->nb_desc >> 1);
++ txq->free_batch_cnt = txq->free_thresh;
++
++ if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
++ ETH_TXQ_FLAGS_NOOFFLOADS) {
++ txq->vector_disable = 1;
++ }
++
++ /* Allocate TX ring hardware descriptors */
++ tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
++ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
++ tsize, AXGBE_DESC_ALIGN, socket_id);
++ if (!tz) {
++ axgbe_tx_queue_release(txq);
++ return -ENOMEM;
++ }
++ memset(tz->addr, 0, tsize);
++ txq->ring_phys_addr = (uint64_t)tz->phys_addr;
++ txq->desc = tz->addr;
++ txq->queue_id = queue_idx;
++ txq->port_id = dev->data->port_id;
++ txq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
++ (DMA_CH_INC * txq->queue_id);
++ txq->dma_tail_reg = (volatile uint32_t *)(txq->dma_regs +
++ DMA_CH_TDTR_LO);
++ txq->cur = 0;
++ txq->dirty = 0;
++ txq->nb_desc_free = txq->nb_desc;
++ /* Allocate software ring */
++ tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
++ txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
++ RTE_CACHE_LINE_SIZE);
++ if (!txq->sw_ring) {
++ axgbe_tx_queue_release(txq);
++ return -ENOMEM;
++ }
++ dev->data->tx_queues[queue_idx] = txq;
++ if (!pdata->tx_queues)
++ pdata->tx_queues = dev->data->tx_queues;
++
++ return 0;
++}
++
++void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ uint8_t i;
++ struct axgbe_rx_queue *rxq;
++ struct axgbe_tx_queue *txq;
++
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++
++ if (rxq) {
++ axgbe_rx_queue_release(rxq);
++ dev->data->rx_queues[i] = NULL;
++ }
++ }
++
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txq = dev->data->tx_queues[i];
++
++ if (txq) {
++ axgbe_tx_queue_release(txq);
++ dev->data->tx_queues[i] = NULL;
++ }
++ }
++}
+diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
+new file mode 100644
+index 0000000..0d712f7
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_rxtx.h
+@@ -0,0 +1,289 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _AXGBE_RXTX_H_
++#define _AXGBE_RXTX_H_
++
++/* To suppress gcc warnings related to descriptor casting */
++#ifdef RTE_TOOLCHAIN_GCC
++#pragma GCC diagnostic ignored "-Wcast-qual"
++#endif
++
++#ifdef RTE_TOOLCHAIN_CLANG
++#pragma GCC diagnostic ignored "-Wcast-qual"
++#endif
++
++/* Descriptor related defines */
++#define AXGBE_MAX_RING_DESC 4096 /* must be a power of 2 */
++#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3)
++#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1)
++#define AXGBE_MIN_RING_DESC 32
++#define RTE_AXGBE_DESCS_PER_LOOP 4
++#define RTE_AXGBE_MAX_RX_BURST 32
++
++#define AXGBE_RX_FREE_THRESH 32
++#define AXGBE_TX_FREE_THRESH 32
++
++#define AXGBE_DESC_ALIGN 128
++#define AXGBE_DESC_OWN 0x80000000
++#define AXGBE_ERR_STATUS 0x000f0000
++#define AXGBE_L3_CSUM_ERR 0x00050000
++#define AXGBE_L4_CSUM_ERR 0x00060000
++
++#include "axgbe_common.h"
++
++#define AXGBE_GET_DESC_PT(_queue, _idx) \
++ (((_queue)->desc) + \
++ ((_idx) & ((_queue)->nb_desc - 1)))
++
++#define AXGBE_GET_DESC_IDX(_queue, _idx) \
++ ((_idx) & ((_queue)->nb_desc - 1))
++
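These helpers rely on nb_desc being a power of two so the ring index can wrap with a single bitmask instead of a modulo; that is why the queue-setup functions above reject other sizes. A tiny self-contained illustration (not driver code):

        #include <assert.h>
        #include <stdint.h>

        /* Wrap a free-running index onto a ring of nb_desc entries.
         * Valid only when nb_desc is a power of two.
         */
        static inline uint16_t ring_wrap(uint64_t idx, uint16_t nb_desc)
        {
                return (uint16_t)(idx & (nb_desc - 1));
        }

        int main(void)
        {
                assert(ring_wrap(4097, 4096) == 1); /* wraps past the end */
                assert(ring_wrap(31, 32) == 31);    /* unchanged inside the ring */
                return 0;
        }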
++/* Rx desc format */
++union axgbe_rx_desc {
++ struct {
++ uint64_t baddr;
++ uint32_t desc2;
++ uint32_t desc3;
++ } read;
++ struct {
++ uint32_t desc0;
++ uint32_t desc1;
++ uint32_t desc2;
++ uint32_t desc3;
++ } write;
++};
++
++struct axgbe_rx_queue {
++ /* mbuf pool for Rx buffers */
++ struct rte_mempool *mb_pool;
++ /* H/w Rx buffer size configured in DMA */
++ unsigned int buf_size;
++ /* CRC h/w offload */
++ uint16_t crc_len;
++ /* address of s/w rx buffers */
++ struct rte_mbuf **sw_ring;
++ /* Port private data */
++ struct axgbe_port *pdata;
++ /* Number of Rx descriptors in queue */
++ uint16_t nb_desc;
++ /* max free RX desc to hold */
++ uint16_t free_thresh;
++ /* Index of descriptor to check for packet availability */
++ uint64_t cur;
++ /* Index of descriptor to check for buffer reallocation */
++ uint64_t dirty;
++ /* Rx descriptor ring (virtual address) */
++ volatile union axgbe_rx_desc *desc;
++ /* Ring physical address */
++ uint64_t ring_phys_addr;
++ /* Dma Channel register address */
++ uint64_t dma_regs;
++ /* Dma channel tail register address*/
++ volatile uint32_t *dma_tail_reg;
++ /* DPDK queue index */
++ uint16_t queue_id;
++ /* dpdk port id*/
++ uint16_t port_id;
++ /* queue stats */
++ uint64_t pkts;
++ uint64_t bytes;
++ uint64_t errors;
++ /* Number of mbufs allocated from pool*/
++ uint64_t mbuf_alloc;
++
++} __rte_cache_aligned;
++
++/*Tx descriptor format */
++struct axgbe_tx_desc {
++ phys_addr_t baddr;
++ uint32_t desc2;
++ uint32_t desc3;
++};
++
++struct axgbe_tx_queue {
++ /* Port private data reference */
++ struct axgbe_port *pdata;
++ /* Number of Tx descriptors in queue*/
++ uint16_t nb_desc;
++ /* Start freeing Tx buffers if there are fewer free descriptors
++ * than this value
++ */
++ uint16_t free_thresh;
++ /* Available descriptors for Tx processing*/
++ uint16_t nb_desc_free;
++ /* Batch of mbufs/descs to release */
++ uint16_t free_batch_cnt;
++ /* Flag for vector support */
++ uint16_t vector_disable;
++ /* Index of descriptor to be used for current transfer */
++ uint64_t cur;
++ /* Index of descriptor to check for transfer complete */
++ uint64_t dirty;
++ /* Virtual address of ring */
++ volatile struct axgbe_tx_desc *desc;
++ /* Physical address of ring */
++ uint64_t ring_phys_addr;
++ /* Dma channel register space */
++ uint64_t dma_regs;
++ /* Dma tail register address of ring*/
++ volatile uint32_t *dma_tail_reg;
++ /* Tx queue index/id*/
++ uint16_t queue_id;
++ /* Tx mbufs mapped to Tx descriptors; they are freed once
++ * transmission has been confirmed
++ */
++ struct rte_mbuf **sw_ring;
++ /* dpdk port id*/
++ uint16_t port_id;
++ /* queue stats */
++ uint64_t pkts;
++ uint64_t bytes;
++ uint64_t errors;
++
++} __rte_cache_aligned;
++
++/* Queue related APIs */
++
++/*
++ * RX/TX function prototypes
++ */
++
++
++void axgbe_dev_tx_queue_release(void *txq);
++int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
++ uint16_t nb_tx_desc, unsigned int socket_id,
++ const struct rte_eth_txconf *tx_conf);
++
++void axgbe_dev_rx_queue_release(void *rxq);
++int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
++ uint16_t nb_rx_desc, unsigned int socket_id,
++ const struct rte_eth_rxconf *rx_conf,
++ struct rte_mempool *mb_pool);
++void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
++
++#endif /* _AXGBE_RXTX_H_ */
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-09-18-net-axgbe-add-DMA-programming-and-dev-start-and-stop-apis.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-09-18-net-axgbe-add-DMA-programming-and-dev-start-and-stop-apis.patch
new file mode 100644
index 00000000..03c98c50
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-09-18-net-axgbe-add-DMA-programming-and-dev-start-and-stop-apis.patch
@@ -0,0 +1,1027 @@
+From patchwork Fri Mar 9 08:42:25 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 09/18] net/axgbe: add DMA programming and dev start and stop apis
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35828
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-9-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:25 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_dev.c | 844 +++++++++++++++++++++++++++++++++++++++
+ drivers/net/axgbe/axgbe_ethdev.c | 96 +++++
+ 2 files changed, 940 insertions(+)
+
+diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
+index 528241e..a69a078 100644
+--- a/drivers/net/axgbe/axgbe_dev.c
++++ b/drivers/net/axgbe/axgbe_dev.c
+@@ -128,6 +128,13 @@
+ #include "axgbe_ethdev.h"
+ #include "axgbe_common.h"
+ #include "axgbe_phy.h"
++#include "axgbe_rxtx.h"
++
++static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
++{
++ return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
++ ETHER_CRC_LEN + VLAN_HLEN;
++}
+
+ /* query busy bit */
+ static int mdio_complete(struct axgbe_port *pdata)
+@@ -334,6 +341,191 @@ static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
+ return 0;
+ }
+
++static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
++{
++ unsigned int max_q_count, q_count;
++ unsigned int reg, reg_val;
++ unsigned int i;
++
++ /* Clear MTL flow control */
++ for (i = 0; i < pdata->rx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
++
++ /* Clear MAC flow control */
++ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
++ q_count = RTE_MIN(pdata->tx_q_count,
++ max_q_count);
++ reg = MAC_Q0TFCR;
++ for (i = 0; i < q_count; i++) {
++ reg_val = AXGMAC_IOREAD(pdata, reg);
++ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
++ AXGMAC_IOWRITE(pdata, reg, reg_val);
++
++ reg += MAC_QTFCR_INC;
++ }
++
++ return 0;
++}
++
++static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
++{
++ unsigned int max_q_count, q_count;
++ unsigned int reg, reg_val;
++ unsigned int i;
++
++ /* Set MTL flow control */
++ for (i = 0; i < pdata->rx_q_count; i++) {
++ unsigned int ehfc = 0;
++
++ /* Flow control thresholds are established */
++ if (pdata->rx_rfd[i])
++ ehfc = 1;
++
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
++ }
++
++ /* Set MAC flow control */
++ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
++ q_count = RTE_MIN(pdata->tx_q_count,
++ max_q_count);
++ reg = MAC_Q0TFCR;
++ for (i = 0; i < q_count; i++) {
++ reg_val = AXGMAC_IOREAD(pdata, reg);
++
++ /* Enable transmit flow control */
++ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
++ /* Set pause time */
++ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
++
++ AXGMAC_IOWRITE(pdata, reg, reg_val);
++
++ reg += MAC_QTFCR_INC;
++ }
++
++ return 0;
++}
++
++static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
++{
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
++
++ return 0;
++}
++
++static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
++{
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
++
++ return 0;
++}
++
++static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
++{
++ if (pdata->tx_pause)
++ axgbe_enable_tx_flow_control(pdata);
++ else
++ axgbe_disable_tx_flow_control(pdata);
++
++ return 0;
++}
++
++static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
++{
++ if (pdata->rx_pause)
++ axgbe_enable_rx_flow_control(pdata);
++ else
++ axgbe_disable_rx_flow_control(pdata);
++
++ return 0;
++}
++
++static void axgbe_config_flow_control(struct axgbe_port *pdata)
++{
++ axgbe_config_tx_flow_control(pdata);
++ axgbe_config_rx_flow_control(pdata);
++
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
++}
++
++static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
++ unsigned int queue,
++ unsigned int q_fifo_size)
++{
++ unsigned int frame_fifo_size;
++ unsigned int rfa, rfd;
++
++ frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
++
++ /* This path deals with just maximum frame sizes which are
++ * limited to a jumbo frame of 9,000 (plus headers, etc.)
++ * so we can never exceed the maximum allowable RFA/RFD
++ * values.
++ */
++ if (q_fifo_size <= 2048) {
++ /* rx_rfa/rx_rfd of zero signals no flow control */
++ pdata->rx_rfa[queue] = 0;
++ pdata->rx_rfd[queue] = 0;
++ return;
++ }
++
++ if (q_fifo_size <= 4096) {
++ /* Between 2048 and 4096 */
++ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
++ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
++ return;
++ }
++
++ if (q_fifo_size <= frame_fifo_size) {
++ /* Between 4096 and max-frame */
++ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
++ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
++ return;
++ }
++
++ if (q_fifo_size <= (frame_fifo_size * 3)) {
++ /* Between max-frame and 3 max-frames,
++ * trigger if we get just over a frame of data and
++ * resume when we have just under half a frame left.
++ */
++ rfa = q_fifo_size - frame_fifo_size;
++ rfd = rfa + (frame_fifo_size / 2);
++ } else {
++ /* Above 3 max-frames - trigger when just over
++ * 2 frames of space available
++ */
++ rfa = frame_fifo_size * 2;
++ rfa += AXGMAC_FLOW_CONTROL_UNIT;
++ rfd = rfa + frame_fifo_size;
++ }
++
++ pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
++ pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
++}
++
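The branchy thresholds above are easier to see with concrete numbers. A small standalone sketch of the final else branch (the 512-byte unit and the round-up behaviour of AXGMAC_FLOW_CONTROL_ALIGN are assumptions borrowed from the Linux amd-xgbe driver; they are not defined in this hunk):

        #include <stdio.h>

        #define FC_UNIT      512u                          /* assumed unit size */
        #define FC_ALIGN(x)  (((x) + FC_UNIT - 1) & ~(FC_UNIT - 1))

        int main(void)
        {
                unsigned int max_frame = 1518 + 12 + 8;    /* std frame + IFG + preamble */
                unsigned int frame_fifo = FC_ALIGN(max_frame);  /* 2048 */
                unsigned int q_fifo_size = 8192;           /* sample per-queue FIFO */
                unsigned int rfa, rfd;

                if (q_fifo_size <= (frame_fifo * 3)) {
                        rfa = q_fifo_size - frame_fifo;
                        rfd = rfa + (frame_fifo / 2);
                } else {
                        rfa = frame_fifo * 2 + FC_UNIT;    /* 4608: just over 2 frames free */
                        rfd = rfa + frame_fifo;            /* 6656 */
                }
                printf("rfa=%u rfd=%u (bytes, before AXGMAC_FLOW_CONTROL_VALUE)\n",
                       rfa, rfd);
                return 0;
        }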
++static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
++{
++ unsigned int q_fifo_size;
++ unsigned int i;
++
++ for (i = 0; i < pdata->rx_q_count; i++) {
++ q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
++
++ axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
++ }
++}
++
++static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->rx_q_count; i++) {
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
++ pdata->rx_rfa[i]);
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
++ pdata->rx_rfd[i]);
++ }
++}
++
+ static int __axgbe_exit(struct axgbe_port *pdata)
+ {
+ unsigned int count = 2000;
+@@ -366,10 +558,659 @@ static int axgbe_exit(struct axgbe_port *pdata)
+ return __axgbe_exit(pdata);
+ }
+
++static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
++{
++ unsigned int i, count;
++
++ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
++ return 0;
++
++ for (i = 0; i < pdata->tx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
++
++ /* Poll until the Tx queue flush completes */
++ for (i = 0; i < pdata->tx_q_count; i++) {
++ count = 2000;
++ while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
++ MTL_Q_TQOMR, FTQ))
++ rte_delay_us(500);
++
++ if (!count)
++ return -EBUSY;
++ }
++
++ return 0;
++}
++
++static void axgbe_config_dma_bus(struct axgbe_port *pdata)
++{
++ /* Set enhanced addressing mode */
++ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
++
++ /* Outstanding read/write requests */
++ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
++ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
++
++ /* Set the System Bus mode */
++ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
++ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
++ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
++}
++
++static void axgbe_config_dma_cache(struct axgbe_port *pdata)
++{
++ unsigned int arcache, awcache, arwcache;
++
++ arcache = 0;
++ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
++ AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
++
++ awcache = 0;
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
++ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
++ AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
++
++ arwcache = 0;
++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
++ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
++ AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
++}
++
++static void axgbe_config_edma_control(struct axgbe_port *pdata)
++{
++ AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
++ AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
++}
++
++static int axgbe_config_osp_mode(struct axgbe_port *pdata)
++{
++ /* Force DMA to operate on second packet before closing descriptors
++ * of first packet
++ */
++ struct axgbe_tx_queue *txq;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
++ txq = pdata->eth_dev->data->tx_queues[i];
++ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
++ pdata->tx_osp_mode);
++ }
++
++ return 0;
++}
++
++static int axgbe_config_pblx8(struct axgbe_port *pdata)
++{
++ struct axgbe_tx_queue *txq;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
++ txq = pdata->eth_dev->data->tx_queues[i];
++ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
++ pdata->pblx8);
++ }
++ return 0;
++}
++
++static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
++{
++ struct axgbe_tx_queue *txq;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
++ txq = pdata->eth_dev->data->tx_queues[i];
++ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
++ pdata->tx_pbl);
++ }
++
++ return 0;
++}
++
++static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
++{
++ struct axgbe_rx_queue *rxq;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
++ rxq = pdata->eth_dev->data->rx_queues[i];
++ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
++ pdata->rx_pbl);
++ }
++
++ return 0;
++}
++
++static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
++{
++ struct axgbe_rx_queue *rxq;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
++ rxq = pdata->eth_dev->data->rx_queues[i];
++
++ rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
++ RTE_PKTMBUF_HEADROOM;
++ rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
++ ~(AXGBE_RX_BUF_ALIGN - 1);
++
++ if (rxq->buf_size > pdata->rx_buf_size)
++ pdata->rx_buf_size = rxq->buf_size;
++
++ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
++ rxq->buf_size);
++ }
++}
++
++static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
++ unsigned int index, unsigned int val)
++{
++ unsigned int wait;
++
++ if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
++ return -EBUSY;
++
++ AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
++
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
++
++ wait = 1000;
++ while (wait--) {
++ if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
++ return 0;
++
++ rte_delay_us(1500);
++ }
++
++ return -EBUSY;
++}
++
++static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
++{
++ struct rte_eth_rss_conf *rss_conf;
++ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
++ unsigned int *key;
++ int ret;
++
++ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
++
++ if (!rss_conf->rss_key)
++ key = (unsigned int *)&pdata->rss_key;
++ else
++ key = (unsigned int *)&rss_conf->rss_key;
++
++ while (key_regs--) {
++ ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
++ key_regs, *key++);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
++{
++ unsigned int i;
++ int ret;
++
++ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
++ ret = axgbe_write_rss_reg(pdata,
++ AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
++ pdata->rss_table[i]);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
++static int axgbe_enable_rss(struct axgbe_port *pdata)
++{
++ int ret;
++
++ /* Program the hash key */
++ ret = axgbe_write_rss_hash_key(pdata);
++ if (ret)
++ return ret;
++
++ /* Program the lookup table */
++ ret = axgbe_write_rss_lookup_table(pdata);
++ if (ret)
++ return ret;
++
++ /* Set the RSS options */
++ AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
++
++ /* Enable RSS */
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
++
++ return 0;
++}
++
++static void axgbe_rss_options(struct axgbe_port *pdata)
++{
++ struct rte_eth_rss_conf *rss_conf;
++ uint64_t rss_hf;
++
++ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
++ rss_hf = rss_conf->rss_hf;
++
++ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
++ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
++ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
++ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
++ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
++ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
++}
++
++static int axgbe_config_rss(struct axgbe_port *pdata)
++{
++ uint32_t i;
++
++ if (pdata->rss_enable) {
++ /* Initialize RSS hash key and lookup table */
++ uint32_t *key = (uint32_t *)pdata->rss_key;
++
++ for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
++ *key++ = (uint32_t)rte_rand();
++ for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
++ AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
++ i % pdata->eth_dev->data->nb_rx_queues);
++ axgbe_rss_options(pdata);
++ if (axgbe_enable_rss(pdata)) {
++ PMD_DRV_LOG(ERR, "Error in enabling RSS support");
++ return -1;
++ }
++ } else {
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
++ }
++
++ return 0;
++}
++
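axgbe_config_rss() above seeds a random hash key and fills the RSS lookup table round-robin across the configured Rx queues (AXGBE_RSS_MAX_TABLE_SIZE entries). A standalone sketch of that table fill (the 64-entry size is assumed only for the illustration):

        #include <stdio.h>

        #define RETA_SIZE 64    /* assumed table size for the illustration */

        int main(void)
        {
                unsigned int table[RETA_SIZE];
                unsigned int nb_rx_queues = 4;
                unsigned int i;

                for (i = 0; i < RETA_SIZE; i++)
                        table[i] = i % nb_rx_queues;       /* 0,1,2,3,0,1,... */

                printf("entry 5 -> queue %u\n", table[5]); /* queue 1 */
                return 0;
        }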
++static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
++{
++ struct axgbe_tx_queue *txq;
++ unsigned int dma_ch_isr, dma_ch_ier;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
++ txq = pdata->eth_dev->data->tx_queues[i];
++
++ /* Clear all the interrupts which are set */
++ dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
++ AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
++
++ /* Clear all interrupt enable bits */
++ dma_ch_ier = 0;
++
++ /* Interrupt summary enables
++ * NIE - Normal Interrupt Summary (left disabled here)
++ * AIE - Abnormal Interrupt Summary Enable
++ * FBEE - Fatal Bus Error Enable
++ */
++ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
++ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
++ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
++
++ /* Rx interrupts
++ * RBUE - Receive Buffer Unavailable is left disabled here
++ * RIE - Receive Interrupt Enable is likewise not set, since
++ * the PMD handles Rx by polling rather than per-channel
++ * interrupts
++ */
++ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
++
++ AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
++ }
++}
++
++static void wrapper_tx_desc_init(struct axgbe_port *pdata)
++{
++ struct axgbe_tx_queue *txq;
++ unsigned int i;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
++ txq = pdata->eth_dev->data->tx_queues[i];
++ txq->cur = 0;
++ txq->dirty = 0;
++ /* Update the total number of Tx descriptors */
++ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
++ /* Update the starting address of descriptor ring */
++ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
++ high32_value(txq->ring_phys_addr));
++ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
++ low32_value(txq->ring_phys_addr));
++ }
++}
++
++static int wrapper_rx_desc_init(struct axgbe_port *pdata)
++{
++ struct axgbe_rx_queue *rxq;
++ struct rte_mbuf *mbuf;
++ volatile union axgbe_rx_desc *desc;
++ unsigned int i, j;
++
++ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
++ rxq = pdata->eth_dev->data->rx_queues[i];
++
++ /* Initialize software ring entries */
++ rxq->mbuf_alloc = 0;
++ rxq->cur = 0;
++ rxq->dirty = 0;
++ desc = AXGBE_GET_DESC_PT(rxq, 0);
++
++ for (j = 0; j < rxq->nb_desc; j++) {
++ mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
++ if (mbuf == NULL) {
++ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d",
++ (unsigned int)rxq->queue_id, j);
++ axgbe_dev_rx_queue_release(rxq);
++ return -ENOMEM;
++ }
++ rxq->sw_ring[j] = mbuf;
++ /* Mbuf populate */
++ mbuf->next = NULL;
++ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
++ mbuf->nb_segs = 1;
++ mbuf->port = rxq->port_id;
++ desc->read.baddr =
++ rte_cpu_to_le_64(
++ rte_mbuf_data_iova_default(mbuf));
++ rte_wmb();
++ AXGMAC_SET_BITS_LE(desc->read.desc3,
++ RX_NORMAL_DESC3, OWN, 1);
++ rte_wmb();
++ rxq->mbuf_alloc++;
++ desc++;
++ }
++ /* Update the total number of Rx descriptors */
++ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
++ rxq->nb_desc - 1);
++ /* Update the starting address of descriptor ring */
++ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
++ high32_value(rxq->ring_phys_addr));
++ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
++ low32_value(rxq->ring_phys_addr));
++ /* Update the Rx Descriptor Tail Pointer */
++ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
++ low32_value(rxq->ring_phys_addr +
++ (rxq->nb_desc - 1) *
++ sizeof(union axgbe_rx_desc)));
++ }
++ return 0;
++}
++
++static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
++{
++ unsigned int i;
++
++ /* Set Tx to weighted round robin scheduling algorithm */
++ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
++
++ /* Set Tx traffic classes to use WRR algorithm with equal weights */
++ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
++ MTL_TSA_ETS);
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
++ }
++
++ /* Set Rx to strict priority algorithm */
++ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
++}
++
++static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->tx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
++
++ return 0;
++}
++
++static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->rx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
++
++ return 0;
++}
++
++static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
++ unsigned int val)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->tx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
++
++ return 0;
++}
++
++static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
++ unsigned int val)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->rx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
++
++ return 0;
++}
++
++/* Distribute the Rx fifo size across the hardware Rx queues */
++static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
++{
++ unsigned int fifo_size;
++ unsigned int q_fifo_size;
++ unsigned int p_fifo, i;
++
++ fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
++ pdata->hw_feat.rx_fifo_size);
++ q_fifo_size = fifo_size / pdata->rx_q_count;
++
++ /* Calculate the fifo setting by dividing the queue's fifo size
++ * by the fifo allocation increment (with 0 representing the
++ * base allocation increment so decrement the result
++ * by 1).
++ */
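++ /* For example, if the per-queue fifo were 32 KB and AXGMAC_FIFO_UNIT
++ * were 256 bytes (hypothetical sizes), p_fifo = 32768 / 256 - 1 = 127.
++ */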
++ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
++ if (p_fifo)
++ p_fifo--;
++
++ for (i = 0; i < pdata->rx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
++ pdata->fifo = p_fifo;
++
++ /*Calculate and config Flow control threshold*/
++ axgbe_calculate_flow_control_threshold(pdata);
++ axgbe_config_flow_control_threshold(pdata);
++}
++
++static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
++{
++ unsigned int fifo_size;
++ unsigned int q_fifo_size;
++ unsigned int p_fifo, i;
++
++ fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
++ pdata->hw_feat.tx_fifo_size);
++ q_fifo_size = fifo_size / pdata->tx_q_count;
++
++ /* Calculate the fifo setting by dividing the queue's fifo size
++ * by the fifo allocation increment (with 0 representing the
++ * base allocation increment so decrement the result
++ * by 1).
++ */
++ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
++ if (p_fifo)
++ p_fifo--;
++
++ for (i = 0; i < pdata->tx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
++}
++
++static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
++{
++ unsigned int qptc, qptc_extra, queue;
++ unsigned int i, j, reg, reg_val;
++
++ /* Map the MTL Tx Queues to Traffic Classes
++ * Note: Tx Queues >= Traffic Classes
++ */
++ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
++ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
++
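++ /* Example (hypothetical counts): 6 Tx queues and 4 traffic classes
++ * give qptc = 1 and qptc_extra = 2, so TC0 and TC1 each own two
++ * queues and TC2 and TC3 one queue each.
++ */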
++ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
++ for (j = 0; j < qptc; j++) {
++ AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
++ Q2TCMAP, i);
++ queue++;
++ }
++ if (i < qptc_extra) {
++ AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
++ Q2TCMAP, i);
++ queue++;
++ }
++ }
++
++ if (pdata->rss_enable) {
++ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
++ reg = MTL_RQDCM0R;
++ reg_val = 0;
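++ /* Each Rx queue owns one byte of an MTL_RQDCM register; setting
++ * bit 7 (0x80) of that byte is assumed here to request dynamic
++ * DMA-channel selection for the queue.
++ */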
++ for (i = 0; i < pdata->rx_q_count;) {
++ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
++
++ if ((i % MTL_RQDCM_Q_PER_REG) &&
++ (i != pdata->rx_q_count))
++ continue;
++
++ AXGMAC_IOWRITE(pdata, reg, reg_val);
++
++ reg += MTL_RQDCM_INC;
++ reg_val = 0;
++ }
++ }
++}
++
++static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
++{
++ unsigned int mtl_q_isr;
++ unsigned int q_count, i;
++
++ q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
++ for (i = 0; i < q_count; i++) {
++ /* Clear all the interrupts which are set */
++ mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
++ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
++
++ /* No MTL interrupts to be enabled */
++ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
++ }
++}
++
++static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
++{
++ unsigned int mac_addr_hi, mac_addr_lo;
++
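++ /* Pack the address bytes into the hi/lo register pair; e.g. the
++ * address 00:00:1A:1C:6A:17 would yield mac_addr_hi = 0x176A and
++ * mac_addr_lo = 0x1C1A0000.
++ */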
++ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
++ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
++ (addr[1] << 8) | (addr[0] << 0);
++
++ AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
++ AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
++
++ return 0;
++}
++
++static void axgbe_config_mac_address(struct axgbe_port *pdata)
++{
++ axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
++}
++
++static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
++{
++ unsigned int val;
++
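++ /* Note: the MTU-based value computed below is immediately
++ * overridden, so jumbo frame support is always enabled here.
++ */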
++ val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
++ val = 1;
++
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
++}
++
++static void axgbe_config_mac_speed(struct axgbe_port *pdata)
++{
++ axgbe_set_speed(pdata, pdata->phy_speed);
++}
++
++static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
++{
++ if (pdata->rx_csum_enable)
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
++ else
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
++}
++
++static int axgbe_init(struct axgbe_port *pdata)
++{
++ int ret;
++
++ /* Flush Tx queues */
++ ret = axgbe_flush_tx_queues(pdata);
++ if (ret)
++ return ret;
++ /* Initialize DMA related features */
++ axgbe_config_dma_bus(pdata);
++ axgbe_config_dma_cache(pdata);
++ axgbe_config_edma_control(pdata);
++ axgbe_config_osp_mode(pdata);
++ axgbe_config_pblx8(pdata);
++ axgbe_config_tx_pbl_val(pdata);
++ axgbe_config_rx_pbl_val(pdata);
++ axgbe_config_rx_buffer_size(pdata);
++ axgbe_config_rss(pdata);
++ wrapper_tx_desc_init(pdata);
++ ret = wrapper_rx_desc_init(pdata);
++ if (ret)
++ return ret;
++ axgbe_enable_dma_interrupts(pdata);
++
++ /* Initialize MTL related features */
++ axgbe_config_mtl_mode(pdata);
++ axgbe_config_queue_mapping(pdata);
++ axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
++ axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
++ axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
++ axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
++ axgbe_config_tx_fifo_size(pdata);
++ axgbe_config_rx_fifo_size(pdata);
++
++ axgbe_enable_mtl_interrupts(pdata);
++
++ /* Initialize MAC related features */
++ axgbe_config_mac_address(pdata);
++ axgbe_config_jumbo_enable(pdata);
++ axgbe_config_flow_control(pdata);
++ axgbe_config_mac_speed(pdata);
++ axgbe_config_checksum_offload(pdata);
++
++ return 0;
++}
++
+ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+ {
+ hw_if->exit = axgbe_exit;
++ hw_if->config_flow_control = axgbe_config_flow_control;
+
++ hw_if->init = axgbe_init;
+
+ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
+@@ -379,4 +1220,7 @@ void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
++ /* For FLOW ctrl */
++ hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
++ hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
+ }
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index ee1e48d..9065a44 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -132,6 +132,9 @@
+
+ static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+ static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
++static int axgbe_dev_configure(struct rte_eth_dev *dev);
++static int axgbe_dev_start(struct rte_eth_dev *dev);
++static void axgbe_dev_stop(struct rte_eth_dev *dev);
+ static void axgbe_dev_interrupt_handler(void *param);
+ static void axgbe_dev_close(struct rte_eth_dev *dev);
+ static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+@@ -186,6 +189,9 @@ static const struct rte_eth_desc_lim tx_desc_lim = {
+ };
+
+ static const struct eth_dev_ops axgbe_eth_dev_ops = {
++ .dev_configure = axgbe_dev_configure,
++ .dev_start = axgbe_dev_start,
++ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+@@ -194,6 +200,13 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .tx_queue_release = axgbe_dev_tx_queue_release,
+ };
+
++static int axgbe_phy_reset(struct axgbe_port *pdata)
++{
++ pdata->phy_link = -1;
++ pdata->phy_speed = SPEED_UNKNOWN;
++ return pdata->phy_if.phy_reset(pdata);
++}
++
+ /*
+ * Interrupt handler triggered by NIC for handling
+ * specific interrupt.
+@@ -218,6 +231,89 @@ axgbe_dev_interrupt_handler(void *param)
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ }
+
++/*
++ * Configure device link speed and setup link.
++ * It returns 0 on success.
++ */
++static int
++axgbe_dev_configure(struct rte_eth_dev *dev)
++{
++ struct axgbe_port *pdata = dev->data->dev_private;
++ /* Checksum offload to hardware */
++ pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
++ DEV_RX_OFFLOAD_CHECKSUM;
++ return 0;
++}
++
++static int
++axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
++{
++ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
++
++ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
++ pdata->rss_enable = 1;
++ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
++ pdata->rss_enable = 0;
++ else
++ return -1;
++ return 0;
++}
++
++static int
++axgbe_dev_start(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
++ int ret;
++
++ /* Multiqueue RSS */
++ ret = axgbe_dev_rx_mq_config(dev);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "Unable to config RX MQ");
++ return ret;
++ }
++ ret = axgbe_phy_reset(pdata);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "phy reset failed");
++ return ret;
++ }
++ ret = pdata->hw_if.init(pdata);
++ if (ret) {
++ PMD_DRV_LOG(ERR, "dev_init failed");
++ return ret;
++ }
++
++ /* enable uio/vfio intr/eventfd mapping */
++ rte_intr_enable(&pdata->pci_dev->intr_handle);
++
++ /* phy start*/
++ pdata->phy_if.phy_start(pdata);
++
++ axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
++ axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
++ return 0;
++}
++
++/* Stop device: disable rx and tx functions to allow for reconfiguring. */
++static void
++axgbe_dev_stop(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ rte_intr_disable(&pdata->pci_dev->intr_handle);
++
++ if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
++ return;
++
++ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
++
++ pdata->phy_if.phy_stop(pdata);
++ pdata->hw_if.exit(pdata);
++ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
++ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
++}
++
+ /* Clear all resources like TX/RX queues. */
+ static void
+ axgbe_dev_close(struct rte_eth_dev *dev)
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-10-18-net-axgbe-add-transmit-and-receive-data-path-apis.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-10-18-net-axgbe-add-transmit-and-receive-data-path-apis.patch
new file mode 100644
index 00000000..8cd1f791
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-10-18-net-axgbe-add-transmit-and-receive-data-path-apis.patch
@@ -0,0 +1,813 @@
+From patchwork Fri Mar 9 08:42:26 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 10/18] net/axgbe: add transmit and receive data path apis
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35829
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-10-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:26 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/Makefile | 1 +
+ drivers/net/axgbe/axgbe_ethdev.c | 22 +-
+ drivers/net/axgbe/axgbe_rxtx.c | 429 +++++++++++++++++++++++++++++++++
+ drivers/net/axgbe/axgbe_rxtx.h | 19 ++
+ drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 215 +++++++++++++++++
+ 5 files changed, 685 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+index d030530..58eb41e 100644
+--- a/drivers/net/axgbe/Makefile
++++ b/drivers/net/axgbe/Makefile
+@@ -147,5 +147,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
+ SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c
++SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c
+
+ include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index 9065a44..ae78e09 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -224,9 +224,22 @@ axgbe_dev_interrupt_handler(void *param)
+ {
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct axgbe_port *pdata = dev->data->dev_private;
++ unsigned int dma_isr, dma_ch_isr;
+
+ pdata->phy_if.an_isr(pdata);
+-
++ /*DMA related interrupts*/
++ dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
++ if (dma_isr) {
++ if (dma_isr & 1) {
++ dma_ch_isr =
++ AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
++ pdata->rx_queues[0],
++ DMA_CH_SR);
++ AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
++ pdata->rx_queues[0],
++ DMA_CH_SR, dma_ch_isr);
++ }
++ }
+ /* Enable interrupts since disabled after generation*/
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ }
+@@ -288,6 +301,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
+
+ /* phy start*/
+ pdata->phy_if.phy_start(pdata);
++ axgbe_dev_enable_tx(dev);
++ axgbe_dev_enable_rx(dev);
+
+ axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+@@ -307,6 +322,8 @@ axgbe_dev_stop(struct rte_eth_dev *dev)
+ return;
+
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
++ axgbe_dev_disable_tx(dev);
++ axgbe_dev_disable_rx(dev);
+
+ pdata->phy_if.phy_stop(pdata);
+ pdata->hw_if.exit(pdata);
+@@ -550,6 +567,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
+ eth_dev->dev_ops = &axgbe_eth_dev_ops;
++ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+@@ -694,6 +712,8 @@ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
++ eth_dev->rx_pkt_burst = NULL;
++ eth_dev->tx_pkt_burst = NULL;
+ axgbe_dev_clear_queues(eth_dev);
+
+ /* disable uio intr before callback unregister */
+diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
+index 64065e8..c616fc1 100644
+--- a/drivers/net/axgbe/axgbe_rxtx.c
++++ b/drivers/net/axgbe/axgbe_rxtx.c
+@@ -235,6 +235,197 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ return 0;
+ }
+
++static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
++ unsigned int queue)
++{
++ unsigned int rx_status;
++ unsigned long rx_timeout;
++
++ /* The Rx engine cannot be stopped if it is actively processing
++ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
++ * wait forever though...
++ */
++ rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
++ rte_get_timer_hz());
++
++ while (time_before(rte_get_timer_cycles(), rx_timeout)) {
++ rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
++ if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
++ (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
++ break;
++
++ rte_delay_us(900);
++ }
++
++ if (!time_before(rte_get_timer_cycles(), rx_timeout))
++ PMD_DRV_LOG(ERR,
++ "timed out waiting for Rx queue %u to empty\n",
++ queue);
++}
++
++void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
++{
++ struct axgbe_rx_queue *rxq;
++ struct axgbe_port *pdata = dev->data->dev_private;
++ unsigned int i;
++
++ /* Disable MAC Rx */
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
++
++ /* Prepare for Rx DMA channel stop */
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++ axgbe_prepare_rx_stop(pdata, i);
++ }
++ /* Disable each Rx queue */
++ AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++ /* Disable Rx DMA channel */
++ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
++ }
++}
++
++void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
++{
++ struct axgbe_rx_queue *rxq;
++ struct axgbe_port *pdata = dev->data->dev_private;
++ unsigned int i;
++ unsigned int reg_val = 0;
++
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++ /* Enable Rx DMA channel */
++ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
++ }
++
++ reg_val = 0;
++ for (i = 0; i < pdata->rx_q_count; i++)
++ reg_val |= (0x02 << (i << 1));
++ AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
++
++ /* Enable MAC Rx */
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
++ /* Frames are forwarded to the application after CRC stripping */
++ if (pdata->crc_strip_enable) {
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
++ }
++ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
++}
++
++/* Rx burst function that refills each descriptor with a fresh mbuf as
++ * soon as its packet is received (one-to-one refresh)
++ */
++uint16_t
++axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
++ uint16_t nb_pkts)
++{
++ PMD_INIT_FUNC_TRACE();
++ uint16_t nb_rx = 0;
++ struct axgbe_rx_queue *rxq = rx_queue;
++ volatile union axgbe_rx_desc *desc;
++ uint64_t old_dirty = rxq->dirty;
++ struct rte_mbuf *mbuf, *tmbuf;
++ unsigned int err;
++ uint32_t error_status;
++ uint16_t idx, pidx, pkt_len;
++
++ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
++ while (nb_rx < nb_pkts) {
++ if (unlikely(idx == rxq->nb_desc))
++ idx = 0;
++
++ desc = &rxq->desc[idx];
++
++ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
++ break;
++ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
++ if (unlikely(!tmbuf)) {
++ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
++ " queue_id = %u\n",
++ (unsigned int)rxq->port_id,
++ (unsigned int)rxq->queue_id);
++ rte_eth_devices[
++ rxq->port_id].data->rx_mbuf_alloc_failed++;
++ break;
++ }
++ pidx = idx + 1;
++ if (unlikely(pidx == rxq->nb_desc))
++ pidx = 0;
++
++ rte_prefetch0(rxq->sw_ring[pidx]);
++ if ((pidx & 0x3) == 0) {
++ rte_prefetch0(&rxq->desc[pidx]);
++ rte_prefetch0(&rxq->sw_ring[pidx]);
++ }
++
++ mbuf = rxq->sw_ring[idx];
++ /* Check for any errors and free mbuf*/
++ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
++ RX_NORMAL_DESC3, ES);
++ error_status = 0;
++ if (unlikely(err)) {
++ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
++ if ((error_status != AXGBE_L3_CSUM_ERR) &&
++ (error_status != AXGBE_L4_CSUM_ERR)) {
++ rxq->errors++;
++ rte_pktmbuf_free(mbuf);
++ goto err_set;
++ }
++ }
++ if (rxq->pdata->rx_csum_enable) {
++ mbuf->ol_flags = 0;
++ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
++ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
++ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
++ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
++ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
++ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
++ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
++ } else if (
++ unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
++ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
++ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
++ }
++ }
++ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
++ /* Get the RSS hash */
++ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
++ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
++ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
++ PL) - rxq->crc_len;
++ /* Mbuf populate */
++ mbuf->next = NULL;
++ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
++ mbuf->nb_segs = 1;
++ mbuf->port = rxq->port_id;
++ mbuf->pkt_len = pkt_len;
++ mbuf->data_len = pkt_len;
++ rxq->bytes += pkt_len;
++ rx_pkts[nb_rx++] = mbuf;
++err_set:
++ rxq->cur++;
++ rxq->sw_ring[idx++] = tmbuf;
++ desc->read.baddr =
++ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
++ memset((void *)(&desc->read.desc2), 0, 8);
++ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
++ rxq->dirty++;
++ }
++ rxq->pkts += nb_rx;
++ if (rxq->dirty != old_dirty) {
++ rte_wmb();
++ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
++ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
++ low32_value(rxq->ring_phys_addr +
++ (idx * sizeof(union axgbe_rx_desc))));
++ }
++
++ return nb_rx;
++}
++
+ /* Tx Apis */
+ static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
+ {
+@@ -296,6 +487,10 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ txq->free_thresh = (txq->nb_desc >> 1);
+ txq->free_batch_cnt = txq->free_thresh;
+
++ /* In the vector Tx path the queue size must be a multiple of the
++ * free threshold
++ */
++ if (txq->nb_desc % txq->free_thresh != 0)
++ txq->vector_disable = 1;
++
+ if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
+ ETH_TXQ_FLAGS_NOOFFLOADS) {
+ txq->vector_disable = 1;
+@@ -333,9 +528,243 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ if (!pdata->tx_queues)
+ pdata->tx_queues = dev->data->tx_queues;
+
++ if (txq->vector_disable)
++ dev->tx_pkt_burst = &axgbe_xmit_pkts;
++ else
++ dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
++
+ return 0;
+ }
+
++static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
++ unsigned int queue)
++{
++ unsigned int tx_status;
++ unsigned long tx_timeout;
++
++ /* The Tx engine cannot be stopped if it is actively processing
++ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
++ * wait forever though...
++ */
++ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
++ rte_get_timer_hz());
++ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
++ tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
++ if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
++ (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
++ break;
++
++ rte_delay_us(900);
++ }
++
++ if (!time_before(rte_get_timer_cycles(), tx_timeout))
++ PMD_DRV_LOG(ERR,
++ "timed out waiting for Tx queue %u to empty\n",
++ queue);
++}
++
++static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
++ unsigned int queue)
++{
++ unsigned int tx_dsr, tx_pos, tx_qidx;
++ unsigned int tx_status;
++ unsigned long tx_timeout;
++
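++ /* MAC versions newer than 0x20 report per-queue Tx status through
++ * MTL_Q_TQDR; otherwise fall back to the DMA debug status (DSR)
++ * registers computed below.
++ */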
++ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
++ return axgbe_txq_prepare_tx_stop(pdata, queue);
++
++ /* Calculate the status register to read and the position within */
++ if (queue < DMA_DSRX_FIRST_QUEUE) {
++ tx_dsr = DMA_DSR0;
++ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
++ } else {
++ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
++
++ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
++ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
++ DMA_DSRX_TPS_START;
++ }
++
++ /* The Tx engine cannot be stopped if it is actively processing
++ * descriptors. Wait for the Tx engine to enter the stopped or
++ * suspended state. Don't wait forever though...
++ */
++ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
++ rte_get_timer_hz());
++ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
++ tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
++ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
++ if ((tx_status == DMA_TPS_STOPPED) ||
++ (tx_status == DMA_TPS_SUSPENDED))
++ break;
++
++ rte_delay_us(900);
++ }
++
++ if (!time_before(rte_get_timer_cycles(), tx_timeout))
++ PMD_DRV_LOG(ERR,
++ "timed out waiting for Tx DMA channel %u to stop\n",
++ queue);
++}
++
++void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
++{
++ struct axgbe_tx_queue *txq;
++ struct axgbe_port *pdata = dev->data->dev_private;
++ unsigned int i;
++
++ /* Prepare for stopping DMA channel */
++ for (i = 0; i < pdata->tx_q_count; i++) {
++ txq = dev->data->tx_queues[i];
++ axgbe_prepare_tx_stop(pdata, i);
++ }
++ /* Disable MAC Tx */
++ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
++ /* Disable each Tx queue*/
++ for (i = 0; i < pdata->tx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
++ 0);
++ /* Disable each Tx DMA channel */
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txq = dev->data->tx_queues[i];
++ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
++ }
++}
++
++void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
++{
++ struct axgbe_tx_queue *txq;
++ struct axgbe_port *pdata = dev->data->dev_private;
++ unsigned int i;
++
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txq = dev->data->tx_queues[i];
++ /* Enable Tx DMA channel */
++ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
++ }
++ /* Enable Tx queue*/
++ for (i = 0; i < pdata->tx_q_count; i++)
++ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
++ MTL_Q_ENABLED);
++ /* Enable MAC Tx */
++ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
++}
++
++/* Free Tx conformed mbufs */
++static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
++{
++ volatile struct axgbe_tx_desc *desc;
++ uint16_t idx;
++
++ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
++ while (txq->cur != txq->dirty) {
++ if (unlikely(idx == txq->nb_desc))
++ idx = 0;
++ desc = &txq->desc[idx];
++ /* Check for ownership */
++ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
++ return;
++ memset((void *)&desc->desc2, 0, 8);
++ /* Free mbuf */
++ rte_pktmbuf_free(txq->sw_ring[idx]);
++ txq->sw_ring[idx++] = NULL;
++ txq->dirty++;
++ }
++}
++
++/* Tx descriptor formation
++ * Assumes each mbuf needs exactly one descriptor and that the
++ * mbuf is linear (single segment)
++ */
++static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
++ struct rte_mbuf *mbuf)
++{
++ volatile struct axgbe_tx_desc *desc;
++ uint16_t idx;
++ uint64_t mask;
++
++ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
++ desc = &txq->desc[idx];
++
++ /* Update buffer address and length */
++ desc->baddr = rte_mbuf_data_iova(mbuf);
++ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
++ mbuf->pkt_len);
++ /* Total msg length to transmit */
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
++ mbuf->pkt_len);
++ /* Mark it as First and Last Descriptor */
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
++ /* Mark it as a NORMAL descriptor */
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
++ /* configure h/w Offload */
++ mask = mbuf->ol_flags & PKT_TX_L4_MASK;
++ if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
++ else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
++ rte_wmb();
++
++ /* Set OWN bit */
++ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
++ rte_wmb();
++
++ /* Save mbuf */
++ txq->sw_ring[idx] = mbuf;
++ /* Update current index*/
++ txq->cur++;
++ /* Update stats */
++ txq->bytes += mbuf->pkt_len;
++
++ return 0;
++}
++
++/* Scalar Tx burst function registered as the ethdev tx_pkt_burst callback */
++uint16_t
++axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
++ uint16_t nb_pkts)
++{
++ PMD_INIT_FUNC_TRACE();
++
++ if (unlikely(nb_pkts == 0))
++ return nb_pkts;
++
++ struct axgbe_tx_queue *txq;
++ uint16_t nb_desc_free;
++ uint16_t nb_pkt_sent = 0;
++ uint16_t idx;
++ uint32_t tail_addr;
++ struct rte_mbuf *mbuf;
++
++ txq = (struct axgbe_tx_queue *)tx_queue;
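++ /* cur and dirty are free-running counters, so their difference is
++ * the number of descriptors currently in flight.
++ */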
++ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
++
++ if (unlikely(nb_desc_free <= txq->free_thresh)) {
++ axgbe_xmit_cleanup(txq);
++ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
++ if (unlikely(nb_desc_free == 0))
++ return 0;
++ }
++ nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
++ while (nb_pkts--) {
++ mbuf = *tx_pkts++;
++ if (axgbe_xmit_hw(txq, mbuf))
++ goto out;
++ nb_pkt_sent++;
++ }
++out:
++ /* Sync read and write */
++ rte_mb();
++ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
++ tail_addr = low32_value(txq->ring_phys_addr +
++ idx * sizeof(struct axgbe_tx_desc));
++ /* Update tail reg with next immediate address to kick Tx DMA channel*/
++ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
++ txq->pkts += nb_pkt_sent;
++ return nb_pkt_sent;
++}
++
+ void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
+ {
+ PMD_INIT_FUNC_TRACE();
+diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
+index 0d712f7..45aaf89 100644
+--- a/drivers/net/axgbe/axgbe_rxtx.h
++++ b/drivers/net/axgbe/axgbe_rxtx.h
+@@ -278,12 +278,31 @@ void axgbe_dev_tx_queue_release(void *txq);
+ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
++void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
++void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
++int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
++int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
++
++uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
++ uint16_t nb_pkts);
++uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
++ uint16_t nb_pkts);
++
+
+ void axgbe_dev_rx_queue_release(void *rxq);
+ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
++void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
++void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
++int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
++int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
++uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
++ uint16_t nb_pkts);
++uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
++ struct rte_mbuf **rx_pkts,
++ uint16_t nb_pkts);
+ void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+ #endif /* _AXGBE_RXTX_H_ */
+diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+new file mode 100644
+index 0000000..c2bd5da
+--- /dev/null
++++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+@@ -0,0 +1,215 @@
++/*-
++ * Copyright(c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2017 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
++ * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ *
++ * Copyright (c) 2013 Synopsys, Inc.
++ *
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include "axgbe_ethdev.h"
++#include "axgbe_rxtx.h"
++#include "axgbe_phy.h"
++
++#include <rte_time.h>
++#include <rte_mempool.h>
++#include <rte_mbuf.h>
++
++/* Pre-computed control flags for desc3 (OWN | FD | LD), used to avoid
++ * shifting during every descriptor preparation
++ */
++#define TX_DESC_CTRL_FLAGS 0xb000000000000000
++#define TX_FREE_BULK 8
++#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1)
++
++static inline void
++axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,
++ struct rte_mbuf *mbuf)
++{
++ __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
++ TX_DESC_CTRL_FLAGS | mbuf->data_len,
++ mbuf->buf_iova
++ + mbuf->data_off);
++ _mm_store_si128((__m128i *)desc, descriptor);
++}
++
++static void
++axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq)
++{
++ volatile struct axgbe_tx_desc *desc;
++ int idx, i;
++
++ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt
++ - 1);
++ desc = &txq->desc[idx];
++ if (desc->desc3 & AXGBE_DESC_OWN)
++ return;
++ /* memset avoided for desc ctrl fields since in vec_tx path
++ * all 128 bits are populated
++ */
++ for (i = 0; i < txq->free_batch_cnt; i++, idx--)
++ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
++
++
++ txq->dirty += txq->free_batch_cnt;
++ txq->nb_desc_free += txq->free_batch_cnt;
++}
++
++uint16_t
++axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
++ uint16_t nb_pkts)
++{
++ PMD_INIT_FUNC_TRACE();
++
++ struct axgbe_tx_queue *txq;
++ uint16_t idx, nb_commit, loop, i;
++ uint32_t tail_addr;
++
++ txq = (struct axgbe_tx_queue *)tx_queue;
++ if (txq->nb_desc_free < txq->free_thresh) {
++ axgbe_xmit_cleanup_vec(txq);
++ if (unlikely(txq->nb_desc_free == 0))
++ return 0;
++ }
++ nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts);
++ nb_commit = nb_pkts;
++ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
++ loop = txq->nb_desc - idx;
++ if (nb_commit >= loop) {
++ for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {
++ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
++ txq->sw_ring[idx] = *tx_pkts;
++ }
++ nb_commit -= loop;
++ idx = 0;
++ }
++ for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {
++ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
++ txq->sw_ring[idx] = *tx_pkts;
++ }
++ txq->cur += nb_pkts;
++ tail_addr = (uint32_t)(txq->ring_phys_addr +
++ idx * sizeof(struct axgbe_tx_desc));
++ /* Update tail reg with next immediate address to kick Tx DMA channel*/
++ rte_write32(tail_addr, (void *)txq->dma_tail_reg);
++ txq->pkts += nb_pkts;
++ txq->nb_desc_free -= nb_pkts;
++
++ return nb_pkts;
++}
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-11-18-doc-add-documents-for-AMD-axgbe-Ethernet-PMD.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-11-18-doc-add-documents-for-AMD-axgbe-Ethernet-PMD.patch
new file mode 100644
index 00000000..e1490a40
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-11-18-doc-add-documents-for-AMD-axgbe-Ethernet-PMD.patch
@@ -0,0 +1,272 @@
+From patchwork Fri Mar 9 08:42:27 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,11/18] doc: add documents for AMD axgbe Ethernet PMD
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35830
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-11-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:27 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/nics/axgbe.rst | 211 +++++++++++++++++++++++++++++++++++++
+ doc/guides/nics/features/axgbe.ini | 14 +++
+ doc/guides/nics/index.rst | 1 +
+ 3 files changed, 226 insertions(+)
+ create mode 100644 doc/guides/nics/axgbe.rst
+ create mode 100644 doc/guides/nics/features/axgbe.ini
+
+diff --git a/doc/guides/nics/axgbe.rst b/doc/guides/nics/axgbe.rst
+new file mode 100644
+index 0000000..e9bebb8
+--- /dev/null
++++ b/doc/guides/nics/axgbe.rst
+@@ -0,0 +1,211 @@
++.. Copyright (c) 2017 Advanced Micro Devices, Inc.
++ All rights reserved.
++
++ AMD 10Gb Ethernet driver
++
++ This file is available to you under your choice of the following two
++ licenses:
++
++ License 1: GPLv2
++
++ Copyright (c) 2017 Advanced Micro Devices, Inc.
++
++ This file is free software; you may copy, redistribute and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation, either version 2 of the License, or (at
++ your option) any later version.
++
++ This file is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program. If not, see <http://www.gnu.org/licenses/>.
++
++ This file incorporates work covered by the following copyright and
++ permission notice:
++
++ Copyright (c) 2013 Synopsys, Inc.
++
++ The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ Inc. unless otherwise expressly agreed to in writing between Synopsys
++ and you.
++
++ The Software IS NOT an item of Licensed Software or Licensed Product
++ under any End User Software License Agreement or Agreement for Licensed
++ Product with Synopsys or any supplement thereto. Permission is hereby
++ granted, free of charge, to any person obtaining a copy of this software
++ annotated with this license and the Software, to deal in the Software
++ without restriction, including without limitation the rights to use,
++ copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ of the Software, and to permit persons to whom the Software is furnished
++ to do so, subject to the following conditions:
++
++ The above copyright notice and this permission notice shall be included
++ in all copies or substantial portions of the Software.
++
++ THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ THE POSSIBILITY OF SUCH DAMAGE.
++
++ License 2: Modified BSD
++
++ Copyright (c) 2017 Advanced Micro Devices, Inc.
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ * Redistributions of source code must retain the above copyright
++ notice, this list of conditions and the following disclaimer.
++ * Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ * Neither the name of Advanced Micro Devices, Inc. nor the
++ names of its contributors may be used to endorse or promote products
++ derived from this software without specific prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++ This file incorporates work covered by the following copyright and
++ permission notice:
++
++ Copyright (c) 2013 Synopsys, Inc.
++
++ The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ Inc. unless otherwise expressly agreed to in writing between Synopsys
++ and you.
++
++ The Software IS NOT an item of Licensed Software or Licensed Product
++ under any End User Software License Agreement or Agreement for Licensed
++ Product with Synopsys or any supplement thereto. Permission is hereby
++ granted, free of charge, to any person obtaining a copy of this software
++ annotated with this license and the Software, to deal in the Software
++ without restriction, including without limitation the rights to use,
++ copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ of the Software, and to permit persons to whom the Software is furnished
++ to do so, subject to the following conditions:
++
++ The above copyright notice and this permission notice shall be included
++ in all copies or substantial portions of the Software.
++
++ THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ THE POSSIBILITY OF SUCH DAMAGE.
++
++AXGBE Poll Mode Driver
++======================
++
++The AXGBE poll mode driver library (**librte_pmd_axgbe**) implements support
++for the AMD 10 Gbps family of adapters. It is compiled and tested on standard
++Linux distributions such as Ubuntu.
++
++Supported Features
++------------------
++
++AXGBE PMD has support for:
++
++- Base L2 features
++- TSS (Transmit Side Scaling)
++- RSS (Receive Side Scaling)
++- Checksum offload
++- Jumbo frames up to 9K
++
++
++Supported Chipsets and NICs
++---------------------------
++- AMD 10 Gigabit Ethernet Controller
++
++
++Configuration Information
++-------------------------
++
++The following options can be modified in the ``.config`` file. Please note that
++enabling debugging options may affect system performance.
++
++- ``CONFIG_RTE_LIBRTE_AXGBE_PMD`` (default **n**)
++
++ Toggle compilation of the axgbe driver. To use the axgbe PMD, set this
++ parameter to ``y`` (see the example below).
++
++- ``CONFIG_RTE_LIBRTE_AXGBE_DEBUG_INIT`` (default **n**)
++
++ Toggle display of initialization related messages.
++
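++For example, assuming a make-based build where the configuration has been
++generated into ``build/.config``, the PMD could be enabled with:
++
++.. code-block:: console
++
++   sed -i 's/CONFIG_RTE_LIBRTE_AXGBE_PMD=n/CONFIG_RTE_LIBRTE_AXGBE_PMD=y/' \
++       build/.config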
++
++Building DPDK
++-------------
++
++See the :ref:`DPDK Getting Started Guide for Linux <linux_gsg>` for
++instructions on how to build DPDK.
++
++By default the AXGBE PMD library will be built into the DPDK library.
++
++For configuring and using UIO frameworks, please also refer to :ref:`the
++documentation that comes with the DPDK suite <linux_gsg>`.
++
++
++Prerequisites and Pre-conditions
++--------------------------------
++- Prepare the system as recommended by the DPDK suite.
++
++- The AXGBE PMD works only with legacy interrupts, so load the igb_uio
++  module in legacy interrupt mode using its module parameters (see the
++  sketch below).
++
++- Bind the intended AMD device to the igb_uio module.
++
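++A minimal sketch of these steps (the PCI address is taken from the sample
++output below and the kernel-module path assumes a make-based build; adjust
++both for your setup):
++
++.. code-block:: console
++
++   modprobe uio
++   insmod ./build/kmod/igb_uio.ko intr_mode=legacy
++   ./usertools/dpdk-devbind.py --bind=igb_uio 0000:02:00.4
++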
++The system is now ready to run a DPDK application.
++
++
++Usage Example
++-------------
++
++Refer to the document :ref:`compiling and testing a PMD for a NIC <pmd_build_and_test>`
++for details.
++
++Example output:
++
++.. code-block:: console
++
++ [...]
++ EAL: PCI device 0000:02:00.4 on NUMA socket 0
++ EAL: probe driver: 1022:1458 net_axgbe
++ Interactive-mode selected
++ USER1: create a new mbuf pool <mbuf_pool_socket_0>: n=171456, size=2176, socket=0
++ USER1: create a new mbuf pool <mbuf_pool_socket_1>: n=171456, size=2176, socket=1
++ USER1: create a new mbuf pool <mbuf_pool_socket_2>: n=171456, size=2176, socket=2
++ USER1: create a new mbuf pool <mbuf_pool_socket_3>: n=171456, size=2176, socket=3
++ Configuring Port 0 (socket 0)
++ Port 0: 00:00:1A:1C:6A:17
++ Checking link statuses...
++ Port 0 Link Up - speed 10000 Mbps - full-duplex
++ Done
++ testpmd>
+diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
+new file mode 100644
+index 0000000..f644128
+--- /dev/null
++++ b/doc/guides/nics/features/axgbe.ini
+@@ -0,0 +1,14 @@
++;
++; Supported features of the 'axgbe' network poll mode driver.
++;
++; Refer to default.ini for the full list of available PMD features.
++;
++[Features]
++Speed capabilities = Y
++Jumbo frame = Y
++RSS hash = Y
++CRC offload = Y
++L3 checksum offload = Y
++L4 checksum offload = Y
++Linux UIO = Y
++x86-64 = Y
+diff --git a/doc/guides/nics/index.rst b/doc/guides/nics/index.rst
+index 59419f4..0c90aa9 100644
+--- a/doc/guides/nics/index.rst
++++ b/doc/guides/nics/index.rst
+@@ -13,6 +13,7 @@ Network Interface Controller Drivers
+ build_and_test
+ ark
+ avp
++ axgbe
+ bnx2x
+ bnxt
+ cxgbe
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-12-18-net-axgbe-add-link-status-update.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-12-18-net-axgbe-add-link-status-update.patch
new file mode 100644
index 00000000..68cb5f36
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-12-18-net-axgbe-add-link-status-update.patch
@@ -0,0 +1,90 @@
+From patchwork Fri Mar 9 08:42:28 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,12/18] net/axgbe: add link status update
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35831
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-12-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:28 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/nics/features/axgbe.ini | 1 +
+ drivers/net/axgbe/axgbe_ethdev.c | 33 +++++++++++++++++++++++++++++++++
+ 2 files changed, 34 insertions(+)
+
+diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
+index f644128..2dbff18 100644
+--- a/doc/guides/nics/features/axgbe.ini
++++ b/doc/guides/nics/features/axgbe.ini
+@@ -5,6 +5,7 @@
+ ;
+ [Features]
+ Speed capabilities = Y
++Link status = Y
+ Jumbo frame = Y
+ RSS hash = Y
+ CRC offload = Y
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index ae78e09..ebae6cc 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -137,6 +137,8 @@ static int axgbe_dev_start(struct rte_eth_dev *dev);
+ static void axgbe_dev_stop(struct rte_eth_dev *dev);
+ static void axgbe_dev_interrupt_handler(void *param);
+ static void axgbe_dev_close(struct rte_eth_dev *dev);
++static int axgbe_dev_link_update(struct rte_eth_dev *dev,
++ int wait_to_complete);
+ static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+@@ -193,6 +195,7 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .dev_start = axgbe_dev_start,
+ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
++ .link_update = axgbe_dev_link_update,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+ .rx_queue_release = axgbe_dev_rx_queue_release,
+@@ -338,6 +341,36 @@ axgbe_dev_close(struct rte_eth_dev *dev)
+ axgbe_dev_clear_queues(dev);
+ }
+
++/* return 0 means link status changed, -1 means not changed */
++static int
++axgbe_dev_link_update(struct rte_eth_dev *dev,
++ int wait_to_complete __rte_unused)
++{
++ PMD_INIT_FUNC_TRACE();
++ rte_delay_ms(800);
++
++ struct axgbe_port *pdata = dev->data->dev_private;
++ int old_link_status = dev->data->dev_link.link_status;
++
++ pdata->phy_if.phy_status(pdata);
++
++ dev->data->dev_link.link_speed = pdata->phy_speed;
++ switch (pdata->phy.duplex) {
++ case DUPLEX_FULL:
++ dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
++ break;
++ case DUPLEX_HALF:
++ dev->data->dev_link.link_duplex = ETH_LINK_HALF_DUPLEX;
++ break;
++ }
++ dev->data->dev_link.link_autoneg = !(dev->data->dev_conf.link_speeds &
++ ETH_LINK_SPEED_FIXED);
++ dev->data->dev_link.link_status = pdata->phy_link;
++
++ return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
++}
++
++
+ static void
+ axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-13-18-net-axgbe-add-configure-flow-control-while-link-adjustment.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-13-18-net-axgbe-add-configure-flow-control-while-link-adjustment.patch
new file mode 100644
index 00000000..da26b9ef
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-13-18-net-axgbe-add-configure-flow-control-while-link-adjustment.patch
@@ -0,0 +1,44 @@
+From patchwork Fri Mar 9 08:42:29 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 13/18] net/axgbe: add configure flow control while link adjustment
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35832
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-13-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:29 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_mdio.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
+index 753dde9..07f4087 100644
+--- a/drivers/net/axgbe/axgbe_mdio.c
++++ b/drivers/net/axgbe/axgbe_mdio.c
+@@ -796,6 +796,19 @@ static void axgbe_an_init(struct axgbe_port *pdata)
+ static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
+ {
+ if (pdata->phy.link) {
++ /* Flow control support */
++ pdata->pause_autoneg = pdata->phy.pause_autoneg;
++
++ if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) {
++ pdata->hw_if.config_tx_flow_control(pdata);
++ pdata->tx_pause = pdata->phy.tx_pause;
++ }
++
++ if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) {
++ pdata->hw_if.config_rx_flow_control(pdata);
++ pdata->rx_pause = pdata->phy.rx_pause;
++ }
++
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed)
+ pdata->phy_speed = pdata->phy.speed;
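This hunk only re-syncs the MAC transmit/receive pause configuration with the auto-negotiation outcome; the series does not register .flow_ctrl_get/.flow_ctrl_set ops. Purely as a hedged illustration of where such settings would normally surface, an application could query the generic flow-control API, most likely getting -ENOTSUP back from this PMD at this point in the series:

#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
show_pause_settings(uint16_t port_id)
{
        struct rte_eth_fc_conf fc_conf;
        int ret;

        memset(&fc_conf, 0, sizeof(fc_conf));
        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
        if (ret != 0) {
                /* expected for axgbe here: no flow_ctrl_get op registered */
                printf("port %u: flow_ctrl_get failed (%d)\n", port_id, ret);
                return;
        }
        printf("port %u: fc mode %d, autoneg %u\n",
               port_id, (int)fc_conf.mode, (unsigned int)fc_conf.autoneg);
}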
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-14-18-net-axgbe-add-promiscuous-mode-support.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-14-18-net-axgbe-add-promiscuous-mode-support.patch
new file mode 100644
index 00000000..025780e5
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-14-18-net-axgbe-add-promiscuous-mode-support.patch
@@ -0,0 +1,120 @@
+From patchwork Fri Mar 9 08:42:30 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,14/18] net/axgbe: add promiscuous mode support
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35833
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-14-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:30 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/nics/axgbe.rst | 2 ++
+ doc/guides/nics/features/axgbe.ini | 2 ++
+ drivers/net/axgbe/axgbe_ethdev.c | 48 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 52 insertions(+)
+
+diff --git a/doc/guides/nics/axgbe.rst b/doc/guides/nics/axgbe.rst
+index e9bebb8..8fe75bf 100644
+--- a/doc/guides/nics/axgbe.rst
++++ b/doc/guides/nics/axgbe.rst
+@@ -135,6 +135,8 @@ AXGBE PMD has support for:
+
+ - Base L2 features
+ - TSS (Transmit Side Scaling)
++- Promiscuous mode
++- Multicast mode
+ - RSS (Receive Side Scaling)
+ - Checksum offload
+ - Jumbo Frame upto 9K
+diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
+index 2dbff18..9f4d38f 100644
+--- a/doc/guides/nics/features/axgbe.ini
++++ b/doc/guides/nics/features/axgbe.ini
+@@ -7,6 +7,8 @@
+ Speed capabilities = Y
+ Link status = Y
+ Jumbo frame = Y
++Promiscuous mode = Y
++Allmulticast mode = Y
+ RSS hash = Y
+ CRC offload = Y
+ L3 checksum offload = Y
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index ebae6cc..0b67cf6 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -137,6 +137,10 @@ static int axgbe_dev_start(struct rte_eth_dev *dev);
+ static void axgbe_dev_stop(struct rte_eth_dev *dev);
+ static void axgbe_dev_interrupt_handler(void *param);
+ static void axgbe_dev_close(struct rte_eth_dev *dev);
++static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
++static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
++static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
++static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+ static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+ static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+@@ -195,6 +199,10 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .dev_start = axgbe_dev_start,
+ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
++ .promiscuous_enable = axgbe_dev_promiscuous_enable,
++ .promiscuous_disable = axgbe_dev_promiscuous_disable,
++ .allmulticast_enable = axgbe_dev_allmulticast_enable,
++ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+@@ -341,6 +349,46 @@ axgbe_dev_close(struct rte_eth_dev *dev)
+ axgbe_dev_clear_queues(dev);
+ }
+
++static void
++axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
++}
++
++static void
++axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
++}
++
++static void
++axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
++ return;
++ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
++}
++
++static void
++axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
++{
++ PMD_INIT_FUNC_TRACE();
++ struct axgbe_port *pdata = dev->data->dev_private;
++
++ if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
++ return;
++ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
++}
++
+ /* return 0 means link status changed, -1 means not changed */
+ static int
+ axgbe_dev_link_update(struct rte_eth_dev *dev,
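The four new ops are thin wrappers around the MAC packet filter register: the PR bit toggles promiscuous receive and the PM bit toggles pass-all-multicast. Applications drive them through the generic helpers, which return void in this DPDK generation; a small sketch assuming a configured port (helper name is illustrative):

#include <rte_ethdev.h>
#include <rte_log.h>

static void
open_up_rx_filtering(uint16_t port_id)
{
        /* Lands in axgbe_dev_promiscuous_enable() /
         * axgbe_dev_allmulticast_enable() registered above. */
        rte_eth_promiscuous_enable(port_id);
        rte_eth_allmulticast_enable(port_id);

        if (rte_eth_promiscuous_get(port_id) == 1)
                RTE_LOG(INFO, PMD, "port %u: promiscuous mode on\n", port_id);
}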
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-15-18-net-axgbe-add-generic-transmit-and-receive-stats-support.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-15-18-net-axgbe-add-generic-transmit-and-receive-stats-support.patch
new file mode 100644
index 00000000..24e98b27
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-15-18-net-axgbe-add-generic-transmit-and-receive-stats-support.patch
@@ -0,0 +1,121 @@
+From patchwork Fri Mar 9 08:42:31 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 15/18] net/axgbe: add generic transmit and receive stats support
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35834
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-15-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:31 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/nics/axgbe.rst | 1 +
+ doc/guides/nics/features/axgbe.ini | 1 +
+ drivers/net/axgbe/axgbe_ethdev.c | 51 ++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 53 insertions(+)
+
+diff --git a/doc/guides/nics/axgbe.rst b/doc/guides/nics/axgbe.rst
+index 8fe75bf..9038463 100644
+--- a/doc/guides/nics/axgbe.rst
++++ b/doc/guides/nics/axgbe.rst
+@@ -136,6 +136,7 @@ AXGBE PMD has support for:
+ - Base L2 features
+ - TSS (Transmit Side Scaling)
+ - Promiscuous mode
++- Port statistics
+ - Multicast mode
+ - RSS (Receive Side Scaling)
+ - Checksum offload
+diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
+index 9f4d38f..042ff1e 100644
+--- a/doc/guides/nics/features/axgbe.ini
++++ b/doc/guides/nics/features/axgbe.ini
+@@ -13,5 +13,6 @@ RSS hash = Y
+ CRC offload = Y
+ L3 checksum offload = Y
+ L4 checksum offload = Y
++Basic stats = Y
+ Linux UIO = Y
+ x86-64 = Y
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index 0b67cf6..9e5114b 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -143,6 +143,9 @@ static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+ static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+ static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
++static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
++ struct rte_eth_stats *stats);
++static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+ static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+@@ -204,6 +207,8 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .allmulticast_enable = axgbe_dev_allmulticast_enable,
+ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
++ .stats_get = axgbe_dev_stats_get,
++ .stats_reset = axgbe_dev_stats_reset,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+ .rx_queue_release = axgbe_dev_rx_queue_release,
+@@ -418,6 +423,52 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
+ return old_link_status == dev->data->dev_link.link_status ? -1 : 0;
+ }
+
++static int
++axgbe_dev_stats_get(struct rte_eth_dev *dev,
++ struct rte_eth_stats *stats)
++{
++ struct axgbe_rx_queue *rxq;
++ struct axgbe_tx_queue *txq;
++ unsigned int i;
++
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++ stats->q_ipackets[i] = rxq->pkts;
++ stats->ipackets += rxq->pkts;
++ stats->q_ibytes[i] = rxq->bytes;
++ stats->ibytes += rxq->bytes;
++ }
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txq = dev->data->tx_queues[i];
++ stats->q_opackets[i] = txq->pkts;
++ stats->opackets += txq->pkts;
++ stats->q_obytes[i] = txq->bytes;
++ stats->obytes += txq->bytes;
++ }
++
++ return 0;
++}
++
++static void
++axgbe_dev_stats_reset(struct rte_eth_dev *dev)
++{
++ struct axgbe_rx_queue *rxq;
++ struct axgbe_tx_queue *txq;
++ unsigned int i;
++
++ for (i = 0; i < dev->data->nb_rx_queues; i++) {
++ rxq = dev->data->rx_queues[i];
++ rxq->pkts = 0;
++ rxq->bytes = 0;
++ rxq->errors = 0;
++ }
++ for (i = 0; i < dev->data->nb_tx_queues; i++) {
++ txq = dev->data->tx_queues[i];
++ txq->pkts = 0;
++ txq->bytes = 0;
++ txq->errors = 0;
++ }
++}
+
+ static void
+ axgbe_dev_info_get(struct rte_eth_dev *dev,
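Two notes on the stats hunk: the per-queue arrays in struct rte_eth_stats (q_ipackets, q_ibytes, ...) hold RTE_ETHDEV_QUEUE_STAT_CNTRS entries, so the loops implicitly assume the configured queue counts stay within that bound; and the values reported are the software rxq->pkts/txq->pkts totals rather than hardware MMC counters. An application reads and clears them through the generic API, sketched below:

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

static void
dump_basic_stats(uint16_t port_id)
{
        struct rte_eth_stats stats;

        if (rte_eth_stats_get(port_id, &stats) != 0)
                return;

        printf("port %u: rx %" PRIu64 " pkts / %" PRIu64 " bytes, "
               "tx %" PRIu64 " pkts / %" PRIu64 " bytes\n",
               port_id, stats.ipackets, stats.ibytes,
               stats.opackets, stats.obytes);

        /* Clears the pkts, bytes and errors counters zeroed in
         * axgbe_dev_stats_reset() above. */
        rte_eth_stats_reset(port_id);
}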
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-16-18-net-axgbe-add-support-for-build-32-bit-mode.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-16-18-net-axgbe-add-support-for-build-32-bit-mode.patch
new file mode 100644
index 00000000..5b5535f1
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-16-18-net-axgbe-add-support-for-build-32-bit-mode.patch
@@ -0,0 +1,308 @@
+From patchwork Fri Mar 9 08:42:32 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,16/18] net/axgbe: add support for build 32-bit mode
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35835
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-16-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:32 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/nics/features/axgbe.ini | 1 +
+ drivers/net/axgbe/axgbe_common.h | 53 ++++++++++++++++++++++----------------
+ drivers/net/axgbe/axgbe_ethdev.c | 10 ++++---
+ drivers/net/axgbe/axgbe_ethdev.h | 8 +++---
+ drivers/net/axgbe/axgbe_rxtx.c | 12 ++++-----
+ drivers/net/axgbe/axgbe_rxtx.h | 4 +--
+ 6 files changed, 50 insertions(+), 38 deletions(-)
+
+diff --git a/doc/guides/nics/features/axgbe.ini b/doc/guides/nics/features/axgbe.ini
+index 042ff1e..ab4da55 100644
+--- a/doc/guides/nics/features/axgbe.ini
++++ b/doc/guides/nics/features/axgbe.ini
+@@ -15,4 +15,5 @@ L3 checksum offload = Y
+ L4 checksum offload = Y
+ Basic stats = Y
+ Linux UIO = Y
++x86-32 = Y
+ x86-64 = Y
+diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
+index 294f2e4..189139b 100644
+--- a/drivers/net/axgbe/axgbe_common.h
++++ b/drivers/net/axgbe/axgbe_common.h
+@@ -1507,7 +1507,7 @@ do { \
+ * register definitions formed using the input names
+ */
+ #define AXGMAC_IOREAD(_pdata, _reg) \
+- rte_read32((void *)((_pdata)->xgmac_regs + (_reg)))
++ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+ #define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \
+@@ -1515,7 +1515,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define AXGMAC_IOWRITE(_pdata, _reg, _val) \
+- rte_write32((_val), (void *)((_pdata)->xgmac_regs + (_reg)))
++ rte_write32((_val), \
++ (uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+ #define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+ do { \
+@@ -1531,8 +1532,8 @@ do { \
+ * base register value is calculated by the queue or traffic class number
+ */
+ #define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+- rte_read32((void *)((_pdata)->xgmac_regs + \
+- MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)))
++ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \
++ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+ #define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \
+@@ -1540,8 +1541,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+- rte_write32((_val), (void *)((_pdata)->xgmac_regs + \
+- MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg)))
++ rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\
++ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+ #define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+ do { \
+@@ -1557,7 +1558,7 @@ do { \
+ * base register value is obtained from the ring
+ */
+ #define AXGMAC_DMA_IOREAD(_channel, _reg) \
+- rte_read32((void *)((_channel)->dma_regs + (_reg)))
++ rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg))
+
+ #define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \
+@@ -1565,7 +1566,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+- rte_write32((_val), (void *)((_channel)->dma_regs + (_reg)))
++ rte_write32((_val), \
++ (uint8_t *)((_channel)->dma_regs) + (_reg))
+
+ #define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+ do { \
+@@ -1590,16 +1592,18 @@ do { \
+ _prefix##_##_field##_WIDTH, (_val))
+
+ #define XPCS32_IOWRITE(_pdata, _off, _val) \
+- rte_write32(_val, (void *)((_pdata)->xpcs_regs + (_off)))
++ rte_write32(_val, \
++ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+ #define XPCS32_IOREAD(_pdata, _off) \
+- rte_read32((void *)((_pdata)->xpcs_regs + (_off)))
++ rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+ #define XPCS16_IOWRITE(_pdata, _off, _val) \
+- rte_write16(_val, (void *)((_pdata)->xpcs_regs + (_off)))
++ rte_write16(_val, \
++ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+ #define XPCS16_IOREAD(_pdata, _off) \
+- rte_read16((void *)((_pdata)->xpcs_regs + (_off)))
++ rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+ /* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+@@ -1615,7 +1619,7 @@ do { \
+ _prefix##_##_field##_WIDTH, (_val))
+
+ #define XSIR0_IOREAD(_pdata, _reg) \
+- rte_read16((void *)((_pdata)->sir0_regs + (_reg)))
++ rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+ #define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+@@ -1623,7 +1627,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define XSIR0_IOWRITE(_pdata, _reg, _val) \
+- rte_write16((_val), (void *)((_pdata)->sir0_regs + (_reg)))
++ rte_write16((_val), \
++ (uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+ #define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+ do { \
+@@ -1635,7 +1640,7 @@ do { \
+ } while (0)
+
+ #define XSIR1_IOREAD(_pdata, _reg) \
+- rte_read16((void *)((_pdata)->sir1_regs + _reg))
++ rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg)
+
+ #define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+@@ -1643,7 +1648,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define XSIR1_IOWRITE(_pdata, _reg, _val) \
+- rte_write16((_val), (void *)((_pdata)->sir1_regs + (_reg)))
++ rte_write16((_val), \
++ (uint8_t *)((_pdata)->sir1_regs) + (_reg))
+
+ #define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+ do { \
+@@ -1658,7 +1664,7 @@ do { \
+ * within the register values of SerDes RxTx registers.
+ */
+ #define XRXTX_IOREAD(_pdata, _reg) \
+- rte_read16((void *)((_pdata)->rxtx_regs + (_reg)))
++ rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+ #define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+@@ -1666,7 +1672,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define XRXTX_IOWRITE(_pdata, _reg, _val) \
+- rte_write16((_val), (void *)((_pdata)->rxtx_regs + (_reg)))
++ rte_write16((_val), \
++ (uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+ #define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+ do { \
+@@ -1691,7 +1698,7 @@ do { \
+ _prefix##_##_field##_WIDTH, (_val))
+
+ #define XP_IOREAD(_pdata, _reg) \
+- rte_read32((void *)((_pdata)->xprop_regs + (_reg)))
++ rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+ #define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+@@ -1699,7 +1706,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define XP_IOWRITE(_pdata, _reg, _val) \
+- rte_write32((_val), (void *)((_pdata)->xprop_regs + (_reg)))
++ rte_write32((_val), \
++ (uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+ #define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+ do { \
+@@ -1724,7 +1732,7 @@ do { \
+ _prefix##_##_field##_WIDTH, (_val))
+
+ #define XI2C_IOREAD(_pdata, _reg) \
+- rte_read32((void *)((_pdata)->xi2c_regs + (_reg)))
++ rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+ #define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+@@ -1732,7 +1740,8 @@ do { \
+ _reg##_##_field##_WIDTH)
+
+ #define XI2C_IOWRITE(_pdata, _reg, _val) \
+- rte_write32((_val), (void *)((_pdata)->xi2c_regs + (_reg)))
++ rte_write32((_val), \
++ (uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+ #define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+ do { \
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index 9e5114b..d4d437a 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -712,10 +712,12 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+ pdata->pci_dev = pci_dev;
+
+ pdata->xgmac_regs =
+- (uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+- pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
+- pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
+- pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
++ (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
++ pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
++ + AXGBE_MAC_PROP_OFFSET);
++ pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
++ + AXGBE_I2C_CTRL_OFFSET);
++ pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+
+ /* version specific driver data*/
+ if (pci_dev->id.device_id == 0x1458)
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+index 4091d1a..91260ca 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.h
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -567,10 +567,10 @@ struct axgbe_port {
+ struct axgbe_version_data *vdata;
+
+ /* AXGMAC/XPCS related mmio registers */
+- uint64_t xgmac_regs; /* AXGMAC CSRs */
+- uint64_t xpcs_regs; /* XPCS MMD registers */
+- uint64_t xprop_regs; /* AXGBE property registers */
+- uint64_t xi2c_regs; /* AXGBE I2C CSRs */
++ void *xgmac_regs; /* AXGMAC CSRs */
++ void *xpcs_regs; /* XPCS MMD registers */
++ void *xprop_regs; /* AXGBE property registers */
++ void *xi2c_regs; /* AXGBE I2C CSRs */
+
+ /* XPCS indirect addressing lock */
+ unsigned int xpcs_window_def_reg;
+diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
+index c616fc1..4c38e47 100644
+--- a/drivers/net/axgbe/axgbe_rxtx.c
++++ b/drivers/net/axgbe/axgbe_rxtx.c
+@@ -192,9 +192,9 @@ int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->nb_desc = rx_desc;
+- rxq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+- (DMA_CH_INC * rxq->queue_id);
+- rxq->dma_tail_reg = (volatile uint32_t *)(rxq->dma_regs +
++ rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
++ (DMA_CH_INC * rxq->queue_id));
++ rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
+ DMA_CH_RDTR_LO);
+
+ rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0 :
+@@ -509,9 +509,9 @@ int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ txq->desc = tz->addr;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+- txq->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+- (DMA_CH_INC * txq->queue_id);
+- txq->dma_tail_reg = (volatile uint32_t *)(txq->dma_regs +
++ txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
++ (DMA_CH_INC * txq->queue_id));
++ txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
+ DMA_CH_TDTR_LO);
+ txq->cur = 0;
+ txq->dirty = 0;
+diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
+index 45aaf89..e7b3cfd 100644
+--- a/drivers/net/axgbe/axgbe_rxtx.h
++++ b/drivers/net/axgbe/axgbe_rxtx.h
+@@ -202,7 +202,7 @@ struct axgbe_rx_queue {
+ /* Ring physical address */
+ uint64_t ring_phys_addr;
+ /* Dma Channel register address */
+- uint64_t dma_regs;
++ void *dma_regs;
+ /* Dma channel tail register address*/
+ volatile uint32_t *dma_tail_reg;
+ /* DPDK queue index */
+@@ -249,7 +249,7 @@ struct axgbe_tx_queue {
+ /* Physical address of ring */
+ uint64_t ring_phys_addr;
+ /* Dma channel register space */
+- uint64_t dma_regs;
++ void *dma_regs;
+ /* Dma tail register address of ring*/
+ volatile uint32_t *dma_tail_reg;
+ /* Tx queue index/id*/
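The 32-bit build fix is mechanical: register bases move from uint64_t to void *, and every offset is applied through a uint8_t * cast so the arithmetic stays byte-granular and does not truncate when pointers are 32 bits wide. A standalone sketch of the resulting access pattern (helper names are illustrative, not from the driver):

#include <stdint.h>
#include <rte_io.h>

/* Compute an MMIO register address from a void * base and a byte offset.
 * Casting to uint8_t * keeps the addition byte-granular and well-defined on
 * both 32-bit and 64-bit builds, unlike adding offsets to a uint64_t copy
 * of the pointer. */
static inline uint32_t
example_reg_read32(void *base, uint32_t off)
{
        return rte_read32((uint8_t *)base + off);
}

static inline void
example_reg_write32(void *base, uint32_t off, uint32_t val)
{
        rte_write32(val, (uint8_t *)base + off);
}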
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-17-18-net-axgbe-add-workaround-for-axgbe-ethernet-training-bug.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-17-18-net-axgbe-add-workaround-for-axgbe-ethernet-training-bug.patch
new file mode 100644
index 00000000..28acc9d0
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-17-18-net-axgbe-add-workaround-for-axgbe-ethernet-training-bug.patch
@@ -0,0 +1,319 @@
+From patchwork Fri Mar 9 08:42:33 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v3,
+ 17/18] net/axgbe: add workaround for axgbe ethernet training bug
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35836
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-17-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:33 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/net/axgbe/axgbe_common.h | 8 +++
+ drivers/net/axgbe/axgbe_ethdev.c | 2 +
+ drivers/net/axgbe/axgbe_ethdev.h | 6 ++
+ drivers/net/axgbe/axgbe_mdio.c | 13 ++++-
+ drivers/net/axgbe/axgbe_phy_impl.c | 117 +++++++++++++++++++++++++++++++++++++
+ 5 files changed, 144 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
+index 189139b..e1f3b45 100644
+--- a/drivers/net/axgbe/axgbe_common.h
++++ b/drivers/net/axgbe/axgbe_common.h
+@@ -1369,6 +1369,10 @@
+ #define MDIO_VEND2_AN_STAT 0x8002
+ #endif
+
++#ifndef MDIO_VEND2_PMA_CDR_CONTROL
++#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
++#endif
++
+ #ifndef MDIO_CTRL1_SPEED1G
+ #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+ #endif
+@@ -1416,6 +1420,10 @@
+ #define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04
+ #define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08
+
++#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01
++#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00
++#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01
++
+ /*generic*/
+ #define __iomem
+
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index d4d437a..ca21152 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -172,6 +172,7 @@ static struct axgbe_version_data axgbe_v2a = {
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
++ .an_cdr_workaround = 1,
+ };
+
+ static struct axgbe_version_data axgbe_v2b = {
+@@ -183,6 +184,7 @@ static struct axgbe_version_data axgbe_v2b = {
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
++ .an_cdr_workaround = 1,
+ };
+
+ static const struct rte_eth_desc_lim rx_desc_lim = {
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+index 91260ca..4d5e6e5 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.h
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -459,6 +459,10 @@ struct axgbe_phy_impl_if {
+ /* Process results of auto-negotiation */
+ enum axgbe_mode (*an_outcome)(struct axgbe_port *);
+
++ /* Pre/Post auto-negotiation support */
++ void (*an_pre)(struct axgbe_port *port);
++ void (*an_post)(struct axgbe_port *port);
++
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct axgbe_port *);
+ void (*kr_training_post)(struct axgbe_port *);
+@@ -553,6 +557,7 @@ struct axgbe_version_data {
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
++ unsigned int an_cdr_workaround;
+ };
+
+ /*
+@@ -572,6 +577,7 @@ struct axgbe_port {
+ void *xprop_regs; /* AXGBE property registers */
+ void *xi2c_regs; /* AXGBE I2C CSRs */
+
++ bool cdr_track_early;
+ /* XPCS indirect addressing lock */
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
+index 07f4087..5f629f5 100644
+--- a/drivers/net/axgbe/axgbe_mdio.c
++++ b/drivers/net/axgbe/axgbe_mdio.c
+@@ -409,10 +409,14 @@ static void axgbe_an73_disable(struct axgbe_port *pdata)
+ {
+ axgbe_an73_set(pdata, false, false);
+ axgbe_an73_disable_interrupts(pdata);
++ pdata->an_start = 0;
+ }
+
+ static void axgbe_an_restart(struct axgbe_port *pdata)
+ {
++ if (pdata->phy_if.phy_impl.an_pre)
++ pdata->phy_if.phy_impl.an_pre(pdata);
++
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+@@ -429,6 +433,9 @@ static void axgbe_an_restart(struct axgbe_port *pdata)
+
+ static void axgbe_an_disable(struct axgbe_port *pdata)
+ {
++ if (pdata->phy_if.phy_impl.an_post)
++ pdata->phy_if.phy_impl.an_post(pdata);
++
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+@@ -604,9 +611,9 @@ static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
+ return AXGBE_AN_NO_LINK;
+ }
+
+- axgbe_an73_disable(pdata);
++ axgbe_an_disable(pdata);
+ axgbe_switch_mode(pdata);
+- axgbe_an73_restart(pdata);
++ axgbe_an_restart(pdata);
+
+ return AXGBE_AN_INCOMPAT_LINK;
+ }
+@@ -675,6 +682,8 @@ static void axgbe_an73_state_machine(struct axgbe_port *pdata)
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+ pdata->an_start = 0;
++ if (pdata->phy_if.phy_impl.an_post)
++ pdata->phy_if.phy_impl.an_post(pdata);
+ }
+
+ if (cur_state != pdata->an_state)
+diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
+index 5f69651..d9c7dc4 100644
+--- a/drivers/net/axgbe/axgbe_phy_impl.c
++++ b/drivers/net/axgbe/axgbe_phy_impl.c
+@@ -153,6 +153,11 @@
+ /* Rate-change complete wait/retry count */
+ #define AXGBE_RATECHANGE_COUNT 500
+
++/* CDR delay values for KR support (in usec) */
++#define AXGBE_CDR_DELAY_INIT 10000
++#define AXGBE_CDR_DELAY_INC 10000
++#define AXGBE_CDR_DELAY_MAX 100000
++
+ enum axgbe_port_mode {
+ AXGBE_PORT_MODE_RSVD = 0,
+ AXGBE_PORT_MODE_BACKPLANE,
+@@ -359,6 +364,10 @@ struct axgbe_phy_data {
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
++
++ /* KR AN support */
++ unsigned int phy_cdr_notrack;
++ unsigned int phy_cdr_delay;
+ };
+
+ static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+@@ -1888,6 +1897,100 @@ static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
+ return true;
+ }
+
++static void axgbe_phy_cdr_track(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (!pdata->vdata->an_cdr_workaround)
++ return;
++
++ if (!phy_data->phy_cdr_notrack)
++ return;
++
++ rte_delay_us(phy_data->phy_cdr_delay + 400);
++
++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
++ AXGBE_PMA_CDR_TRACK_EN_MASK,
++ AXGBE_PMA_CDR_TRACK_EN_ON);
++
++ phy_data->phy_cdr_notrack = 0;
++}
++
++static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (!pdata->vdata->an_cdr_workaround)
++ return;
++
++ if (phy_data->phy_cdr_notrack)
++ return;
++
++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
++ AXGBE_PMA_CDR_TRACK_EN_MASK,
++ AXGBE_PMA_CDR_TRACK_EN_OFF);
++
++ axgbe_phy_rrc(pdata);
++
++ phy_data->phy_cdr_notrack = 1;
++}
++
++static void axgbe_phy_kr_training_post(struct axgbe_port *pdata)
++{
++ if (!pdata->cdr_track_early)
++ axgbe_phy_cdr_track(pdata);
++}
++
++static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata)
++{
++ if (pdata->cdr_track_early)
++ axgbe_phy_cdr_track(pdata);
++}
++
++static void axgbe_phy_an_post(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ if (phy_data->cur_mode != AXGBE_MODE_KR)
++ break;
++
++ axgbe_phy_cdr_track(pdata);
++
++ switch (pdata->an_result) {
++ case AXGBE_AN_READY:
++ case AXGBE_AN_COMPLETE:
++ break;
++ default:
++ if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX)
++ phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC;
++ break;
++ }
++ break;
++ default:
++ break;
++ }
++}
++
++static void axgbe_phy_an_pre(struct axgbe_port *pdata)
++{
++ struct axgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (pdata->an_mode) {
++ case AXGBE_AN_MODE_CL73:
++ case AXGBE_AN_MODE_CL73_REDRV:
++ if (phy_data->cur_mode != AXGBE_MODE_KR)
++ break;
++
++ axgbe_phy_cdr_notrack(pdata);
++ break;
++ default:
++ break;
++ }
++}
++
+ static void axgbe_phy_stop(struct axgbe_port *pdata)
+ {
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+@@ -1896,6 +1999,9 @@ static void axgbe_phy_stop(struct axgbe_port *pdata)
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+
++ /* Reset CDR support */
++ axgbe_phy_cdr_track(pdata);
++
+ /* Power off the PHY */
+ axgbe_phy_power_off(pdata);
+
+@@ -1916,6 +2022,9 @@ static int axgbe_phy_start(struct axgbe_port *pdata)
+ /* Start in highest supported mode */
+ axgbe_phy_set_mode(pdata, phy_data->start_mode);
+
++ /* Reset CDR support */
++ axgbe_phy_cdr_track(pdata);
++
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_SFP:
+@@ -2173,6 +2282,8 @@ static int axgbe_phy_init(struct axgbe_port *pdata)
+ return -EINVAL;
+ }
+ }
++
++ phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT;
+ return 0;
+ }
+ void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+@@ -2193,4 +2304,10 @@ void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+ phy_impl->an_config = axgbe_phy_an_config;
+ phy_impl->an_advertising = axgbe_phy_an_advertising;
+ phy_impl->an_outcome = axgbe_phy_an_outcome;
++
++ phy_impl->an_pre = axgbe_phy_an_pre;
++ phy_impl->an_post = axgbe_phy_an_post;
++
++ phy_impl->kr_training_pre = axgbe_phy_kr_training_pre;
++ phy_impl->kr_training_post = axgbe_phy_kr_training_post;
+ }
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-18-18-net-axgbe-moved-license-headers-to-SPDX-format.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-18-18-net-axgbe-moved-license-headers-to-SPDX-format.patch
new file mode 100644
index 00000000..9fd23471
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v3-18-18-net-axgbe-moved-license-headers-to-SPDX-format.patch
@@ -0,0 +1,1942 @@
+From patchwork Fri Mar 9 08:42:34 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v3,18/18] net/axgbe: moved license headers to SPDX format
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35837
+X-Patchwork-Delegate: ferruh.yigit@intel.com
+Message-Id: <1520584954-130575-18-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: ferruh.yigit@intel.com
+Date: Fri, 9 Mar 2018 03:42:34 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/nics/axgbe.rst | 125 +-------------------------------
+ drivers/net/axgbe/Makefile | 127 +--------------------------------
+ drivers/net/axgbe/axgbe_common.h | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_dev.c | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_ethdev.c | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_ethdev.h | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_i2c.c | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_logs.h | 126 +-------------------------------
+ drivers/net/axgbe/axgbe_mdio.c | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_phy.h | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_phy_impl.c | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_rxtx.c | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_rxtx.h | 127 ++-------------------------------
+ drivers/net/axgbe/axgbe_rxtx_vec_sse.c | 127 ++-------------------------------
+ 14 files changed, 50 insertions(+), 1725 deletions(-)
+
+diff --git a/doc/guides/nics/axgbe.rst b/doc/guides/nics/axgbe.rst
+index 9038463..b484d51 100644
+--- a/doc/guides/nics/axgbe.rst
++++ b/doc/guides/nics/axgbe.rst
+@@ -1,126 +1,5 @@
+-.. Copyright (c) 2017 Advanced Micro Devices, Inc.
+- All rights reserved.
+-
+- AMD 10Gb Ethernet driver
+-
+- This file is available to you under your choice of the following two
+- licenses:
+-
+- License 1: GPLv2
+-
+- Copyright (c) 2017 Advanced Micro Devices, Inc.
+-
+- This file is free software; you may copy, redistribute and/or modify
+- it under the terms of the GNU General Public License as published by
+- the Free Software Foundation, either version 2 of the License, or (at
+- your option) any later version.
+-
+- This file is distributed in the hope that it will be useful, but
+- WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- General Public License for more details.
+-
+- You should have received a copy of the GNU General Public License
+- along with this program. If not, see <http://www.gnu.org/licenses/>.
+-
+- This file incorporates work covered by the following copyright and
+- permission notice:
+-
+- Copyright (c) 2013 Synopsys, Inc.
+-
+- The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- Inc. unless otherwise expressly agreed to in writing between Synopsys
+- and you.
+-
+- The Software IS NOT an item of Licensed Software or Licensed Product
+- under any End User Software License Agreement or Agreement for Licensed
+- Product with Synopsys or any supplement thereto. Permission is hereby
+- granted, free of charge, to any person obtaining a copy of this software
+- annotated with this license and the Software, to deal in the Software
+- without restriction, including without limitation the rights to use,
+- copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- of the Software, and to permit persons to whom the Software is furnished
+- to do so, subject to the following conditions:
+-
+- The above copyright notice and this permission notice shall be included
+- in all copies or substantial portions of the Software.
+-
+- THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- THE POSSIBILITY OF SUCH DAMAGE.
+-
+- License 2: Modified BSD
+-
+- Copyright (c) 2017 Advanced Micro Devices, Inc.
+- All rights reserved.
+-
+- Redistribution and use in source and binary forms, with or without
+- modification, are permitted provided that the following conditions
+- are met:
+-
+- * Redistributions of source code must retain the above copyright
+- notice, this list of conditions and the following disclaimer.
+- * Redistributions in binary form must reproduce the above copyright
+- notice, this list of conditions and the following disclaimer in the
+- documentation and/or other materials provided with the distribution.
+- * Neither the name of Advanced Micro Devices, Inc. nor the
+- names of its contributors may be used to endorse or promote products
+- derived from this software without specific prior written permission.
+-
+- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+- AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+- IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+- ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+- (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+- LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+- THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-
+- This file incorporates work covered by the following copyright and
+- permission notice:
+-
+- Copyright (c) 2013 Synopsys, Inc.
+-
+- The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- Inc. unless otherwise expressly agreed to in writing between Synopsys
+- and you.
+-
+- The Software IS NOT an item of Licensed Software or Licensed Product
+- under any End User Software License Agreement or Agreement for Licensed
+- Product with Synopsys or any supplement thereto. Permission is hereby
+- granted, free of charge, to any person obtaining a copy of this software
+- annotated with this license and the Software, to deal in the Software
+- without restriction, including without limitation the rights to use,
+- copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- of the Software, and to permit persons to whom the Software is furnished
+- to do so, subject to the following conditions:
+-
+- The above copyright notice and this permission notice shall be included
+- in all copies or substantial portions of the Software.
+-
+- THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- THE POSSIBILITY OF SUCH DAMAGE.
++.. Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ SPDX-License-Identifier: BSD-3-Clause
+
+ AXGBE Poll Mode Driver
+ ======================
+diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
+index 58eb41e..e1e0306 100644
+--- a/drivers/net/axgbe/Makefile
++++ b/drivers/net/axgbe/Makefile
+@@ -1,128 +1,5 @@
+-#
+-# Copyright (c) 2017 Advanced Micro Devices, Inc.
+-# All rights reserved.
+-#
+-# AMD 10Gb Ethernet driver
+-#
+-# This file is available to you under your choice of the following two
+-# licenses:
+-#
+-# License 1: GPLv2
+-#
+-# Copyright (c) 2017 Advanced Micro Devices, Inc.
+-#
+-# This file is free software; you may copy, redistribute and/or modify
+-# it under the terms of the GNU General Public License as published by
+-# the Free Software Foundation, either version 2 of the License, or (at
+-# your option) any later version.
+-#
+-# This file is distributed in the hope that it will be useful, but
+-# WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+-# General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program. If not, see <http://www.gnu.org/licenses/>.
+-#
+-# This file incorporates work covered by the following copyright and
+-# permission notice:
+-#
+-# Copyright (c) 2013 Synopsys, Inc.
+-#
+-# The Synopsys DWC ETHER XGMAC Software Driver and documentation
+-# (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+-# Inc. unless otherwise expressly agreed to in writing between Synopsys
+-# and you.
+-#
+-# The Software IS NOT an item of Licensed Software or Licensed Product
+-# under any End User Software License Agreement or Agreement for Licensed
+-# Product with Synopsys or any supplement thereto. Permission is hereby
+-# granted, free of charge, to any person obtaining a copy of this software
+-# annotated with this license and the Software, to deal in the Software
+-# without restriction, including without limitation the rights to use,
+-# copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+-# of the Software, and to permit persons to whom the Software is furnished
+-# to do so, subject to the following conditions:
+-#
+-# The above copyright notice and this permission notice shall be included
+-# in all copies or substantial portions of the Software.
+-#
+-# THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+-# BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+-# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+-# PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+-# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+-# THE POSSIBILITY OF SUCH DAMAGE.
+-#
+-# License 2: Modified BSD
+-#
+-# Copyright (c) 2017 Advanced Micro Devices, Inc.
+-# All rights reserved.
+-#
+-# Redistribution and use in source and binary forms, with or without
+-# modification, are permitted provided that the following conditions
+-# are met:
+-#
+-# * Redistributions of source code must retain the above copyright
+-# notice, this list of conditions and the following disclaimer.
+-# * Redistributions in binary form must reproduce the above copyright
+-# notice, this list of conditions and the following disclaimer in
+-# the documentation and/or other materials provided with the
+-# distribution.
+-# * Neither the name of Advanced Micro Devices, Inc. nor the
+-# names of its contributors may be used to endorse or promote products
+-# derived from this software without specific prior written permission.
+-#
+-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+-# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+-# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+-#
+-# This file incorporates work covered by the following copyright and
+-# permission notice:
+-#
+-# Copyright (c) 2013 Synopsys, Inc.
+-#
+-# The Synopsys DWC ETHER XGMAC Software Driver and documentation
+-# (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+-# Inc. unless otherwise expressly agreed to in writing between Synopsys
+-# and you.
+-#
+-# The Software IS NOT an item of Licensed Software or Licensed Product
+-# under any End User Software License Agreement or Agreement for Licensed
+-# Product with Synopsys or any supplement thereto. Permission is hereby
+-# granted, free of charge, to any person obtaining a copy of this software
+-# annotated with this license and the Software, to deal in the Software
+-# without restriction, including without limitation the rights to use,
+-# copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+-# of the Software, and to permit persons to whom the Software is furnished
+-# to do so, subject to the following conditions:
+-#
+-# The above copyright notice and this permission notice shall be included
+-# in all copies or substantial portions of the Software.
+-#
+-# THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+-# BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+-# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+-# PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+-# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+-# THE POSSIBILITY OF SUCH DAMAGE.
++# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++# SPDX-License-Identifier: BSD-3-Clause
+
+ include $(RTE_SDK)/mk/rte.vars.mk
+
+diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
+index e1f3b45..194045b 100644
+--- a/drivers/net/axgbe/axgbe_common.h
++++ b/drivers/net/axgbe/axgbe_common.h
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef __AXGBE_COMMON_H__
+diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
+index a69a078..1176580 100644
+--- a/drivers/net/axgbe/axgbe_dev.c
++++ b/drivers/net/axgbe/axgbe_dev.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_ethdev.h"
+diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
+index ca21152..c070ae8 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.c
++++ b/drivers/net/axgbe/axgbe_ethdev.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_rxtx.h"
+diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
+index 4d5e6e5..40ce6e1 100644
+--- a/drivers/net/axgbe/axgbe_ethdev.h
++++ b/drivers/net/axgbe/axgbe_ethdev.h
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef RTE_ETH_AXGBE_H_
+diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c
+index 468955e..c5d5247 100644
+--- a/drivers/net/axgbe/axgbe_i2c.c
++++ b/drivers/net/axgbe/axgbe_i2c.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_ethdev.h"
+diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h
+index aaa8efc..5c5dab9 100644
+--- a/drivers/net/axgbe/axgbe_logs.h
++++ b/drivers/net/axgbe/axgbe_logs.h
+@@ -1,128 +1,6 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef _AXGBE_LOGS_H_
+diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
+index 5f629f5..b010ad0 100644
+--- a/drivers/net/axgbe/axgbe_mdio.c
++++ b/drivers/net/axgbe/axgbe_mdio.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_ethdev.h"
+diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h
+index f7c8a88..78d7967 100644
+--- a/drivers/net/axgbe/axgbe_phy.h
++++ b/drivers/net/axgbe/axgbe_phy.h
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef __AXGBE_PHY_H__
+diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
+index d9c7dc4..aab854d 100644
+--- a/drivers/net/axgbe/axgbe_phy_impl.c
++++ b/drivers/net/axgbe/axgbe_phy_impl.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2014-2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_ethdev.h"
+diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
+index 4c38e47..7cf1bd6 100644
+--- a/drivers/net/axgbe/axgbe_rxtx.c
++++ b/drivers/net/axgbe/axgbe_rxtx.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_ethdev.h"
+diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
+index e7b3cfd..1a63e16 100644
+--- a/drivers/net/axgbe/axgbe_rxtx.h
++++ b/drivers/net/axgbe/axgbe_rxtx.h
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef _AXGBE_RXTX_H_
+diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+index c2bd5da..bfd6b0c 100644
+--- a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
++++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
+@@ -1,128 +1,9 @@
+ /*-
+- * Copyright(c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ *
+- * AMD 10Gb Ethernet driver
+- *
+- * This file is available to you under your choice of the following two
+- * licenses:
+- *
+- * License 1: GPLv2
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- *
+- * This file is free software; you may copy, redistribute and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation, either version 2 of the License, or
+- * (at your option) any later version.
+- *
+- * This file is distributed in the hope that it will be useful, but
+- * WITHOUT ANY WARRANTY; without even the implied warranty of
+- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- * General Public License for more details.
+- *
+- * You should have received a copy of the GNU General Public License
+- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * License 2: Modified BSD
+- *
+- * Copyright (c) 2017 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of Advanced Micro Devices, Inc. nor the
+- * names of its contributors may be used to endorse or promote products
+- * derived from this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+- * <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+- *
+- * This file incorporates work covered by the following copyright and
+- * permission notice:
+- *
+- * Copyright (c) 2013 Synopsys, Inc.
+- *
+- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+- * Inc. unless otherwise expressly agreed to in writing between Synopsys
+- * and you.
+- *
+- * The Software IS NOT an item of Licensed Software or Licensed Product
+- * under any End User Software License Agreement or Agreement for Licensed
+- * Product with Synopsys or any supplement thereto. Permission is hereby
+- * granted, free of charge, to any person obtaining a copy of this software
+- * annotated with this license and the Software, to deal in the Software
+- * without restriction, including without limitation the rights to use,
+- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+- * of the Software, and to permit persons to whom the Software is furnished
+- * to do so, subject to the following conditions:
+- *
+- * The above copyright notice and this permission notice shall be included
+- * in all copies or substantial portions of the Software.
+- *
+- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+- * THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include "axgbe_ethdev.h"
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-01-20-crypto-ccp-add-AMD-ccp-skeleton-PMD.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-01-20-crypto-ccp-add-AMD-ccp-skeleton-PMD.patch
new file mode 100644
index 00000000..c4448f7a
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-01-20-crypto-ccp-add-AMD-ccp-skeleton-PMD.patch
@@ -0,0 +1,241 @@
+From patchwork Fri Mar 9 08:35:01 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,01/20] crypto/ccp: add AMD ccp skeleton PMD
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35800
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-1-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:01 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ MAINTAINERS | 6 +++
+ config/common_base | 5 +++
+ doc/guides/rel_notes/release_18_02.rst | 5 +++
+ drivers/crypto/Makefile | 1 +
+ drivers/crypto/ccp/Makefile | 55 ++++++++++++++++++++++++++
+ drivers/crypto/ccp/rte_ccp_pmd.c | 62 ++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/rte_pmd_ccp_version.map | 4 ++
+ mk/rte.app.mk | 2 +
+ 8 files changed, 140 insertions(+)
+ create mode 100644 drivers/crypto/ccp/Makefile
+ create mode 100644 drivers/crypto/ccp/rte_ccp_pmd.c
+ create mode 100644 drivers/crypto/ccp/rte_pmd_ccp_version.map
+
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a646ca3..8481731 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -640,6 +640,12 @@ M: Pablo de Lara <pablo.de.lara.guarch@intel.com>
+ T: git://dpdk.org/next/dpdk-next-crypto
+ F: doc/guides/cryptodevs/features/default.ini
+
++AMD CCP Crypto PMD
++M: Ravi Kumar <ravi1.kumar@amd.com>
++F: drivers/crypto/ccp/
++F: doc/guides/cryptodevs/ccp.rst
++F: doc/guides/cryptodevs/features/ccp.ini
++
+ ARMv8 Crypto
+ M: Jerin Jacob <jerin.jacob@caviumnetworks.com>
+ F: drivers/crypto/armv8/
+diff --git a/config/common_base b/config/common_base
+index ad03cf4..28237f0 100644
+--- a/config/common_base
++++ b/config/common_base
+@@ -529,6 +529,11 @@ CONFIG_RTE_LIBRTE_PMD_CRYPTO_SCHEDULER_DEBUG=n
+ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
+
+ #
++# Compile PMD for AMD CCP crypto device
++#
++CONFIG_RTE_LIBRTE_PMD_CCP=n
++
++#
+ # Compile PMD for Marvell Crypto device
+ #
+ CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO=n
+diff --git a/doc/guides/rel_notes/release_18_02.rst b/doc/guides/rel_notes/release_18_02.rst
+index 3923dc2..c5b2854 100644
+--- a/doc/guides/rel_notes/release_18_02.rst
++++ b/doc/guides/rel_notes/release_18_02.rst
+@@ -41,6 +41,11 @@ New Features
+ Also, make sure to start the actual text at the margin.
+ =========================================================
+
++* **Added a new crypto poll mode driver for AMD CCP devices.**
++
++ Added the new ``ccp`` crypto driver for AMD CCP devices. See the
++ :doc:`../cryptodevs/ccp` crypto driver guide for more details on
++ this new driver.
+
+ API Changes
+ -----------
+diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
+index 628bd14..fe41edd 100644
+--- a/drivers/crypto/Makefile
++++ b/drivers/crypto/Makefile
+@@ -16,5 +16,6 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_MRVL_CRYPTO) += mrvl
+ DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null
+ DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA2_SEC) += dpaa2_sec
+ DIRS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += dpaa_sec
++DIRS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp
+
+ include $(RTE_SDK)/mk/rte.subdir.mk
+diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
+new file mode 100644
+index 0000000..51c5e5b
+--- /dev/null
++++ b/drivers/crypto/ccp/Makefile
+@@ -0,0 +1,55 @@
++#
++# Copyright(c) 2018 Advanced Micro Devices, Inc.
++# All rights reserved.
++#
++# Redistribution and use in source and binary forms, with or without
++# modification, are permitted provided that the following conditions
++# are met:
++#
++# * Redistributions of source code must retain the above copyright
++# notice, this list of conditions and the following disclaimer.
++# * Redistributions in binary form must reproduce the above copyright
++# notice, this list of conditions and the following disclaimer in the
++# documentation and/or other materials provided with the distribution.
++# * Neither the name of the copyright holder nor the names of its
++# contributors may be used to endorse or promote products derived from
++# this software without specific prior written permission.
++#
++# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++include $(RTE_SDK)/mk/rte.vars.mk
++
++# library name
++LIB = librte_pmd_ccp.a
++
++# build flags
++CFLAGS += -O3
++CFLAGS += -I$(SRCDIR)
++CFLAGS += $(WERROR_FLAGS)
++
++# library version
++LIBABIVER := 1
++
++# external library include paths
++LDLIBS += -lcrypto
++LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
++LDLIBS += -lrte_cryptodev
++LDLIBS += -lrte_pci -lrte_bus_pci
++
++# versioning export map
++EXPORT_MAP := rte_pmd_ccp_version.map
++
++# library source files
++SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
++
++include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
+new file mode 100644
+index 0000000..6fa14bd
+--- /dev/null
++++ b/drivers/crypto/ccp/rte_ccp_pmd.c
+@@ -0,0 +1,62 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <rte_bus_vdev.h>
++#include <rte_cryptodev.h>
++#include <rte_cryptodev_pmd.h>
++
++uint8_t ccp_cryptodev_driver_id;
++
++/** Remove ccp pmd */
++static int
++cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
++{
++ return 0;
++}
++
++/** Probe ccp pmd */
++static int
++cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
++{
++ return 0;
++}
++
++static struct rte_vdev_driver cryptodev_ccp_pmd_drv = {
++ .probe = cryptodev_ccp_probe,
++ .remove = cryptodev_ccp_remove
++};
++
++static struct cryptodev_driver ccp_crypto_drv;
++
++RTE_PMD_REGISTER_VDEV(CRYPTODEV_NAME_CCP_PMD, cryptodev_ccp_pmd_drv);
++RTE_PMD_REGISTER_PARAM_STRING(CRYPTODEV_NAME_CCP_PMD,
++ "max_nb_queue_pairs=<int> max_nb_sessions=<int> socket_id=<int>");
++RTE_PMD_REGISTER_CRYPTO_DRIVER(ccp_crypto_drv, cryptodev_ccp_pmd_drv,
++ ccp_cryptodev_driver_id);
+diff --git a/drivers/crypto/ccp/rte_pmd_ccp_version.map b/drivers/crypto/ccp/rte_pmd_ccp_version.map
+new file mode 100644
+index 0000000..9b9ab1a
+--- /dev/null
++++ b/drivers/crypto/ccp/rte_pmd_ccp_version.map
+@@ -0,0 +1,4 @@
++DPDK_18.05 {
++
++ local: *;
++};
+diff --git a/mk/rte.app.mk b/mk/rte.app.mk
+index 3eb41d1..95c1221 100644
+--- a/mk/rte.app.mk
++++ b/mk/rte.app.mk
+@@ -215,6 +215,8 @@ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += -lrte_bus_dpaa
+ _LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_DPAA_SEC) += -lrte_pmd_dpaa_sec
+ endif # CONFIG_RTE_LIBRTE_DPAA_BUS
+
++_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += -lrte_pmd_ccp -lcrypto
++
+ endif # CONFIG_RTE_LIBRTE_CRYPTODEV
+
+ ifeq ($(CONFIG_RTE_LIBRTE_EVENTDEV),y)
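The skeleton above only wires the CCP vdev driver into the build: probe and
remove are stubs that return 0, and the PMD is disabled by default
(CONFIG_RTE_LIBRTE_PMD_CCP=n). As a minimal usage sketch (not part of the
patch), an application built with the option set to "y" could instantiate the
vdev and resolve its driver id at runtime. The "crypto_ccp" name string is an
assumption here; the patch registers under CRYPTODEV_NAME_CCP_PMD, whose
string value is introduced later in the series.

#include <rte_bus_vdev.h>
#include <rte_cryptodev.h>

/* Hypothetical smoke test for the skeleton PMD. */
static int
ccp_skeleton_smoke_test(void)
{
	int driver_id;

	/* "crypto_ccp" is assumed to be the string behind CRYPTODEV_NAME_CCP_PMD */
	if (rte_vdev_init("crypto_ccp",
			  "max_nb_queue_pairs=4 max_nb_sessions=32 socket_id=0") != 0)
		return -1;

	driver_id = rte_cryptodev_driver_id_get("crypto_ccp");
	return driver_id < 0 ? -1 : 0;
}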
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-02-20-crypto-ccp-support-ccp-device-initialization-and-deintialization.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-02-20-crypto-ccp-support-ccp-device-initialization-and-deintialization.patch
new file mode 100644
index 00000000..2b973ef6
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-02-20-crypto-ccp-support-ccp-device-initialization-and-deintialization.patch
@@ -0,0 +1,1809 @@
+From patchwork Fri Mar 9 08:35:02 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 02/20] crypto/ccp: support ccp device initialization and
+ deintialization
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35801
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-2-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:02 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/Makefile | 3 +
+ drivers/crypto/ccp/ccp_dev.c | 787 +++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/ccp_dev.h | 310 ++++++++++++++
+ drivers/crypto/ccp/ccp_pci.c | 262 ++++++++++++
+ drivers/crypto/ccp/ccp_pci.h | 53 +++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 55 +++
+ drivers/crypto/ccp/ccp_pmd_private.h | 82 ++++
+ drivers/crypto/ccp/rte_ccp_pmd.c | 151 ++++++-
+ 8 files changed, 1701 insertions(+), 2 deletions(-)
+ create mode 100644 drivers/crypto/ccp/ccp_dev.c
+ create mode 100644 drivers/crypto/ccp/ccp_dev.h
+ create mode 100644 drivers/crypto/ccp/ccp_pci.c
+ create mode 100644 drivers/crypto/ccp/ccp_pci.h
+ create mode 100644 drivers/crypto/ccp/ccp_pmd_ops.c
+ create mode 100644 drivers/crypto/ccp/ccp_pmd_private.h
+
+diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
+index 51c5e5b..5e58c31 100644
+--- a/drivers/crypto/ccp/Makefile
++++ b/drivers/crypto/ccp/Makefile
+@@ -51,5 +51,8 @@ EXPORT_MAP := rte_pmd_ccp_version.map
+
+ # library source files
+ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
++SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
++SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
++SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
+
+ include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
+new file mode 100644
+index 0000000..5af2b49
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_dev.c
+@@ -0,0 +1,787 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <dirent.h>
++#include <fcntl.h>
++#include <stdio.h>
++#include <string.h>
++#include <sys/mman.h>
++#include <sys/queue.h>
++#include <sys/types.h>
++#include <sys/file.h>
++#include <unistd.h>
++
++#include <rte_hexdump.h>
++#include <rte_memzone.h>
++#include <rte_malloc.h>
++#include <rte_memory.h>
++#include <rte_spinlock.h>
++#include <rte_string_fns.h>
++
++#include "ccp_dev.h"
++#include "ccp_pci.h"
++#include "ccp_pmd_private.h"
++
++struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
++static int ccp_dev_id;
++
++static const struct rte_memzone *
++ccp_queue_dma_zone_reserve(const char *queue_name,
++ uint32_t queue_size,
++ int socket_id)
++{
++ const struct rte_memzone *mz;
++ unsigned int memzone_flags = 0;
++ const struct rte_memseg *ms;
++
++ mz = rte_memzone_lookup(queue_name);
++ if (mz != 0)
++ return mz;
++
++ ms = rte_eal_get_physmem_layout();
++ switch (ms[0].hugepage_sz) {
++ case(RTE_PGSIZE_2M):
++ memzone_flags = RTE_MEMZONE_2MB;
++ break;
++ case(RTE_PGSIZE_1G):
++ memzone_flags = RTE_MEMZONE_1GB;
++ break;
++ case(RTE_PGSIZE_16M):
++ memzone_flags = RTE_MEMZONE_16MB;
++ break;
++ case(RTE_PGSIZE_16G):
++ memzone_flags = RTE_MEMZONE_16GB;
++ break;
++ default:
++ memzone_flags = RTE_MEMZONE_SIZE_HINT_ONLY;
++ }
++
++ return rte_memzone_reserve_aligned(queue_name,
++ queue_size,
++ socket_id,
++ memzone_flags,
++ queue_size);
++}
++
++/* bitmap support apis */
++static inline void
++ccp_set_bit(unsigned long *bitmap, int n)
++{
++ __sync_fetch_and_or(&bitmap[WORD_OFFSET(n)], (1UL << BIT_OFFSET(n)));
++}
++
++static inline void
++ccp_clear_bit(unsigned long *bitmap, int n)
++{
++ __sync_fetch_and_and(&bitmap[WORD_OFFSET(n)], ~(1UL << BIT_OFFSET(n)));
++}
++
++static inline uint32_t
++ccp_get_bit(unsigned long *bitmap, int n)
++{
++ return ((bitmap[WORD_OFFSET(n)] & (1 << BIT_OFFSET(n))) != 0);
++}
++
++
++static inline uint32_t
++ccp_ffz(unsigned long word)
++{
++ unsigned long first_zero;
++
++ first_zero = __builtin_ffsl(~word);
++ return first_zero ? (first_zero - 1) :
++ BITS_PER_WORD;
++}
++
++static inline uint32_t
++ccp_find_first_zero_bit(unsigned long *addr, uint32_t limit)
++{
++ uint32_t i;
++ uint32_t nwords = 0;
++
++ nwords = (limit - 1) / BITS_PER_WORD + 1;
++ for (i = 0; i < nwords; i++) {
++ if (addr[i] == 0UL)
++ return i * BITS_PER_WORD;
++ if (addr[i] < ~(0UL))
++ break;
++ }
++ return (i == nwords) ? limit : i * BITS_PER_WORD + ccp_ffz(addr[i]);
++}
++
++static void
++ccp_bitmap_set(unsigned long *map, unsigned int start, int len)
++{
++ unsigned long *p = map + WORD_OFFSET(start);
++ const unsigned int size = start + len;
++ int bits_to_set = BITS_PER_WORD - (start % BITS_PER_WORD);
++ unsigned long mask_to_set = CCP_BITMAP_FIRST_WORD_MASK(start);
++
++ while (len - bits_to_set >= 0) {
++ *p |= mask_to_set;
++ len -= bits_to_set;
++ bits_to_set = BITS_PER_WORD;
++ mask_to_set = ~0UL;
++ p++;
++ }
++ if (len) {
++ mask_to_set &= CCP_BITMAP_LAST_WORD_MASK(size);
++ *p |= mask_to_set;
++ }
++}
++
++static void
++ccp_bitmap_clear(unsigned long *map, unsigned int start, int len)
++{
++ unsigned long *p = map + WORD_OFFSET(start);
++ const unsigned int size = start + len;
++ int bits_to_clear = BITS_PER_WORD - (start % BITS_PER_WORD);
++ unsigned long mask_to_clear = CCP_BITMAP_FIRST_WORD_MASK(start);
++
++ while (len - bits_to_clear >= 0) {
++ *p &= ~mask_to_clear;
++ len -= bits_to_clear;
++ bits_to_clear = BITS_PER_WORD;
++ mask_to_clear = ~0UL;
++ p++;
++ }
++ if (len) {
++ mask_to_clear &= CCP_BITMAP_LAST_WORD_MASK(size);
++ *p &= ~mask_to_clear;
++ }
++}
++
++
++static unsigned long
++_ccp_find_next_bit(const unsigned long *addr,
++ unsigned long nbits,
++ unsigned long start,
++ unsigned long invert)
++{
++ unsigned long tmp;
++
++ if (!nbits || start >= nbits)
++ return nbits;
++
++ tmp = addr[start / BITS_PER_WORD] ^ invert;
++
++ /* Handle 1st word. */
++ tmp &= CCP_BITMAP_FIRST_WORD_MASK(start);
++ start = ccp_round_down(start, BITS_PER_WORD);
++
++ while (!tmp) {
++ start += BITS_PER_WORD;
++ if (start >= nbits)
++ return nbits;
++
++ tmp = addr[start / BITS_PER_WORD] ^ invert;
++ }
++
++ return RTE_MIN(start + (ffs(tmp) - 1), nbits);
++}
++
++static unsigned long
++ccp_find_next_bit(const unsigned long *addr,
++ unsigned long size,
++ unsigned long offset)
++{
++ return _ccp_find_next_bit(addr, size, offset, 0UL);
++}
++
++static unsigned long
++ccp_find_next_zero_bit(const unsigned long *addr,
++ unsigned long size,
++ unsigned long offset)
++{
++ return _ccp_find_next_bit(addr, size, offset, ~0UL);
++}
++
++/**
++ * ccp_bitmap_find_next_zero_area - find a contiguous zero area
++ * @map: The address to base the search on
++ * @size: The bitmap size in bits
++ * @start: The bitnumber to start searching at
++ * @nr: The number of zeroed bits we're looking for
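++ *
++ * Example: for an 8-bit map 0b00110001 with @start = 0 and @nr = 3,
++ * bit 0 is set, the search moves on to bit 1, bits 1-3 are clear,
++ * and the function returns 1.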
++ */
++static unsigned long
++ccp_bitmap_find_next_zero_area(unsigned long *map,
++ unsigned long size,
++ unsigned long start,
++ unsigned int nr)
++{
++ unsigned long index, end, i;
++
++again:
++ index = ccp_find_next_zero_bit(map, size, start);
++
++ end = index + nr;
++ if (end > size)
++ return end;
++ i = ccp_find_next_bit(map, end, index);
++ if (i < end) {
++ start = i + 1;
++ goto again;
++ }
++ return index;
++}
++
++static uint32_t
++ccp_lsb_alloc(struct ccp_queue *cmd_q, unsigned int count)
++{
++ struct ccp_device *ccp;
++ int start;
++
++ /* First look at the map for the queue */
++ if (cmd_q->lsb >= 0) {
++ start = (uint32_t)ccp_bitmap_find_next_zero_area(cmd_q->lsbmap,
++ LSB_SIZE, 0,
++ count);
++ if (start < LSB_SIZE) {
++ ccp_bitmap_set(cmd_q->lsbmap, start, count);
++ return start + cmd_q->lsb * LSB_SIZE;
++ }
++ }
++
++ /* try to get an entry from the shared blocks */
++ ccp = cmd_q->dev;
++
++ rte_spinlock_lock(&ccp->lsb_lock);
++
++ start = (uint32_t)ccp_bitmap_find_next_zero_area(ccp->lsbmap,
++ MAX_LSB_CNT * LSB_SIZE,
++ 0, count);
++ if (start <= MAX_LSB_CNT * LSB_SIZE) {
++ ccp_bitmap_set(ccp->lsbmap, start, count);
++ rte_spinlock_unlock(&ccp->lsb_lock);
++ return start * LSB_ITEM_SIZE;
++ }
++ CCP_LOG_ERR("NO LSBs available");
++
++ rte_spinlock_unlock(&ccp->lsb_lock);
++
++ return 0;
++}
++
++static void __rte_unused
++ccp_lsb_free(struct ccp_queue *cmd_q,
++ unsigned int start,
++ unsigned int count)
++{
++ int lsbno = start / LSB_SIZE;
++
++ if (!start)
++ return;
++
++ if (cmd_q->lsb == lsbno) {
++ /* An entry from the private LSB */
++ ccp_bitmap_clear(cmd_q->lsbmap, start % LSB_SIZE, count);
++ } else {
++ /* From the shared LSBs */
++ struct ccp_device *ccp = cmd_q->dev;
++
++ rte_spinlock_lock(&ccp->lsb_lock);
++ ccp_bitmap_clear(ccp->lsbmap, start, count);
++ rte_spinlock_unlock(&ccp->lsb_lock);
++ }
++}
++
++static int
++ccp_find_lsb_regions(struct ccp_queue *cmd_q, uint64_t status)
++{
++ int q_mask = 1 << cmd_q->id;
++ int weight = 0;
++ int j;
++
++ /* Build a bit mask to know which LSBs
++ * this queue has access to.
++ * Don't bother with segment 0
++ * as it has special
++ * privileges.
++ */
++ cmd_q->lsbmask = 0;
++ status >>= LSB_REGION_WIDTH;
++ for (j = 1; j < MAX_LSB_CNT; j++) {
++ if (status & q_mask)
++ ccp_set_bit(&cmd_q->lsbmask, j);
++
++ status >>= LSB_REGION_WIDTH;
++ }
++
++ for (j = 0; j < MAX_LSB_CNT; j++)
++ if (ccp_get_bit(&cmd_q->lsbmask, j))
++ weight++;
++
++ printf("Queue %d can access %d LSB regions of mask %lu\n",
++ (int)cmd_q->id, weight, cmd_q->lsbmask);
++
++ return weight ? 0 : -EINVAL;
++}
++
++static int
++ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
++ int lsb_cnt, int n_lsbs,
++ unsigned long *lsb_pub)
++{
++ unsigned long qlsb = 0;
++ int bitno = 0;
++ int qlsb_wgt = 0;
++ int i, j;
++
++ /* For each queue:
++ * If the count of potential LSBs available to a queue matches the
++ * ordinal given to us in lsb_cnt:
++ * Copy the mask of possible LSBs for this queue into "qlsb";
++ * For each bit in qlsb, see if the corresponding bit in the
++ * aggregation mask is set; if so, we have a match.
++ * If we have a match, clear the bit in the aggregation to
++ * mark it as no longer available.
++ * If there is no match, clear the bit in qlsb and keep looking.
++ */
++ for (i = 0; i < ccp->cmd_q_count; i++) {
++ struct ccp_queue *cmd_q = &ccp->cmd_q[i];
++
++ qlsb_wgt = 0;
++ for (j = 0; j < MAX_LSB_CNT; j++)
++ if (ccp_get_bit(&cmd_q->lsbmask, j))
++ qlsb_wgt++;
++
++ if (qlsb_wgt == lsb_cnt) {
++ qlsb = cmd_q->lsbmask;
++
++ bitno = ffs(qlsb) - 1;
++ while (bitno < MAX_LSB_CNT) {
++ if (ccp_get_bit(lsb_pub, bitno)) {
++ /* We found an available LSB
++ * that this queue can access
++ */
++ cmd_q->lsb = bitno;
++ ccp_clear_bit(lsb_pub, bitno);
++ break;
++ }
++ ccp_clear_bit(&qlsb, bitno);
++ bitno = ffs(qlsb) - 1;
++ }
++ if (bitno >= MAX_LSB_CNT)
++ return -EINVAL;
++ n_lsbs--;
++ }
++ }
++ return n_lsbs;
++}
++
++/* For each queue, from the most- to least-constrained:
++ * find an LSB that can be assigned to the queue. If there are N queues that
++ * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
++ * dedicated LSB. Remaining LSB regions become a shared resource.
++ * If we have fewer LSBs than queues, all LSB regions become shared
++ * resources.
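++ *
++ * Example: with 5 command queues and 7 usable LSB regions in the
++ * aggregate mask, each queue gets a private region (access masks
++ * permitting) and the remaining 2 stay shared; with 5 queues but only
++ * 2 regions, no private assignment is made and both regions are shared
++ * by every queue.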
++ */
++static int
++ccp_assign_lsbs(struct ccp_device *ccp)
++{
++ unsigned long lsb_pub = 0, qlsb = 0;
++ int n_lsbs = 0;
++ int bitno;
++ int i, lsb_cnt;
++ int rc = 0;
++
++ rte_spinlock_init(&ccp->lsb_lock);
++
++ /* Create an aggregate bitmap to get a total count of available LSBs */
++ for (i = 0; i < ccp->cmd_q_count; i++)
++ lsb_pub |= ccp->cmd_q[i].lsbmask;
++
++ for (i = 0; i < MAX_LSB_CNT; i++)
++ if (ccp_get_bit(&lsb_pub, i))
++ n_lsbs++;
++
++ if (n_lsbs >= ccp->cmd_q_count) {
++ /* We have enough LSBS to give every queue a private LSB.
++ * Brute force search to start with the queues that are more
++ * constrained in LSB choice. When an LSB is privately
++ * assigned, it is removed from the public mask.
++ * This is an ugly N squared algorithm with some optimization.
++ */
++ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
++ lsb_cnt++) {
++ rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
++ &lsb_pub);
++ if (rc < 0)
++ return -EINVAL;
++ n_lsbs = rc;
++ }
++ }
++
++ rc = 0;
++ /* What's left of the LSBs, according to the public mask, now become
++ * shared. Any zero bits in the lsb_pub mask represent an LSB region
++ * that can't be used as a shared resource, so mark the LSB slots for
++ * them as "in use".
++ */
++ qlsb = lsb_pub;
++ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
++ while (bitno < MAX_LSB_CNT) {
++ ccp_bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
++ ccp_set_bit(&qlsb, bitno);
++ bitno = ccp_find_first_zero_bit(&qlsb, MAX_LSB_CNT);
++ }
++
++ return rc;
++}
++
++static int
++ccp_add_device(struct ccp_device *dev, int type)
++{
++ int i;
++ uint32_t qmr, status_lo, status_hi, dma_addr_lo, dma_addr_hi;
++ uint64_t status;
++ struct ccp_queue *cmd_q;
++ const struct rte_memzone *q_mz;
++ void *vaddr;
++
++ if (dev == NULL)
++ return -1;
++
++ dev->id = ccp_dev_id++;
++ dev->qidx = 0;
++ vaddr = (void *)(dev->pci.mem_resource[2].addr);
++
++ if (type == CCP_VERSION_5B) {
++ CCP_WRITE_REG(vaddr, CMD_TRNG_CTL_OFFSET, 0x00012D57);
++ CCP_WRITE_REG(vaddr, CMD_CONFIG_0_OFFSET, 0x00000003);
++ for (i = 0; i < 12; i++) {
++ CCP_WRITE_REG(vaddr, CMD_AES_MASK_OFFSET,
++ CCP_READ_REG(vaddr, TRNG_OUT_REG));
++ }
++ CCP_WRITE_REG(vaddr, CMD_QUEUE_MASK_OFFSET, 0x0000001F);
++ CCP_WRITE_REG(vaddr, CMD_QUEUE_PRIO_OFFSET, 0x00005B6D);
++ CCP_WRITE_REG(vaddr, CMD_CMD_TIMEOUT_OFFSET, 0x00000000);
++
++ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET, 0x3FFFFFFF);
++ CCP_WRITE_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET, 0x000003FF);
++
++ CCP_WRITE_REG(vaddr, CMD_CLK_GATE_CTL_OFFSET, 0x00108823);
++ }
++ CCP_WRITE_REG(vaddr, CMD_REQID_CONFIG_OFFSET, 0x00001249);
++
++ /* Copy the private LSB mask to the public registers */
++ status_lo = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_LO_OFFSET);
++ status_hi = CCP_READ_REG(vaddr, LSB_PRIVATE_MASK_HI_OFFSET);
++ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_LO_OFFSET, status_lo);
++ CCP_WRITE_REG(vaddr, LSB_PUBLIC_MASK_HI_OFFSET, status_hi);
++ status = ((uint64_t)status_hi<<30) | ((uint64_t)status_lo);
++
++ dev->cmd_q_count = 0;
++ /* Find available queues */
++ qmr = CCP_READ_REG(vaddr, Q_MASK_REG);
++ for (i = 0; i < MAX_HW_QUEUES; i++) {
++ if (!(qmr & (1 << i)))
++ continue;
++ cmd_q = &dev->cmd_q[dev->cmd_q_count++];
++ cmd_q->dev = dev;
++ cmd_q->id = i;
++ cmd_q->qidx = 0;
++ cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
++
++ cmd_q->reg_base = (uint8_t *)vaddr +
++ CMD_Q_STATUS_INCR * (i + 1);
++
++ /* CCP queue memory */
++ snprintf(cmd_q->memz_name, sizeof(cmd_q->memz_name),
++ "%s_%d_%s_%d_%s",
++ "ccp_dev",
++ (int)dev->id, "queue",
++ (int)cmd_q->id, "mem");
++ q_mz = ccp_queue_dma_zone_reserve(cmd_q->memz_name,
++ cmd_q->qsize, SOCKET_ID_ANY);
++ cmd_q->qbase_addr = (void *)q_mz->addr;
++ cmd_q->qbase_desc = (void *)q_mz->addr;
++ cmd_q->qbase_phys_addr = q_mz->phys_addr;
++
++ cmd_q->qcontrol = 0;
++ /* init control reg to zero */
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol);
++
++ /* Disable the interrupts */
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INT_ENABLE_BASE, 0x00);
++ CCP_READ_REG(cmd_q->reg_base, CMD_Q_INT_STATUS_BASE);
++ CCP_READ_REG(cmd_q->reg_base, CMD_Q_STATUS_BASE);
++
++ /* Clear the interrupts */
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_INTERRUPT_STATUS_BASE,
++ ALL_INTERRUPTS);
++
++ /* Configure size of each virtual queue accessible to host */
++ cmd_q->qcontrol &= ~(CMD_Q_SIZE << CMD_Q_SHIFT);
++ cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD_Q_SHIFT;
++
++ dma_addr_lo = low32_value(cmd_q->qbase_phys_addr);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
++ (uint32_t)dma_addr_lo);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_HEAD_LO_BASE,
++ (uint32_t)dma_addr_lo);
++
++ dma_addr_hi = high32_value(cmd_q->qbase_phys_addr);
++ cmd_q->qcontrol |= (dma_addr_hi << 16);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol);
++
++ /* create LSB Mask map */
++ if (ccp_find_lsb_regions(cmd_q, status))
++ CCP_LOG_ERR("queue doesn't have lsb regions");
++ cmd_q->lsb = -1;
++
++ rte_atomic64_init(&cmd_q->free_slots);
++ rte_atomic64_set(&cmd_q->free_slots, (COMMANDS_PER_QUEUE - 1));
++ /* unused slot barrier b/w H&T */
++ }
++
++ if (ccp_assign_lsbs(dev))
++ CCP_LOG_ERR("Unable to assign lsb region");
++
++ /* pre-allocate LSB slots */
++ for (i = 0; i < dev->cmd_q_count; i++) {
++ dev->cmd_q[i].sb_key =
++ ccp_lsb_alloc(&dev->cmd_q[i], 1);
++ dev->cmd_q[i].sb_iv =
++ ccp_lsb_alloc(&dev->cmd_q[i], 1);
++ dev->cmd_q[i].sb_sha =
++ ccp_lsb_alloc(&dev->cmd_q[i], 2);
++ dev->cmd_q[i].sb_hmac =
++ ccp_lsb_alloc(&dev->cmd_q[i], 2);
++ }
++
++ TAILQ_INSERT_TAIL(&ccp_list, dev, next);
++ return 0;
++}
++
++static void
++ccp_remove_device(struct ccp_device *dev)
++{
++ if (dev == NULL)
++ return;
++
++ TAILQ_REMOVE(&ccp_list, dev, next);
++}
++
++static int
++is_ccp_device(const char *dirname,
++ const struct rte_pci_id *ccp_id,
++ int *type)
++{
++ char filename[PATH_MAX];
++ const struct rte_pci_id *id;
++ uint16_t vendor, device_id;
++ int i;
++ unsigned long tmp;
++
++ /* get vendor id */
++ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ return 0;
++ vendor = (uint16_t)tmp;
++
++ /* get device id */
++ snprintf(filename, sizeof(filename), "%s/device", dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ return 0;
++ device_id = (uint16_t)tmp;
++
++ for (id = ccp_id, i = 0; id->vendor_id != 0; id++, i++) {
++ if (vendor == id->vendor_id &&
++ device_id == id->device_id) {
++ *type = i;
++ return 1; /* Matched device */
++ }
++ }
++ return 0;
++}
++
++static int
++ccp_probe_device(const char *dirname, uint16_t domain,
++ uint8_t bus, uint8_t devid,
++ uint8_t function, int ccp_type)
++{
++ struct ccp_device *ccp_dev = NULL;
++ struct rte_pci_device *pci;
++ char filename[PATH_MAX];
++ unsigned long tmp;
++ int uio_fd = -1, i, uio_num;
++ char uio_devname[PATH_MAX];
++ void *map_addr;
++
++ ccp_dev = rte_zmalloc("ccp_device", sizeof(*ccp_dev),
++ RTE_CACHE_LINE_SIZE);
++ if (ccp_dev == NULL)
++ goto fail;
++ pci = &(ccp_dev->pci);
++
++ pci->addr.domain = domain;
++ pci->addr.bus = bus;
++ pci->addr.devid = devid;
++ pci->addr.function = function;
++
++ /* get vendor id */
++ snprintf(filename, sizeof(filename), "%s/vendor", dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ goto fail;
++ pci->id.vendor_id = (uint16_t)tmp;
++
++ /* get device id */
++ snprintf(filename, sizeof(filename), "%s/device", dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ goto fail;
++ pci->id.device_id = (uint16_t)tmp;
++
++ /* get subsystem_vendor id */
++ snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
++ dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ goto fail;
++ pci->id.subsystem_vendor_id = (uint16_t)tmp;
++
++ /* get subsystem_device id */
++ snprintf(filename, sizeof(filename), "%s/subsystem_device",
++ dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ goto fail;
++ pci->id.subsystem_device_id = (uint16_t)tmp;
++
++ /* get class_id */
++ snprintf(filename, sizeof(filename), "%s/class",
++ dirname);
++ if (ccp_pci_parse_sysfs_value(filename, &tmp) < 0)
++ goto fail;
++ /* the least 24 bits are valid: class, subclass, program interface */
++ pci->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;
++
++ /* parse resources */
++ snprintf(filename, sizeof(filename), "%s/resource", dirname);
++ if (ccp_pci_parse_sysfs_resource(filename, pci) < 0)
++ goto fail;
++
++ uio_num = ccp_find_uio_devname(dirname);
++ if (uio_num < 0) {
++ /*
++ * It may take time for uio device to appear,
++ * wait here and try again
++ */
++ usleep(100000);
++ uio_num = ccp_find_uio_devname(dirname);
++ if (uio_num < 0)
++ goto fail;
++ }
++ snprintf(uio_devname, sizeof(uio_devname), "/dev/uio%u", uio_num);
++
++ uio_fd = open(uio_devname, O_RDWR | O_NONBLOCK);
++ if (uio_fd < 0)
++ goto fail;
++ if (flock(uio_fd, LOCK_EX | LOCK_NB))
++ goto fail;
++
++ /* Map the PCI memory resource of device */
++ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
++
++ char devname[PATH_MAX];
++ int res_fd;
++
++ if (pci->mem_resource[i].phys_addr == 0)
++ continue;
++ snprintf(devname, sizeof(devname), "%s/resource%d", dirname, i);
++ res_fd = open(devname, O_RDWR);
++ if (res_fd < 0)
++ goto fail;
++ map_addr = mmap(NULL, pci->mem_resource[i].len,
++ PROT_READ | PROT_WRITE,
++ MAP_SHARED, res_fd, 0);
++ if (map_addr == MAP_FAILED)
++ goto fail;
++
++ pci->mem_resource[i].addr = map_addr;
++ }
++
++ /* device is valid, add in list */
++ if (ccp_add_device(ccp_dev, ccp_type)) {
++ ccp_remove_device(ccp_dev);
++ goto fail;
++ }
++
++ return 0;
++fail:
++ CCP_LOG_ERR("CCP Device probe failed");
++ if (uio_fd > 0)
++ close(uio_fd);
++ if (ccp_dev)
++ rte_free(ccp_dev);
++ return -1;
++}
++
++int
++ccp_probe_devices(const struct rte_pci_id *ccp_id)
++{
++ int dev_cnt = 0;
++ int ccp_type = 0;
++ struct dirent *d;
++ DIR *dir;
++ int ret = 0;
++ int module_idx = 0;
++ uint16_t domain;
++ uint8_t bus, devid, function;
++ char dirname[PATH_MAX];
++
++ module_idx = ccp_check_pci_uio_module();
++ if (module_idx < 0)
++ return -1;
++
++ TAILQ_INIT(&ccp_list);
++ dir = opendir(SYSFS_PCI_DEVICES);
++ if (dir == NULL)
++ return -1;
++ while ((d = readdir(dir)) != NULL) {
++ if (d->d_name[0] == '.')
++ continue;
++ if (ccp_parse_pci_addr_format(d->d_name, sizeof(d->d_name),
++ &domain, &bus, &devid, &function) != 0)
++ continue;
++ snprintf(dirname, sizeof(dirname), "%s/%s",
++ SYSFS_PCI_DEVICES, d->d_name);
++ if (is_ccp_device(dirname, ccp_id, &ccp_type)) {
++ printf("CCP : Detected CCP device with ID = 0x%x\n",
++ ccp_id[ccp_type].device_id);
++ ret = ccp_probe_device(dirname, domain, bus, devid,
++ function, ccp_type);
++ if (ret == 0)
++ dev_cnt++;
++ }
++ }
++ closedir(dir);
++ return dev_cnt;
++}
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+new file mode 100644
+index 0000000..fe05bf0
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -0,0 +1,310 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CCP_DEV_H_
++#define _CCP_DEV_H_
++
++#include <limits.h>
++#include <stdbool.h>
++#include <stdint.h>
++#include <string.h>
++
++#include <rte_bus_pci.h>
++#include <rte_atomic.h>
++#include <rte_byteorder.h>
++#include <rte_io.h>
++#include <rte_pci.h>
++#include <rte_spinlock.h>
++#include <rte_crypto_sym.h>
++#include <rte_cryptodev.h>
++
++/**< CCP specific */
++#define MAX_HW_QUEUES 5
++
++/**< CCP Register Mappings */
++#define Q_MASK_REG 0x000
++#define TRNG_OUT_REG 0x00c
++
++/* CCP Version 5 Specifics */
++#define CMD_QUEUE_MASK_OFFSET 0x00
++#define CMD_QUEUE_PRIO_OFFSET 0x04
++#define CMD_REQID_CONFIG_OFFSET 0x08
++#define CMD_CMD_TIMEOUT_OFFSET 0x10
++#define LSB_PUBLIC_MASK_LO_OFFSET 0x18
++#define LSB_PUBLIC_MASK_HI_OFFSET 0x1C
++#define LSB_PRIVATE_MASK_LO_OFFSET 0x20
++#define LSB_PRIVATE_MASK_HI_OFFSET 0x24
++
++#define CMD_Q_CONTROL_BASE 0x0000
++#define CMD_Q_TAIL_LO_BASE 0x0004
++#define CMD_Q_HEAD_LO_BASE 0x0008
++#define CMD_Q_INT_ENABLE_BASE 0x000C
++#define CMD_Q_INTERRUPT_STATUS_BASE 0x0010
++
++#define CMD_Q_STATUS_BASE 0x0100
++#define CMD_Q_INT_STATUS_BASE 0x0104
++
++#define CMD_CONFIG_0_OFFSET 0x6000
++#define CMD_TRNG_CTL_OFFSET 0x6008
++#define CMD_AES_MASK_OFFSET 0x6010
++#define CMD_CLK_GATE_CTL_OFFSET 0x603C
++
++/* Address offset between two virtual queue registers */
++#define CMD_Q_STATUS_INCR 0x1000
++
++/* Bit masks */
++#define CMD_Q_RUN 0x1
++#define CMD_Q_SIZE 0x1F
++#define CMD_Q_SHIFT 3
++#define COMMANDS_PER_QUEUE 2048
++
++#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \
++ CMD_Q_SIZE)
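++/* e.g. COMMANDS_PER_QUEUE = 2048: ffs(2048) = 12, so QUEUE_SIZE_VAL = 10 */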
++#define Q_DESC_SIZE sizeof(struct ccp_desc)
++#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n))
++
++#define INT_COMPLETION 0x1
++#define INT_ERROR 0x2
++#define INT_QUEUE_STOPPED 0x4
++#define ALL_INTERRUPTS (INT_COMPLETION| \
++ INT_ERROR| \
++ INT_QUEUE_STOPPED)
++
++#define LSB_REGION_WIDTH 5
++#define MAX_LSB_CNT 8
++
++#define LSB_SIZE 16
++#define LSB_ITEM_SIZE 32
++#define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
++
++/* bitmap */
++enum {
++ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
++};
++
++#define WORD_OFFSET(b) ((b) / BITS_PER_WORD)
++#define BIT_OFFSET(b) ((b) % BITS_PER_WORD)
++
++#define CCP_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
++#define CCP_BITMAP_SIZE(nr) \
++ CCP_DIV_ROUND_UP(nr, CHAR_BIT * sizeof(unsigned long))
++
++#define CCP_BITMAP_FIRST_WORD_MASK(start) \
++ (~0UL << ((start) & (BITS_PER_WORD - 1)))
++#define CCP_BITMAP_LAST_WORD_MASK(nbits) \
++ (~0UL >> (-(nbits) & (BITS_PER_WORD - 1)))
++
++#define __ccp_round_mask(x, y) ((typeof(x))((y)-1))
++#define ccp_round_down(x, y) ((x) & ~__ccp_round_mask(x, y))
++
++/** CCP registers Write/Read */
++
++static inline void ccp_pci_reg_write(void *base, int offset,
++ uint32_t value)
++{
++ volatile void *reg_addr = ((uint8_t *)base + offset);
++
++ rte_write32((rte_cpu_to_le_32(value)), reg_addr);
++}
++
++static inline uint32_t ccp_pci_reg_read(void *base, int offset)
++{
++ volatile void *reg_addr = ((uint8_t *)base + offset);
++
++ return rte_le_to_cpu_32(rte_read32(reg_addr));
++}
++
++#define CCP_READ_REG(hw_addr, reg_offset) \
++ ccp_pci_reg_read(hw_addr, reg_offset)
++
++#define CCP_WRITE_REG(hw_addr, reg_offset, value) \
++ ccp_pci_reg_write(hw_addr, reg_offset, value)
++
++TAILQ_HEAD(ccp_list, ccp_device);
++
++extern struct ccp_list ccp_list;
++
++/**
++ * CCP device version
++ */
++enum ccp_device_version {
++ CCP_VERSION_5A = 0,
++ CCP_VERSION_5B,
++};
++
++/**
++ * A structure describing a CCP command queue.
++ */
++struct ccp_queue {
++ struct ccp_device *dev;
++ char memz_name[RTE_MEMZONE_NAMESIZE];
++
++ rte_atomic64_t free_slots;
++ /**< available free slots updated from enq/deq calls */
++
++ /* Queue identifier */
++ uint64_t id; /**< queue id */
++ uint64_t qidx; /**< queue index */
++ uint64_t qsize; /**< queue size */
++
++ /* Queue address */
++ struct ccp_desc *qbase_desc;
++ void *qbase_addr;
++ phys_addr_t qbase_phys_addr;
++ /**< queue-page registers addr */
++ void *reg_base;
++
++ uint32_t qcontrol;
++ /**< queue ctrl reg */
++
++ int lsb;
++ /**< lsb region assigned to queue */
++ unsigned long lsbmask;
++ /**< lsb regions queue can access */
++ unsigned long lsbmap[CCP_BITMAP_SIZE(LSB_SIZE)];
++ /**< all lsb resources which queue is using */
++ uint32_t sb_key;
++ /**< lsb assigned for queue */
++ uint32_t sb_iv;
++ /**< lsb assigned for iv */
++ uint32_t sb_sha;
++ /**< lsb assigned for sha ctx */
++ uint32_t sb_hmac;
++ /**< lsb assigned for hmac ctx */
++} __rte_cache_aligned;
++
++/**
++ * A structure describing a CCP device.
++ */
++struct ccp_device {
++ TAILQ_ENTRY(ccp_device) next;
++ int id;
++ /**< ccp dev id on platform */
++ struct ccp_queue cmd_q[MAX_HW_QUEUES];
++ /**< ccp queue */
++ int cmd_q_count;
++ /**< no. of ccp Queues */
++ struct rte_pci_device pci;
++ /**< ccp pci identifier */
++ unsigned long lsbmap[CCP_BITMAP_SIZE(SLSB_MAP_SIZE)];
++ /**< shared lsb mask of ccp */
++ rte_spinlock_t lsb_lock;
++ /**< protection for shared lsb region allocation */
++ int qidx;
++ /**< current queue index */
++} __rte_cache_aligned;
++
++/**
++ * descriptor for version 5 CCP commands
++ * 8 32-bit words:
++ * word 0: function; engine; control bits
++ * word 1: length of source data
++ * word 2: low 32 bits of source pointer
++ * word 3: upper 16 bits of source pointer; source memory type
++ * word 4: low 32 bits of destination pointer
++ * word 5: upper 16 bits of destination pointer; destination memory
++ * type
++ * word 6: low 32 bits of key pointer
++ * word 7: upper 16 bits of key pointer; key memory type
++ */
++struct dword0 {
++ uint32_t soc:1;
++ uint32_t ioc:1;
++ uint32_t rsvd1:1;
++ uint32_t init:1;
++ uint32_t eom:1;
++ uint32_t function:15;
++ uint32_t engine:4;
++ uint32_t prot:1;
++ uint32_t rsvd2:7;
++};
++
++struct dword3 {
++ uint32_t src_hi:16;
++ uint32_t src_mem:2;
++ uint32_t lsb_cxt_id:8;
++ uint32_t rsvd1:5;
++ uint32_t fixed:1;
++};
++
++union dword4 {
++ uint32_t dst_lo; /* NON-SHA */
++ uint32_t sha_len_lo; /* SHA */
++};
++
++union dword5 {
++ struct {
++ uint32_t dst_hi:16;
++ uint32_t dst_mem:2;
++ uint32_t rsvd1:13;
++ uint32_t fixed:1;
++ } fields;
++ uint32_t sha_len_hi;
++};
++
++struct dword7 {
++ uint32_t key_hi:16;
++ uint32_t key_mem:2;
++ uint32_t rsvd1:14;
++};
++
++struct ccp_desc {
++ struct dword0 dw0;
++ uint32_t length;
++ uint32_t src_lo;
++ struct dword3 dw3;
++ union dword4 dw4;
++ union dword5 dw5;
++ uint32_t key_lo;
++ struct dword7 dw7;
++};
++
++static inline uint32_t
++low32_value(unsigned long addr)
++{
++ return ((uint64_t)addr) & 0x0ffffffff;
++}
++
++static inline uint32_t
++high32_value(unsigned long addr)
++{
++ return ((uint64_t)addr >> 32) & 0x00000ffff;
++}
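++
++/*
++ * Usage sketch (illustrative only, not part of the original patch):
++ * low32_value()/high32_value() split a bus address across the lo/hi
++ * descriptor words. The helper below is hypothetical, not a driver API.
++ */
++static inline void
++ccp_example_fill_src(struct ccp_desc *desc, phys_addr_t src, uint32_t len)
++{
++ desc->length = len;
++ desc->src_lo = low32_value(src);
++ desc->dw3.src_hi = high32_value(src);
++ desc->dw3.src_mem = 0; /* assumed: 0 selects system memory */
++}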
++
++/**
++ * Detect ccp platform and initialize all ccp devices
++ *
++ * @param ccp_id rte_pci_id list for supported CCP devices
++ * @return no. of successfully initialized CCP devices
++ */
++int ccp_probe_devices(const struct rte_pci_id *ccp_id);
++
++#endif /* _CCP_DEV_H_ */
+diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
+new file mode 100644
+index 0000000..ddf4b49
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_pci.c
+@@ -0,0 +1,262 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <dirent.h>
++#include <fcntl.h>
++#include <stdio.h>
++#include <string.h>
++#include <unistd.h>
++
++#include <rte_string_fns.h>
++
++#include "ccp_pci.h"
++
++static const char * const uio_module_names[] = {
++ "igb_uio",
++ "uio_pci_generic",
++};
++
++int
++ccp_check_pci_uio_module(void)
++{
++ FILE *fp;
++ int i;
++ char buf[BUFSIZ];
++
++ fp = fopen(PROC_MODULES, "r");
++ if (fp == NULL)
++ return -1;
++ i = 0;
++ while (uio_module_names[i] != NULL) {
++ while (fgets(buf, sizeof(buf), fp) != NULL) {
++ if (!strncmp(buf, uio_module_names[i],
++ strlen(uio_module_names[i])))
++ return i;
++ }
++ i++;
++ rewind(fp);
++ }
++ printf("Insert igb_uio or uio_pci_generic kernel module(s)\n");
++ return -1;/* uio not inserted */
++}
++
++/*
++ * split up a pci address into its constituent parts.
++ */
++int
++ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
++ uint8_t *bus, uint8_t *devid, uint8_t *function)
++{
++ /* first split on ':' */
++ union splitaddr {
++ struct {
++ char *domain;
++ char *bus;
++ char *devid;
++ char *function;
++ };
++ char *str[PCI_FMT_NVAL];
++ /* last element-separator is "." not ":" */
++ } splitaddr;
++
++ char *buf_copy = strndup(buf, bufsize);
++
++ if (buf_copy == NULL)
++ return -1;
++
++ if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
++ != PCI_FMT_NVAL - 1)
++ goto error;
++ /* final split is on '.' between devid and function */
++ splitaddr.function = strchr(splitaddr.devid, '.');
++ if (splitaddr.function == NULL)
++ goto error;
++ *splitaddr.function++ = '\0';
++
++ /* now convert to int values */
++ errno = 0;
++ *domain = (uint16_t)strtoul(splitaddr.domain, NULL, 16);
++ *bus = (uint8_t)strtoul(splitaddr.bus, NULL, 16);
++ *devid = (uint8_t)strtoul(splitaddr.devid, NULL, 16);
++ *function = (uint8_t)strtoul(splitaddr.function, NULL, 10);
++ if (errno != 0)
++ goto error;
++
++ free(buf_copy); /* free the copy made with strndup */
++ return 0;
++error:
++ free(buf_copy);
++ return -1;
++}
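++
++/*
++ * Usage sketch (illustrative only): parsing a BDF string such as
++ * "0000:03:00.2" with the helper above yields domain 0x0000, bus 0x03,
++ * devid 0x00 and function 2.
++ */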
++
++int
++ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val)
++{
++ FILE *f;
++ char buf[BUFSIZ];
++ char *end = NULL;
++
++ f = fopen(filename, "r");
++ if (f == NULL)
++ return -1;
++ if (fgets(buf, sizeof(buf), f) == NULL) {
++ fclose(f);
++ return -1;
++ }
++ *val = strtoul(buf, &end, 0);
++ if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
++ fclose(f);
++ return -1;
++ }
++ fclose(f);
++ return 0;
++}
++
++/** IO resource type: */
++#define IORESOURCE_IO 0x00000100
++#define IORESOURCE_MEM 0x00000200
++
++/* parse one line of the "resource" sysfs file (note that the 'line'
++ * string is modified)
++ */
++static int
++ccp_pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
++ uint64_t *end_addr, uint64_t *flags)
++{
++ union pci_resource_info {
++ struct {
++ char *phys_addr;
++ char *end_addr;
++ char *flags;
++ };
++ char *ptrs[PCI_RESOURCE_FMT_NVAL];
++ } res_info;
++
++ if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3)
++ return -1;
++ errno = 0;
++ *phys_addr = strtoull(res_info.phys_addr, NULL, 16);
++ *end_addr = strtoull(res_info.end_addr, NULL, 16);
++ *flags = strtoull(res_info.flags, NULL, 16);
++ if (errno != 0)
++ return -1;
++
++ return 0;
++}
++
++/* parse the "resource" sysfs file */
++int
++ccp_pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
++{
++ FILE *fp;
++ char buf[BUFSIZ];
++ int i;
++ uint64_t phys_addr, end_addr, flags;
++
++ fp = fopen(filename, "r");
++ if (fp == NULL)
++ return -1;
++
++ for (i = 0; i < PCI_MAX_RESOURCE; i++) {
++ if (fgets(buf, sizeof(buf), fp) == NULL)
++ goto error;
++ if (ccp_pci_parse_one_sysfs_resource(buf, sizeof(buf),
++ &phys_addr, &end_addr, &flags) < 0)
++ goto error;
++
++ if (flags & IORESOURCE_MEM) {
++ dev->mem_resource[i].phys_addr = phys_addr;
++ dev->mem_resource[i].len = end_addr - phys_addr + 1;
++ /* not mapped for now */
++ dev->mem_resource[i].addr = NULL;
++ }
++ }
++ fclose(fp);
++ return 0;
++
++error:
++ fclose(fp);
++ return -1;
++}
++
++int
++ccp_find_uio_devname(const char *dirname)
++{
++ DIR *dir;
++ struct dirent *e;
++ char dirname_uio[PATH_MAX];
++ unsigned int uio_num;
++ int ret = -1;
++
++ /* depending on kernel version, uio can be located in uio/uioX
++ * or uio:uioX
++ */
++ snprintf(dirname_uio, sizeof(dirname_uio), "%s/uio", dirname);
++ dir = opendir(dirname_uio);
++ if (dir == NULL) {
++ /* retry with the parent directory; it might be a different kernel version */
++ dir = opendir(dirname);
++ if (dir == NULL)
++ return -1;
++ }
++
++ /* take the first file starting with "uio" */
++ while ((e = readdir(dir)) != NULL) {
++ /* format could be uio%d ...*/
++ int shortprefix_len = sizeof("uio") - 1;
++ /* ... or uio:uio%d */
++ int longprefix_len = sizeof("uio:uio") - 1;
++ char *endptr;
++
++ if (strncmp(e->d_name, "uio", 3) != 0)
++ continue;
++
++ /* first try uio%d */
++ errno = 0;
++ uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
++ if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
++ ret = uio_num;
++ break;
++ }
++
++ /* then try uio:uio%d */
++ errno = 0;
++ uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
++ if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
++ ret = uio_num;
++ break;
++ }
++ }
++ closedir(dir);
++ return ret;
++}
+diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
+new file mode 100644
+index 0000000..a4c09c8
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_pci.h
+@@ -0,0 +1,53 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CCP_PCI_H_
++#define _CCP_PCI_H_
++
++#include <stdint.h>
++
++#include <rte_bus_pci.h>
++
++#define SYSFS_PCI_DEVICES "/sys/bus/pci/devices"
++#define PROC_MODULES "/proc/modules"
++
++int ccp_check_pci_uio_module(void);
++
++int ccp_parse_pci_addr_format(const char *buf, int bufsize, uint16_t *domain,
++ uint8_t *bus, uint8_t *devid, uint8_t *function);
++
++int ccp_pci_parse_sysfs_value(const char *filename, unsigned long *val);
++
++int ccp_pci_parse_sysfs_resource(const char *filename,
++ struct rte_pci_device *dev);
++
++int ccp_find_uio_devname(const char *dirname);
++
++#endif /* _CCP_PCI_H_ */
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+new file mode 100644
+index 0000000..bc4120b
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -0,0 +1,55 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <rte_cryptodev_pmd.h>
++
++struct rte_cryptodev_ops ccp_ops = {
++ .dev_configure = NULL,
++ .dev_start = NULL,
++ .dev_stop = NULL,
++ .dev_close = NULL,
++
++ .stats_get = NULL,
++ .stats_reset = NULL,
++
++ .dev_infos_get = NULL,
++
++ .queue_pair_setup = NULL,
++ .queue_pair_release = NULL,
++ .queue_pair_start = NULL,
++ .queue_pair_stop = NULL,
++ .queue_pair_count = NULL,
++
++ .session_get_size = NULL,
++ .session_configure = NULL,
++ .session_clear = NULL,
++};
++
++struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
+diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
+new file mode 100644
+index 0000000..f5b6061
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_pmd_private.h
+@@ -0,0 +1,82 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CCP_PMD_PRIVATE_H_
++#define _CCP_PMD_PRIVATE_H_
++
++#include <rte_cryptodev.h>
++
++#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
++
++#define CCP_LOG_ERR(fmt, args...) \
++ RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
++ __func__, __LINE__, ## args)
++
++#ifdef RTE_LIBRTE_CCP_DEBUG
++#define CCP_LOG_INFO(fmt, args...) \
++ RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
++ __func__, __LINE__, ## args)
++
++#define CCP_LOG_DBG(fmt, args...) \
++ RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD), \
++ __func__, __LINE__, ## args)
++#else
++#define CCP_LOG_INFO(fmt, args...)
++#define CCP_LOG_DBG(fmt, args...)
++#endif
++
++/**< Maximum queue pairs supported by CCP PMD */
++#define CCP_PMD_MAX_QUEUE_PAIRS 1
++#define CCP_NB_MAX_DESCRIPTORS 1024
++#define CCP_MAX_BURST 64
++
++/* private data structure for each CCP crypto device */
++struct ccp_private {
++ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
++ unsigned int max_nb_sessions; /**< Max number of sessions */
++ uint8_t crypto_num_dev; /**< Number of working crypto devices */
++};
++
++/**< device specific operations function pointer structure */
++extern struct rte_cryptodev_ops *ccp_pmd_ops;
++
++uint16_t
++ccp_cpu_pmd_enqueue_burst(void *queue_pair,
++ struct rte_crypto_op **ops,
++ uint16_t nb_ops);
++uint16_t
++ccp_cpu_pmd_dequeue_burst(void *queue_pair,
++ struct rte_crypto_op **ops,
++ uint16_t nb_ops);
++
++#endif /* _CCP_PMD_PRIVATE_H_ */
+diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
+index 6fa14bd..cc35a97 100644
+--- a/drivers/crypto/ccp/rte_ccp_pmd.c
++++ b/drivers/crypto/ccp/rte_ccp_pmd.c
+@@ -28,23 +28,170 @@
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <rte_bus_pci.h>
+ #include <rte_bus_vdev.h>
++#include <rte_common.h>
++#include <rte_config.h>
+ #include <rte_cryptodev.h>
+ #include <rte_cryptodev_pmd.h>
++#include <rte_pci.h>
++#include <rte_dev.h>
++#include <rte_malloc.h>
+
++#include "ccp_dev.h"
++#include "ccp_pmd_private.h"
++
++/**
++ * Global static parameter used to find if CCP device is already initialized.
++ */
++static unsigned int ccp_pmd_init_done;
+ uint8_t ccp_cryptodev_driver_id;
+
++static uint16_t
++ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
++ struct rte_crypto_op **ops __rte_unused,
++ uint16_t nb_ops __rte_unused)
++{
++ uint16_t enq_cnt = 0;
++
++ return enq_cnt;
++}
++
++static uint16_t
++ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
++ struct rte_crypto_op **ops __rte_unused,
++ uint16_t nb_ops __rte_unused)
++{
++ uint16_t nb_dequeued = 0;
++
++ return nb_dequeued;
++}
++
++/*
++ * The set of PCI devices this driver supports
++ */
++static struct rte_pci_id ccp_pci_id[] = {
++ {
++ RTE_PCI_DEVICE(0x1022, 0x1456), /* AMD CCP-5a */
++ },
++ {
++ RTE_PCI_DEVICE(0x1022, 0x1468), /* AMD CCP-5b */
++ },
++ {.device_id = 0},
++};
++
+ /** Remove ccp pmd */
+ static int
+-cryptodev_ccp_remove(struct rte_vdev_device *dev __rte_unused)
++cryptodev_ccp_remove(struct rte_vdev_device *dev)
+ {
++ const char *name;
++
++ ccp_pmd_init_done = 0;
++ name = rte_vdev_device_name(dev);
++ if (name == NULL)
++ return -EINVAL;
++
++ RTE_LOG(INFO, PMD, "Closing ccp device %s on numa socket %u\n",
++ name, rte_socket_id());
++
+ return 0;
+ }
+
++/** Create crypto device */
++static int
++cryptodev_ccp_create(const char *name,
++ struct rte_vdev_device *vdev,
++ struct rte_cryptodev_pmd_init_params *init_params)
++{
++ struct rte_cryptodev *dev;
++ struct ccp_private *internals;
++ uint8_t cryptodev_cnt = 0;
++
++ if (init_params->name[0] == '\0')
++ snprintf(init_params->name, sizeof(init_params->name),
++ "%s", name);
++
++ dev = rte_cryptodev_pmd_create(init_params->name,
++ &vdev->device,
++ init_params);
++ if (dev == NULL) {
++ CCP_LOG_ERR("failed to create cryptodev vdev");
++ goto init_error;
++ }
++
++ cryptodev_cnt = ccp_probe_devices(ccp_pci_id);
++
++ if (cryptodev_cnt == 0) {
++ CCP_LOG_ERR("failed to detect CCP crypto device");
++ goto init_error;
++ }
++
++ printf("CCP : Crypto device count = %d\n", cryptodev_cnt);
++ dev->driver_id = ccp_cryptodev_driver_id;
++
++ /* register rx/tx burst functions for data path */
++ dev->dev_ops = ccp_pmd_ops;
++ dev->enqueue_burst = ccp_pmd_enqueue_burst;
++ dev->dequeue_burst = ccp_pmd_dequeue_burst;
++
++ dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
++ RTE_CRYPTODEV_FF_HW_ACCELERATED |
++ RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;
++
++ internals = dev->data->dev_private;
++
++ internals->max_nb_qpairs = init_params->max_nb_queue_pairs;
++ internals->max_nb_sessions = init_params->max_nb_sessions;
++ internals->crypto_num_dev = cryptodev_cnt;
++
++ return 0;
++
++init_error:
++ CCP_LOG_ERR("driver %s: %s() failed",
++ init_params->name, __func__);
++ cryptodev_ccp_remove(vdev);
++
++ return -EFAULT;
++}
++
+ /** Probe ccp pmd */
+ static int
+-cryptodev_ccp_probe(struct rte_vdev_device *vdev __rte_unused)
++cryptodev_ccp_probe(struct rte_vdev_device *vdev)
+ {
++ int rc = 0;
++ const char *name;
++ struct rte_cryptodev_pmd_init_params init_params = {
++ "",
++ sizeof(struct ccp_private),
++ rte_socket_id(),
++ CCP_PMD_MAX_QUEUE_PAIRS,
++ RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_SESSIONS
++ };
++ const char *input_args;
++
++ if (ccp_pmd_init_done) {
++ RTE_LOG(INFO, PMD, "CCP PMD already initialized\n");
++ return -EFAULT;
++ }
++ name = rte_vdev_device_name(vdev);
++ if (name == NULL)
++ return -EINVAL;
++
++ input_args = rte_vdev_device_args(vdev);
++ rte_cryptodev_pmd_parse_input_args(&init_params, input_args);
++ init_params.max_nb_queue_pairs = CCP_PMD_MAX_QUEUE_PAIRS;
++
++ RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name,
++ init_params.socket_id);
++ RTE_LOG(INFO, PMD, "Max number of queue pairs = %d\n",
++ init_params.max_nb_queue_pairs);
++ RTE_LOG(INFO, PMD, "Max number of sessions = %d\n",
++ init_params.max_nb_sessions);
++
++ rc = cryptodev_ccp_create(name, vdev, &init_params);
++ if (rc)
++ return rc;
++ ccp_pmd_init_done = 1;
+ return 0;
+ }
+
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-03-20-crypto-ccp-support-basic-pmd-ops.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-03-20-crypto-ccp-support-basic-pmd-ops.patch
new file mode 100644
index 00000000..708b05e1
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-03-20-crypto-ccp-support-basic-pmd-ops.patch
@@ -0,0 +1,209 @@
+From patchwork Fri Mar 9 08:35:03 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,03/20] crypto/ccp: support basic pmd ops
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35802
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-3-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:03 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_dev.c | 9 ++++++
+ drivers/crypto/ccp/ccp_dev.h | 9 ++++++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 61 +++++++++++++++++++++++++++++++++---
+ drivers/crypto/ccp/ccp_pmd_private.h | 43 +++++++++++++++++++++++++
+ 4 files changed, 117 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
+index 5af2b49..57bccf4 100644
+--- a/drivers/crypto/ccp/ccp_dev.c
++++ b/drivers/crypto/ccp/ccp_dev.c
+@@ -52,6 +52,15 @@
+ struct ccp_list ccp_list = TAILQ_HEAD_INITIALIZER(ccp_list);
+ static int ccp_dev_id;
+
++int
++ccp_dev_start(struct rte_cryptodev *dev)
++{
++ struct ccp_private *priv = dev->data->dev_private;
++
++ priv->last_dev = TAILQ_FIRST(&ccp_list);
++ return 0;
++}
++
+ static const struct rte_memzone *
+ ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+index fe05bf0..b321530 100644
+--- a/drivers/crypto/ccp/ccp_dev.h
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -104,6 +104,10 @@
+ #define LSB_ITEM_SIZE 32
+ #define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
+
++/* General CCP Defines */
++
++#define CCP_SB_BYTES 32
++
+ /* bitmap */
+ enum {
+ BITS_PER_WORD = sizeof(unsigned long) * CHAR_BIT
+@@ -299,6 +303,11 @@ high32_value(unsigned long addr)
+ return ((uint64_t)addr >> 32) & 0x00000ffff;
+ }
+
++/*
++ * Start CCP device
++ */
++int ccp_dev_start(struct rte_cryptodev *dev);
++
+ /**
+ * Detect ccp platform and initialize all ccp devices
+ *
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index bc4120b..b6f8c48 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -28,18 +28,69 @@
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
++#include <string.h>
++
++#include <rte_common.h>
+ #include <rte_cryptodev_pmd.h>
++#include <rte_malloc.h>
++
++#include "ccp_pmd_private.h"
++#include "ccp_dev.h"
++
++static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
++ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
++};
++
++static int
++ccp_pmd_config(struct rte_cryptodev *dev __rte_unused,
++ struct rte_cryptodev_config *config __rte_unused)
++{
++ return 0;
++}
++
++static int
++ccp_pmd_start(struct rte_cryptodev *dev)
++{
++ return ccp_dev_start(dev);
++}
++
++static void
++ccp_pmd_stop(struct rte_cryptodev *dev __rte_unused)
++{
++
++}
++
++static int
++ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
++{
++ return 0;
++}
++
++static void
++ccp_pmd_info_get(struct rte_cryptodev *dev,
++ struct rte_cryptodev_info *dev_info)
++{
++ struct ccp_private *internals = dev->data->dev_private;
++
++ if (dev_info != NULL) {
++ dev_info->driver_id = dev->driver_id;
++ dev_info->feature_flags = dev->feature_flags;
++ dev_info->capabilities = ccp_pmd_capabilities;
++ dev_info->max_nb_queue_pairs = internals->max_nb_qpairs;
++ dev_info->sym.max_nb_sessions = internals->max_nb_sessions;
++ }
++}
+
+ struct rte_cryptodev_ops ccp_ops = {
+- .dev_configure = NULL,
+- .dev_start = NULL,
+- .dev_stop = NULL,
+- .dev_close = NULL,
++ .dev_configure = ccp_pmd_config,
++ .dev_start = ccp_pmd_start,
++ .dev_stop = ccp_pmd_stop,
++ .dev_close = ccp_pmd_close,
+
+ .stats_get = NULL,
+ .stats_reset = NULL,
+
+- .dev_infos_get = NULL,
++ .dev_infos_get = ccp_pmd_info_get,
+
+ .queue_pair_setup = NULL,
+ .queue_pair_release = NULL,
+diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
+index f5b6061..d278a8c 100644
+--- a/drivers/crypto/ccp/ccp_pmd_private.h
++++ b/drivers/crypto/ccp/ccp_pmd_private.h
+@@ -60,13 +60,56 @@
+ #define CCP_NB_MAX_DESCRIPTORS 1024
+ #define CCP_MAX_BURST 64
+
++#include "ccp_dev.h"
++
+ /* private data structure for each CCP crypto device */
+ struct ccp_private {
+ unsigned int max_nb_qpairs; /**< Max number of queue pairs */
+ unsigned int max_nb_sessions; /**< Max number of sessions */
+ uint8_t crypto_num_dev; /**< Number of working crypto devices */
++ struct ccp_device *last_dev; /**< Last working crypto device */
+ };
+
++/* CCP batch info */
++struct ccp_batch_info {
++ struct rte_crypto_op *op[CCP_MAX_BURST];
++ /**< op table populated at enqueue time from app */
++ int op_idx;
++ struct ccp_queue *cmd_q;
++ uint16_t opcnt;
++ /**< no. of crypto ops in batch*/
++ int desccnt;
++ /**< no. of ccp queue descriptors*/
++ uint32_t head_offset;
++ /**< ccp queue head/tail offsets at the time of enqueue */
++ uint32_t tail_offset;
++ uint8_t lsb_buf[CCP_SB_BYTES * CCP_MAX_BURST];
++ phys_addr_t lsb_buf_phys;
++ /**< LSB intermediate buf for passthru */
++ int lsb_buf_idx;
++} __rte_cache_aligned;
++
++/**< CCP crypto queue pair */
++struct ccp_qp {
++ uint16_t id;
++ /**< Queue Pair Identifier */
++ char name[RTE_CRYPTODEV_NAME_MAX_LEN];
++ /**< Unique Queue Pair Name */
++ struct rte_ring *processed_pkts;
++ /**< Ring for placing processed packets */
++ struct rte_mempool *sess_mp;
++ /**< Session Mempool */
++ struct rte_mempool *batch_mp;
++ /**< Session Mempool for batch info */
++ struct rte_cryptodev_stats qp_stats;
++ /**< Queue pair statistics */
++ struct ccp_batch_info *b_info;
++ /**< Store ops pulled out of queue */
++ struct rte_cryptodev *dev;
++ /**< rte crypto device to which this qp belongs */
++} __rte_cache_aligned;
++
++
+ /**< device specific operations function pointer structure */
+ extern struct rte_cryptodev_ops *ccp_pmd_ops;
+
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-04-20-crypto-ccp-support-session-related-crypto-pmd-ops.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-04-20-crypto-ccp-support-session-related-crypto-pmd-ops.patch
new file mode 100644
index 00000000..4cac48af
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-04-20-crypto-ccp-support-session-related-crypto-pmd-ops.patch
@@ -0,0 +1,782 @@
+From patchwork Fri Mar 9 08:35:04 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 04/20] crypto/ccp: support session related crypto pmd ops
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35803
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-4-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:04 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/Makefile | 3 +-
+ drivers/crypto/ccp/ccp_crypto.c | 229 +++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/ccp_crypto.h | 267 +++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/ccp_dev.h | 129 +++++++++++++++++++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 61 ++++++++-
+ 5 files changed, 685 insertions(+), 4 deletions(-)
+ create mode 100644 drivers/crypto/ccp/ccp_crypto.c
+ create mode 100644 drivers/crypto/ccp/ccp_crypto.h
+
+diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
+index 5e58c31..5241465 100644
+--- a/drivers/crypto/ccp/Makefile
++++ b/drivers/crypto/ccp/Makefile
+@@ -51,8 +51,9 @@ EXPORT_MAP := rte_pmd_ccp_version.map
+
+ # library source files
+ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += rte_ccp_pmd.c
+-SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
++SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_crypto.c
+ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_dev.c
+ SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pci.c
++SRCS-$(CONFIG_RTE_LIBRTE_PMD_CCP) += ccp_pmd_ops.c
+
+ include $(RTE_SDK)/mk/rte.lib.mk
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+new file mode 100644
+index 0000000..c365c0f
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -0,0 +1,229 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <dirent.h>
++#include <fcntl.h>
++#include <stdio.h>
++#include <string.h>
++#include <sys/mman.h>
++#include <sys/queue.h>
++#include <sys/types.h>
++#include <unistd.h>
++
++#include <rte_hexdump.h>
++#include <rte_memzone.h>
++#include <rte_malloc.h>
++#include <rte_memory.h>
++#include <rte_spinlock.h>
++#include <rte_string_fns.h>
++#include <rte_cryptodev_pmd.h>
++
++#include "ccp_dev.h"
++#include "ccp_crypto.h"
++#include "ccp_pci.h"
++#include "ccp_pmd_private.h"
++
++static enum ccp_cmd_order
++ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
++{
++ enum ccp_cmd_order res = CCP_CMD_NOT_SUPPORTED;
++
++ if (xform == NULL)
++ return res;
++ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
++ if (xform->next == NULL)
++ return CCP_CMD_AUTH;
++ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER)
++ return CCP_CMD_HASH_CIPHER;
++ }
++ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
++ if (xform->next == NULL)
++ return CCP_CMD_CIPHER;
++ else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
++ return CCP_CMD_CIPHER_HASH;
++ }
++ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD)
++ return CCP_CMD_COMBINED;
++ return res;
++}
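++
++/*
++ * Illustrative summary (not part of the original patch): a lone CIPHER
++ * or AUTH xform maps to CCP_CMD_CIPHER or CCP_CMD_AUTH, CIPHER followed
++ * by AUTH maps to CCP_CMD_CIPHER_HASH, AUTH followed by CIPHER to
++ * CCP_CMD_HASH_CIPHER, an AEAD xform to CCP_CMD_COMBINED, and any other
++ * combination to CCP_CMD_NOT_SUPPORTED.
++ */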
++
++/* configure session */
++static int
++ccp_configure_session_cipher(struct ccp_session *sess,
++ const struct rte_crypto_sym_xform *xform)
++{
++ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
++
++ cipher_xform = &xform->cipher;
++
++ /* set cipher direction */
++ if (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
++ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
++ else
++ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
++
++ /* set cipher key */
++ sess->cipher.key_length = cipher_xform->key.length;
++ rte_memcpy(sess->cipher.key, cipher_xform->key.data,
++ cipher_xform->key.length);
++
++ /* set iv parameters */
++ sess->iv.offset = cipher_xform->iv.offset;
++ sess->iv.length = cipher_xform->iv.length;
++
++ switch (cipher_xform->algo) {
++ default:
++ CCP_LOG_ERR("Unsupported cipher algo");
++ return -1;
++ }
++
++
++ switch (sess->cipher.engine) {
++ default:
++ CCP_LOG_ERR("Invalid CCP Engine");
++ return -ENOTSUP;
++ }
++ return 0;
++}
++
++static int
++ccp_configure_session_auth(struct ccp_session *sess,
++ const struct rte_crypto_sym_xform *xform)
++{
++ const struct rte_crypto_auth_xform *auth_xform = NULL;
++
++ auth_xform = &xform->auth;
++
++ sess->auth.digest_length = auth_xform->digest_length;
++ if (auth_xform->op == RTE_CRYPTO_AUTH_OP_GENERATE)
++ sess->auth.op = CCP_AUTH_OP_GENERATE;
++ else
++ sess->auth.op = CCP_AUTH_OP_VERIFY;
++ switch (auth_xform->algo) {
++ default:
++ CCP_LOG_ERR("Unsupported hash algo");
++ return -ENOTSUP;
++ }
++ return 0;
++}
++
++static int
++ccp_configure_session_aead(struct ccp_session *sess,
++ const struct rte_crypto_sym_xform *xform)
++{
++ const struct rte_crypto_aead_xform *aead_xform = NULL;
++
++ aead_xform = &xform->aead;
++
++ sess->cipher.key_length = aead_xform->key.length;
++ rte_memcpy(sess->cipher.key, aead_xform->key.data,
++ aead_xform->key.length);
++
++ if (aead_xform->op == RTE_CRYPTO_AEAD_OP_ENCRYPT) {
++ sess->cipher.dir = CCP_CIPHER_DIR_ENCRYPT;
++ sess->auth.op = CCP_AUTH_OP_GENERATE;
++ } else {
++ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
++ sess->auth.op = CCP_AUTH_OP_VERIFY;
++ }
++ sess->auth.aad_length = aead_xform->aad_length;
++ sess->auth.digest_length = aead_xform->digest_length;
++
++ /* set iv parameters */
++ sess->iv.offset = aead_xform->iv.offset;
++ sess->iv.length = aead_xform->iv.length;
++
++ switch (aead_xform->algo) {
++ default:
++ CCP_LOG_ERR("Unsupported aead algo");
++ return -ENOTSUP;
++ }
++ return 0;
++}
++
++int
++ccp_set_session_parameters(struct ccp_session *sess,
++ const struct rte_crypto_sym_xform *xform)
++{
++ const struct rte_crypto_sym_xform *cipher_xform = NULL;
++ const struct rte_crypto_sym_xform *auth_xform = NULL;
++ const struct rte_crypto_sym_xform *aead_xform = NULL;
++ int ret = 0;
++
++ sess->cmd_id = ccp_get_cmd_id(xform);
++
++ switch (sess->cmd_id) {
++ case CCP_CMD_CIPHER:
++ cipher_xform = xform;
++ break;
++ case CCP_CMD_AUTH:
++ auth_xform = xform;
++ break;
++ case CCP_CMD_CIPHER_HASH:
++ cipher_xform = xform;
++ auth_xform = xform->next;
++ break;
++ case CCP_CMD_HASH_CIPHER:
++ auth_xform = xform;
++ cipher_xform = xform->next;
++ break;
++ case CCP_CMD_COMBINED:
++ aead_xform = xform;
++ break;
++ default:
++ CCP_LOG_ERR("Unsupported cmd_id");
++ return -1;
++ }
++
++ /* Default IV length = 0 */
++ sess->iv.length = 0;
++ if (cipher_xform) {
++ ret = ccp_configure_session_cipher(sess, cipher_xform);
++ if (ret != 0) {
++ CCP_LOG_ERR("Invalid/unsupported cipher parameters");
++ return ret;
++ }
++ }
++ if (auth_xform) {
++ ret = ccp_configure_session_auth(sess, auth_xform);
++ if (ret != 0) {
++ CCP_LOG_ERR("Invalid/unsupported auth parameters");
++ return ret;
++ }
++ }
++ if (aead_xform) {
++ ret = ccp_configure_session_aead(sess, aead_xform);
++ if (ret != 0) {
++ CCP_LOG_ERR("Invalid/unsupported aead parameters");
++ return ret;
++ }
++ }
++ return ret;
++}
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+new file mode 100644
+index 0000000..346d5ee
+--- /dev/null
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -0,0 +1,267 @@
++/*-
++ * Copyright(c) 2018 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ *
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of the copyright holder nor the names of its
++ * contributors may be used to endorse or promote products derived from
++ * this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#ifndef _CCP_CRYPTO_H_
++#define _CCP_CRYPTO_H_
++
++#include <limits.h>
++#include <stdbool.h>
++#include <stdint.h>
++#include <string.h>
++
++#include <rte_atomic.h>
++#include <rte_byteorder.h>
++#include <rte_io.h>
++#include <rte_pci.h>
++#include <rte_spinlock.h>
++#include <rte_crypto_sym.h>
++#include <rte_cryptodev.h>
++
++#include "ccp_dev.h"
++
++#define CCP_SHA3_CTX_SIZE 200
++/**
++ * CCP supported AES modes
++ */
++enum ccp_aes_mode {
++ CCP_AES_MODE_ECB = 0,
++ CCP_AES_MODE_CBC,
++ CCP_AES_MODE_OFB,
++ CCP_AES_MODE_CFB,
++ CCP_AES_MODE_CTR,
++ CCP_AES_MODE_CMAC,
++ CCP_AES_MODE_GHASH,
++ CCP_AES_MODE_GCTR,
++ CCP_AES_MODE__LAST,
++};
++
++/**
++ * CCP AES GHASH mode
++ */
++enum ccp_aes_ghash_mode {
++ CCP_AES_MODE_GHASH_AAD = 0,
++ CCP_AES_MODE_GHASH_FINAL
++};
++
++/**
++ * CCP supported AES types
++ */
++enum ccp_aes_type {
++ CCP_AES_TYPE_128 = 0,
++ CCP_AES_TYPE_192,
++ CCP_AES_TYPE_256,
++ CCP_AES_TYPE__LAST,
++};
++
++/***** 3DES engine *****/
++
++/**
++ * CCP supported DES/3DES modes
++ */
++enum ccp_des_mode {
++ CCP_DES_MODE_ECB = 0, /* Not supported */
++ CCP_DES_MODE_CBC,
++ CCP_DES_MODE_CFB,
++};
++
++/**
++ * CCP supported DES types
++ */
++enum ccp_des_type {
++ CCP_DES_TYPE_128 = 0, /* 112 + 16 parity */
++ CCP_DES_TYPE_192, /* 168 + 24 parity */
++ CCP_DES_TYPE__LAST,
++};
++
++/***** SHA engine *****/
++
++/**
++ * ccp_sha_type - type of SHA operation
++ *
++ * @CCP_SHA_TYPE_1: SHA-1 operation
++ * @CCP_SHA_TYPE_224: SHA-224 operation
++ * @CCP_SHA_TYPE_256: SHA-256 operation
++ */
++enum ccp_sha_type {
++ CCP_SHA_TYPE_1 = 1,
++ CCP_SHA_TYPE_224,
++ CCP_SHA_TYPE_256,
++ CCP_SHA_TYPE_384,
++ CCP_SHA_TYPE_512,
++ CCP_SHA_TYPE_RSVD1,
++ CCP_SHA_TYPE_RSVD2,
++ CCP_SHA3_TYPE_224,
++ CCP_SHA3_TYPE_256,
++ CCP_SHA3_TYPE_384,
++ CCP_SHA3_TYPE_512,
++ CCP_SHA_TYPE__LAST,
++};
++
++/**
++ * CCP supported cipher algorithms
++ */
++enum ccp_cipher_algo {
++ CCP_CIPHER_ALGO_AES_CBC = 0,
++ CCP_CIPHER_ALGO_AES_ECB,
++ CCP_CIPHER_ALGO_AES_CTR,
++ CCP_CIPHER_ALGO_AES_GCM,
++ CCP_CIPHER_ALGO_3DES_CBC,
++};
++
++/**
++ * CCP cipher operation type
++ */
++enum ccp_cipher_dir {
++ CCP_CIPHER_DIR_DECRYPT = 0,
++ CCP_CIPHER_DIR_ENCRYPT = 1,
++};
++
++/**
++ * CCP supported hash algorithms
++ */
++enum ccp_hash_algo {
++ CCP_AUTH_ALGO_SHA1 = 0,
++ CCP_AUTH_ALGO_SHA1_HMAC,
++ CCP_AUTH_ALGO_SHA224,
++ CCP_AUTH_ALGO_SHA224_HMAC,
++ CCP_AUTH_ALGO_SHA3_224,
++ CCP_AUTH_ALGO_SHA3_224_HMAC,
++ CCP_AUTH_ALGO_SHA256,
++ CCP_AUTH_ALGO_SHA256_HMAC,
++ CCP_AUTH_ALGO_SHA3_256,
++ CCP_AUTH_ALGO_SHA3_256_HMAC,
++ CCP_AUTH_ALGO_SHA384,
++ CCP_AUTH_ALGO_SHA384_HMAC,
++ CCP_AUTH_ALGO_SHA3_384,
++ CCP_AUTH_ALGO_SHA3_384_HMAC,
++ CCP_AUTH_ALGO_SHA512,
++ CCP_AUTH_ALGO_SHA512_HMAC,
++ CCP_AUTH_ALGO_SHA3_512,
++ CCP_AUTH_ALGO_SHA3_512_HMAC,
++ CCP_AUTH_ALGO_AES_CMAC,
++ CCP_AUTH_ALGO_AES_GCM,
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ CCP_AUTH_ALGO_MD5_HMAC,
++#endif
++};
++
++/**
++ * CCP hash operation type
++ */
++enum ccp_hash_op {
++ CCP_AUTH_OP_GENERATE = 0,
++ CCP_AUTH_OP_VERIFY = 1,
++};
++
++/* CCP crypto private session structure */
++struct ccp_session {
++ enum ccp_cmd_order cmd_id;
++ /**< chain order mode */
++ struct {
++ uint16_t length;
++ uint16_t offset;
++ } iv;
++ /**< IV parameters */
++ struct {
++ enum ccp_cipher_algo algo;
++ enum ccp_engine engine;
++ union {
++ enum ccp_aes_mode aes_mode;
++ enum ccp_des_mode des_mode;
++ } um;
++ union {
++ enum ccp_aes_type aes_type;
++ enum ccp_des_type des_type;
++ } ut;
++ enum ccp_cipher_dir dir;
++ uint64_t key_length;
++ /**< max cipher key size 256 bits */
++ uint8_t key[32];
++ /**< CCP key format */
++ uint8_t key_ccp[32];
++ phys_addr_t key_phys;
++ /**< AES-CTR nonce(4) iv(8) ctr */
++ uint8_t nonce[32];
++ phys_addr_t nonce_phys;
++ } cipher;
++ /**< Cipher Parameters */
++
++ struct {
++ enum ccp_hash_algo algo;
++ enum ccp_engine engine;
++ union {
++ enum ccp_aes_mode aes_mode;
++ } um;
++ union {
++ enum ccp_sha_type sha_type;
++ enum ccp_aes_type aes_type;
++ } ut;
++ enum ccp_hash_op op;
++ uint64_t key_length;
++ /**< max hash key size 144 bytes (struct capabilities) */
++ uint8_t key[144];
++ /**< max big-endian key size of AES is 32 */
++ uint8_t key_ccp[32];
++ phys_addr_t key_phys;
++ uint64_t digest_length;
++ void *ctx;
++ int ctx_len;
++ int offset;
++ int block_size;
++ /**< Buffer to store software-generated precompute values */
++ /**< For HMAC H(ipad ^ key) and H(opad ^ key) */
++ /**< For CMAC K1 IV and K2 IV*/
++ uint8_t pre_compute[2 * CCP_SHA3_CTX_SIZE];
++ /**< SHA3 initial ctx all zeros */
++ uint8_t sha3_ctx[200];
++ int aad_length;
++ } auth;
++ /**< Authentication Parameters */
++ enum rte_crypto_aead_algorithm aead_algo;
++ /**< AEAD Algorithm */
++
++ uint32_t reserved;
++} __rte_cache_aligned;
++
++extern uint8_t ccp_cryptodev_driver_id;
++
++struct ccp_qp;
++
++/**
++ * Set and validate CCP crypto session parameters
++ *
++ * @param sess ccp private session
++ * @param xform crypto xform for this session
++ * @return 0 on success otherwise -1
++ */
++int ccp_set_session_parameters(struct ccp_session *sess,
++ const struct rte_crypto_sym_xform *xform);
++
++#endif /* _CCP_CRYPTO_H_ */
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+index b321530..a16ba81 100644
+--- a/drivers/crypto/ccp/ccp_dev.h
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -225,6 +225,123 @@ struct ccp_device {
+ /**< current queue index */
+ } __rte_cache_aligned;
+
++/**< CCP H/W engine related */
++/**
++ * ccp_engine - CCP operation identifiers
++ *
++ * @CCP_ENGINE_AES: AES operation
++ * @CCP_ENGINE_XTS_AES: 128-bit XTS AES operation
++ * @CCP_ENGINE_3DES: DES/3DES operation
++ * @CCP_ENGINE_SHA: SHA operation
++ * @CCP_ENGINE_RSA: RSA operation
++ * @CCP_ENGINE_PASSTHRU: pass-through operation
++ * @CCP_ENGINE_ZLIB_DECOMPRESS: unused
++ * @CCP_ENGINE_ECC: ECC operation
++ */
++enum ccp_engine {
++ CCP_ENGINE_AES = 0,
++ CCP_ENGINE_XTS_AES_128,
++ CCP_ENGINE_3DES,
++ CCP_ENGINE_SHA,
++ CCP_ENGINE_RSA,
++ CCP_ENGINE_PASSTHRU,
++ CCP_ENGINE_ZLIB_DECOMPRESS,
++ CCP_ENGINE_ECC,
++ CCP_ENGINE__LAST,
++};
++
++/* Passthru engine */
++/**
++ * ccp_passthru_bitwise - type of bitwise passthru operation
++ *
++ * @CCP_PASSTHRU_BITWISE_NOOP: no bitwise operation performed
++ * @CCP_PASSTHRU_BITWISE_AND: perform bitwise AND of src with mask
++ * @CCP_PASSTHRU_BITWISE_OR: perform bitwise OR of src with mask
++ * @CCP_PASSTHRU_BITWISE_XOR: perform bitwise XOR of src with mask
++ * @CCP_PASSTHRU_BITWISE_MASK: overwrite with mask
++ */
++enum ccp_passthru_bitwise {
++ CCP_PASSTHRU_BITWISE_NOOP = 0,
++ CCP_PASSTHRU_BITWISE_AND,
++ CCP_PASSTHRU_BITWISE_OR,
++ CCP_PASSTHRU_BITWISE_XOR,
++ CCP_PASSTHRU_BITWISE_MASK,
++ CCP_PASSTHRU_BITWISE__LAST,
++};
++
++/**
++ * ccp_passthru_byteswap - type of byteswap passthru operation
++ *
++ * @CCP_PASSTHRU_BYTESWAP_NOOP: no byte swapping performed
++ * @CCP_PASSTHRU_BYTESWAP_32BIT: swap bytes within 32-bit words
++ * @CCP_PASSTHRU_BYTESWAP_256BIT: swap bytes within 256-bit words
++ */
++enum ccp_passthru_byteswap {
++ CCP_PASSTHRU_BYTESWAP_NOOP = 0,
++ CCP_PASSTHRU_BYTESWAP_32BIT,
++ CCP_PASSTHRU_BYTESWAP_256BIT,
++ CCP_PASSTHRU_BYTESWAP__LAST,
++};
++
++/**
++ * CCP passthru
++ */
++struct ccp_passthru {
++ phys_addr_t src_addr;
++ phys_addr_t dest_addr;
++ enum ccp_passthru_bitwise bit_mod;
++ enum ccp_passthru_byteswap byte_swap;
++ int len;
++ int dir;
++};
++
++/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
++union ccp_function {
++ struct {
++ uint16_t size:7;
++ uint16_t encrypt:1;
++ uint16_t mode:5;
++ uint16_t type:2;
++ } aes;
++ struct {
++ uint16_t size:7;
++ uint16_t encrypt:1;
++ uint16_t mode:5;
++ uint16_t type:2;
++ } des;
++ struct {
++ uint16_t size:7;
++ uint16_t encrypt:1;
++ uint16_t rsvd:5;
++ uint16_t type:2;
++ } aes_xts;
++ struct {
++ uint16_t rsvd1:10;
++ uint16_t type:4;
++ uint16_t rsvd2:1;
++ } sha;
++ struct {
++ uint16_t mode:3;
++ uint16_t size:12;
++ } rsa;
++ struct {
++ uint16_t byteswap:2;
++ uint16_t bitwise:3;
++ uint16_t reflect:2;
++ uint16_t rsvd:8;
++ } pt;
++ struct {
++ uint16_t rsvd:13;
++ } zlib;
++ struct {
++ uint16_t size:10;
++ uint16_t type:2;
++ uint16_t mode:3;
++ } ecc;
++ uint16_t raw;
++};
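++
++/*
++ * Illustrative encoding (not part of the original patch): the per-engine
++ * bit-fields above are packed into the 15-bit "function" field of
++ * descriptor dword0, along the lines of:
++ *
++ *    union ccp_function f = { .raw = 0 };
++ *    f.aes.type = aes_type;
++ *    f.aes.mode = aes_mode;
++ *    f.aes.encrypt = dir;
++ *    desc->dw0.function = f.raw;
++ */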
++
++
+ /**
+ * descriptor for version 5 CCP commands
+ * 8 32-bit words:
+@@ -291,6 +408,18 @@ struct ccp_desc {
+ struct dword7 dw7;
+ };
+
++/**
++ * cmd id to follow order
++ */
++enum ccp_cmd_order {
++ CCP_CMD_CIPHER = 0,
++ CCP_CMD_AUTH,
++ CCP_CMD_CIPHER_HASH,
++ CCP_CMD_HASH_CIPHER,
++ CCP_CMD_COMBINED,
++ CCP_CMD_NOT_SUPPORTED,
++};
++
+ static inline uint32_t
+ low32_value(unsigned long addr)
+ {
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index b6f8c48..ad0a670 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -36,6 +36,7 @@
+
+ #include "ccp_pmd_private.h"
+ #include "ccp_dev.h"
++#include "ccp_crypto.h"
+
+ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+@@ -81,6 +82,60 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
+ }
+ }
+
++static unsigned
++ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
++{
++ return sizeof(struct ccp_session);
++}
++
++static int
++ccp_pmd_session_configure(struct rte_cryptodev *dev,
++ struct rte_crypto_sym_xform *xform,
++ struct rte_cryptodev_sym_session *sess,
++ struct rte_mempool *mempool)
++{
++ int ret;
++ void *sess_private_data;
++
++ if (unlikely(sess == NULL || xform == NULL)) {
++ CCP_LOG_ERR("Invalid session struct or xform");
++ return -ENOMEM;
++ }
++
++ if (rte_mempool_get(mempool, &sess_private_data)) {
++ CCP_LOG_ERR("Couldn't get object from session mempool");
++ return -ENOMEM;
++ }
++ ret = ccp_set_session_parameters(sess_private_data, xform);
++ if (ret != 0) {
++ CCP_LOG_ERR("failed configure session parameters");
++
++ /* Return session to mempool */
++ rte_mempool_put(mempool, sess_private_data);
++ return ret;
++ }
++ set_session_private_data(sess, dev->driver_id,
++ sess_private_data);
++
++ return 0;
++}
++
++static void
++ccp_pmd_session_clear(struct rte_cryptodev *dev,
++ struct rte_cryptodev_sym_session *sess)
++{
++ uint8_t index = dev->driver_id;
++ void *sess_priv = get_session_private_data(sess, index);
++
++ if (sess_priv) {
++ struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
++
++ rte_mempool_put(sess_mp, sess_priv);
++ memset(sess_priv, 0, sizeof(struct ccp_session));
++ set_session_private_data(sess, index, NULL);
++ }
++}
++
+ struct rte_cryptodev_ops ccp_ops = {
+ .dev_configure = ccp_pmd_config,
+ .dev_start = ccp_pmd_start,
+@@ -98,9 +153,9 @@ struct rte_cryptodev_ops ccp_ops = {
+ .queue_pair_stop = NULL,
+ .queue_pair_count = NULL,
+
+- .session_get_size = NULL,
+- .session_configure = NULL,
+- .session_clear = NULL,
++ .session_get_size = ccp_pmd_session_get_size,
++ .session_configure = ccp_pmd_session_configure,
++ .session_clear = ccp_pmd_session_clear,
+ };
+
+ struct rte_cryptodev_ops *ccp_pmd_ops = &ccp_ops;
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-05-20-crypto-ccp-support-queue-pair-related-pmd-ops.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-05-20-crypto-ccp-support-queue-pair-related-pmd-ops.patch
new file mode 100644
index 00000000..0c490d5e
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-05-20-crypto-ccp-support-queue-pair-related-pmd-ops.patch
@@ -0,0 +1,186 @@
+From patchwork Fri Mar 9 08:35:05 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,05/20] crypto/ccp: support queue pair related pmd ops
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35804
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-5-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:05 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_pmd_ops.c | 149 +++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 144 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index ad0a670..a02aa6f 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -82,6 +82,145 @@ ccp_pmd_info_get(struct rte_cryptodev *dev,
+ }
+ }
+
++static int
++ccp_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id)
++{
++ struct ccp_qp *qp;
++
++ if (dev->data->queue_pairs[qp_id] != NULL) {
++ qp = (struct ccp_qp *)dev->data->queue_pairs[qp_id];
++ rte_ring_free(qp->processed_pkts);
++ rte_mempool_free(qp->batch_mp);
++ rte_free(qp);
++ dev->data->queue_pairs[qp_id] = NULL;
++ }
++ return 0;
++}
++
++static int
++ccp_pmd_qp_set_unique_name(struct rte_cryptodev *dev,
++ struct ccp_qp *qp)
++{
++ unsigned int n = snprintf(qp->name, sizeof(qp->name),
++ "ccp_pmd_%u_qp_%u",
++ dev->data->dev_id, qp->id);
++
++ if (n > sizeof(qp->name))
++ return -1;
++
++ return 0;
++}
++
++static struct rte_ring *
++ccp_pmd_qp_create_batch_info_ring(struct ccp_qp *qp,
++ unsigned int ring_size, int socket_id)
++{
++ struct rte_ring *r;
++
++ r = rte_ring_lookup(qp->name);
++ if (r) {
++ if (r->size >= ring_size) {
++ CCP_LOG_INFO(
++ "Reusing ring %s for processed packets",
++ qp->name);
++ return r;
++ }
++ CCP_LOG_INFO(
++ "Unable to reuse ring %s for processed packets",
++ qp->name);
++ return NULL;
++ }
++
++ return rte_ring_create(qp->name, ring_size, socket_id,
++ RING_F_SP_ENQ | RING_F_SC_DEQ);
++}
++
++static int
++ccp_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id,
++ const struct rte_cryptodev_qp_conf *qp_conf,
++ int socket_id, struct rte_mempool *session_pool)
++{
++ struct ccp_private *internals = dev->data->dev_private;
++ struct ccp_qp *qp;
++ int retval = 0;
++
++ if (qp_id >= internals->max_nb_qpairs) {
++ CCP_LOG_ERR("Invalid qp_id %u, should be less than %u",
++ qp_id, internals->max_nb_qpairs);
++ return (-EINVAL);
++ }
++
++ /* Free memory prior to re-allocation if needed. */
++ if (dev->data->queue_pairs[qp_id] != NULL)
++ ccp_pmd_qp_release(dev, qp_id);
++
++ /* Allocate the queue pair data structure. */
++ qp = rte_zmalloc_socket("CCP Crypto PMD Queue Pair", sizeof(*qp),
++ RTE_CACHE_LINE_SIZE, socket_id);
++ if (qp == NULL) {
++ CCP_LOG_ERR("Failed to allocate queue pair memory");
++ return (-ENOMEM);
++ }
++
++ qp->dev = dev;
++ qp->id = qp_id;
++ dev->data->queue_pairs[qp_id] = qp;
++
++ retval = ccp_pmd_qp_set_unique_name(dev, qp);
++ if (retval) {
++ CCP_LOG_ERR("Failed to create unique name for ccp qp");
++ goto qp_setup_cleanup;
++ }
++
++ qp->processed_pkts = ccp_pmd_qp_create_batch_info_ring(qp,
++ qp_conf->nb_descriptors, socket_id);
++ if (qp->processed_pkts == NULL) {
++ CCP_LOG_ERR("Failed to create batch info ring");
++ goto qp_setup_cleanup;
++ }
++
++ qp->sess_mp = session_pool;
++
++ /* mempool for batch info */
++ qp->batch_mp = rte_mempool_create(
++ qp->name,
++ qp_conf->nb_descriptors,
++ sizeof(struct ccp_batch_info),
++ RTE_CACHE_LINE_SIZE,
++ 0, NULL, NULL, NULL, NULL,
++ SOCKET_ID_ANY, 0);
++ if (qp->batch_mp == NULL)
++ goto qp_setup_cleanup;
++ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
++ return 0;
++
++qp_setup_cleanup:
++ dev->data->queue_pairs[qp_id] = NULL;
++ if (qp)
++ rte_free(qp);
++ return -1;
++}
++
++static int
++ccp_pmd_qp_start(struct rte_cryptodev *dev __rte_unused,
++ uint16_t queue_pair_id __rte_unused)
++{
++ return -ENOTSUP;
++}
++
++static int
++ccp_pmd_qp_stop(struct rte_cryptodev *dev __rte_unused,
++ uint16_t queue_pair_id __rte_unused)
++{
++ return -ENOTSUP;
++}
++
++static uint32_t
++ccp_pmd_qp_count(struct rte_cryptodev *dev)
++{
++ return dev->data->nb_queue_pairs;
++}
++
+ static unsigned
+ ccp_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused)
+ {
+@@ -147,11 +286,11 @@ struct rte_cryptodev_ops ccp_ops = {
+
+ .dev_infos_get = ccp_pmd_info_get,
+
+- .queue_pair_setup = NULL,
+- .queue_pair_release = NULL,
+- .queue_pair_start = NULL,
+- .queue_pair_stop = NULL,
+- .queue_pair_count = NULL,
++ .queue_pair_setup = ccp_pmd_qp_setup,
++ .queue_pair_release = ccp_pmd_qp_release,
++ .queue_pair_start = ccp_pmd_qp_start,
++ .queue_pair_stop = ccp_pmd_qp_stop,
++ .queue_pair_count = ccp_pmd_qp_count,
+
+ .session_get_size = ccp_pmd_session_get_size,
+ .session_configure = ccp_pmd_session_configure,
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-06-20-crypto-ccp-support-crypto-enqueue-and-dequeue-burst-api.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-06-20-crypto-ccp-support-crypto-enqueue-and-dequeue-burst-api.patch
new file mode 100644
index 00000000..5e845408
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-06-20-crypto-ccp-support-crypto-enqueue-and-dequeue-burst-api.patch
@@ -0,0 +1,584 @@
+From patchwork Fri Mar 9 08:35:06 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 06/20] crypto/ccp: support crypto enqueue and dequeue burst api
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35805
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-6-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:06 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 360 +++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/ccp_crypto.h | 35 ++++
+ drivers/crypto/ccp/ccp_dev.c | 27 +++
+ drivers/crypto/ccp/ccp_dev.h | 9 +
+ drivers/crypto/ccp/rte_ccp_pmd.c | 64 ++++++-
+ 5 files changed, 488 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index c365c0f..c17e84f 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -227,3 +227,363 @@ ccp_set_session_parameters(struct ccp_session *sess,
+ }
+ return ret;
+ }
++
++/* calculate CCP descriptors requirement */
++static inline int
++ccp_cipher_slot(struct ccp_session *session)
++{
++ int count = 0;
++
++ switch (session->cipher.algo) {
++ default:
++ CCP_LOG_ERR("Unsupported cipher algo %d",
++ session->cipher.algo);
++ }
++ return count;
++}
++
++static inline int
++ccp_auth_slot(struct ccp_session *session)
++{
++ int count = 0;
++
++ switch (session->auth.algo) {
++ default:
++ CCP_LOG_ERR("Unsupported auth algo %d",
++ session->auth.algo);
++ }
++
++ return count;
++}
++
++static int
++ccp_aead_slot(struct ccp_session *session)
++{
++ int count = 0;
++
++ switch (session->aead_algo) {
++ default:
++ CCP_LOG_ERR("Unsupported aead algo %d",
++ session->aead_algo);
++ }
++ return count;
++}
++
++int
++ccp_compute_slot_count(struct ccp_session *session)
++{
++ int count = 0;
++
++ switch (session->cmd_id) {
++ case CCP_CMD_CIPHER:
++ count = ccp_cipher_slot(session);
++ break;
++ case CCP_CMD_AUTH:
++ count = ccp_auth_slot(session);
++ break;
++ case CCP_CMD_CIPHER_HASH:
++ case CCP_CMD_HASH_CIPHER:
++ count = ccp_cipher_slot(session);
++ count += ccp_auth_slot(session);
++ break;
++ case CCP_CMD_COMBINED:
++ count = ccp_aead_slot(session);
++ break;
++ default:
++ CCP_LOG_ERR("Unsupported cmd_id");
++
++ }
++
++ return count;
++}
++
++static inline int
++ccp_crypto_cipher(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q __rte_unused,
++ struct ccp_batch_info *b_info __rte_unused)
++{
++ int result = 0;
++ struct ccp_session *session;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ switch (session->cipher.algo) {
++ default:
++ CCP_LOG_ERR("Unsupported cipher algo %d",
++ session->cipher.algo);
++ return -ENOTSUP;
++ }
++ return result;
++}
++
++static inline int
++ccp_crypto_auth(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q __rte_unused,
++ struct ccp_batch_info *b_info __rte_unused)
++{
++
++ int result = 0;
++ struct ccp_session *session;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ switch (session->auth.algo) {
++ default:
++ CCP_LOG_ERR("Unsupported auth algo %d",
++ session->auth.algo);
++ return -ENOTSUP;
++ }
++
++ return result;
++}
++
++static inline int
++ccp_crypto_aead(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q __rte_unused,
++ struct ccp_batch_info *b_info __rte_unused)
++{
++ int result = 0;
++ struct ccp_session *session;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ switch (session->aead_algo) {
++ default:
++ CCP_LOG_ERR("Unsupported aead algo %d",
++ session->aead_algo);
++ return -ENOTSUP;
++ }
++ return result;
++}
++
++int
++process_ops_to_enqueue(const struct ccp_qp *qp,
++ struct rte_crypto_op **op,
++ struct ccp_queue *cmd_q,
++ uint16_t nb_ops,
++ int slots_req)
++{
++ int i, result = 0;
++ struct ccp_batch_info *b_info;
++ struct ccp_session *session;
++
++ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
++ CCP_LOG_ERR("batch info allocation failed");
++ return 0;
++ }
++ /* populate batch info necessary for dequeue */
++ b_info->op_idx = 0;
++ b_info->lsb_buf_idx = 0;
++ b_info->desccnt = 0;
++ b_info->cmd_q = cmd_q;
++ b_info->lsb_buf_phys =
++ (phys_addr_t)rte_mem_virt2phy((void *)b_info->lsb_buf);
++ rte_atomic64_sub(&b_info->cmd_q->free_slots, slots_req);
++
++ b_info->head_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
++ Q_DESC_SIZE);
++ for (i = 0; i < nb_ops; i++) {
++ session = (struct ccp_session *)get_session_private_data(
++ op[i]->sym->session,
++ ccp_cryptodev_driver_id);
++ switch (session->cmd_id) {
++ case CCP_CMD_CIPHER:
++ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
++ break;
++ case CCP_CMD_AUTH:
++ result = ccp_crypto_auth(op[i], cmd_q, b_info);
++ break;
++ case CCP_CMD_CIPHER_HASH:
++ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
++ if (result)
++ break;
++ result = ccp_crypto_auth(op[i], cmd_q, b_info);
++ break;
++ case CCP_CMD_HASH_CIPHER:
++ result = ccp_crypto_auth(op[i], cmd_q, b_info);
++ if (result)
++ break;
++ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
++ break;
++ case CCP_CMD_COMBINED:
++ result = ccp_crypto_aead(op[i], cmd_q, b_info);
++ break;
++ default:
++ CCP_LOG_ERR("Unsupported cmd_id");
++ result = -1;
++ }
++ if (unlikely(result < 0)) {
++ rte_atomic64_add(&b_info->cmd_q->free_slots,
++ (slots_req - b_info->desccnt));
++ break;
++ }
++ b_info->op[i] = op[i];
++ }
++
++ b_info->opcnt = i;
++ b_info->tail_offset = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx *
++ Q_DESC_SIZE);
++
++ rte_wmb();
++ /* Write the new tail address back to the queue register */
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE,
++ b_info->tail_offset);
++ /* Turn the queue back on using our cached control register */
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
++
++ return i;
++}
++
++static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
++{
++ struct ccp_session *session;
++ uint8_t *digest_data, *addr;
++ struct rte_mbuf *m_last;
++ int offset, digest_offset;
++ uint8_t digest_le[64];
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ if (session->cmd_id == CCP_CMD_COMBINED) {
++ digest_data = op->sym->aead.digest.data;
++ digest_offset = op->sym->aead.data.offset +
++ op->sym->aead.data.length;
++ } else {
++ digest_data = op->sym->auth.digest.data;
++ digest_offset = op->sym->auth.data.offset +
++ op->sym->auth.data.length;
++ }
++ m_last = rte_pktmbuf_lastseg(op->sym->m_src);
++ addr = (uint8_t *)((char *)m_last->buf_addr + m_last->data_off +
++ m_last->data_len - session->auth.ctx_len);
++
++ rte_mb();
++ offset = session->auth.offset;
++
++ if (session->auth.engine == CCP_ENGINE_SHA)
++ if ((session->auth.ut.sha_type != CCP_SHA_TYPE_1) &&
++ (session->auth.ut.sha_type != CCP_SHA_TYPE_224) &&
++ (session->auth.ut.sha_type != CCP_SHA_TYPE_256)) {
++ /* All other algorithms require byte
++ * swap done by host
++ */
++ unsigned int i;
++
++ offset = session->auth.ctx_len -
++ session->auth.offset - 1;
++ for (i = 0; i < session->auth.digest_length; i++)
++ digest_le[i] = addr[offset - i];
++ offset = 0;
++ addr = digest_le;
++ }
++
++ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++ if (session->auth.op == CCP_AUTH_OP_VERIFY) {
++ if (memcmp(addr + offset, digest_data,
++ session->auth.digest_length) != 0)
++ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
++
++ } else {
++ if (unlikely(digest_data == 0))
++ digest_data = rte_pktmbuf_mtod_offset(
++ op->sym->m_dst, uint8_t *,
++ digest_offset);
++ rte_memcpy(digest_data, addr + offset,
++ session->auth.digest_length);
++ }
++ /* Trim area used for digest from mbuf. */
++ rte_pktmbuf_trim(op->sym->m_src,
++ session->auth.ctx_len);
++}
++
++static int
++ccp_prepare_ops(struct rte_crypto_op **op_d,
++ struct ccp_batch_info *b_info,
++ uint16_t nb_ops)
++{
++ int i, min_ops;
++ struct ccp_session *session;
++
++ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
++
++ for (i = 0; i < min_ops; i++) {
++ op_d[i] = b_info->op[b_info->op_idx++];
++ session = (struct ccp_session *)get_session_private_data(
++ op_d[i]->sym->session,
++ ccp_cryptodev_driver_id);
++ switch (session->cmd_id) {
++ case CCP_CMD_CIPHER:
++ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++ break;
++ case CCP_CMD_AUTH:
++ case CCP_CMD_CIPHER_HASH:
++ case CCP_CMD_HASH_CIPHER:
++ case CCP_CMD_COMBINED:
++ ccp_auth_dq_prepare(op_d[i]);
++ break;
++ default:
++ CCP_LOG_ERR("Unsupported cmd_id");
++ }
++ }
++
++ b_info->opcnt -= min_ops;
++ return min_ops;
++}
++
++int
++process_ops_to_dequeue(struct ccp_qp *qp,
++ struct rte_crypto_op **op,
++ uint16_t nb_ops)
++{
++ struct ccp_batch_info *b_info;
++ uint32_t cur_head_offset;
++
++ if (qp->b_info != NULL) {
++ b_info = qp->b_info;
++ if (unlikely(b_info->op_idx > 0))
++ goto success;
++ } else if (rte_ring_dequeue(qp->processed_pkts,
++ (void **)&b_info))
++ return 0;
++ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
++ CMD_Q_HEAD_LO_BASE);
++
++ if (b_info->head_offset < b_info->tail_offset) {
++ if ((cur_head_offset >= b_info->head_offset) &&
++ (cur_head_offset < b_info->tail_offset)) {
++ qp->b_info = b_info;
++ return 0;
++ }
++ } else {
++ if ((cur_head_offset >= b_info->head_offset) ||
++ (cur_head_offset < b_info->tail_offset)) {
++ qp->b_info = b_info;
++ return 0;
++ }
++ }
++
++
++success:
++ nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
++ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
++ b_info->desccnt = 0;
++ if (b_info->opcnt > 0) {
++ qp->b_info = b_info;
++ } else {
++ rte_mempool_put(qp->batch_mp, (void *)b_info);
++ qp->b_info = NULL;
++ }
++
++ return nb_ops;
++}
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index 346d5ee..4455497 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -264,4 +264,39 @@ struct ccp_qp;
+ int ccp_set_session_parameters(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform);
+
++/**
++ * Find count of slots
++ *
++ * @param session CCP private session
++ * @return count of free slots available
++ */
++int ccp_compute_slot_count(struct ccp_session *session);
++
++/**
++ * process crypto ops to be enqueued
++ *
++ * @param qp CCP crypto queue-pair
++ * @param op crypto ops table
++ * @param cmd_q CCP cmd queue
++ * @param nb_ops No. of ops to be submitted
++ * @return 0 on success otherwise -1
++ */
++int process_ops_to_enqueue(const struct ccp_qp *qp,
++ struct rte_crypto_op **op,
++ struct ccp_queue *cmd_q,
++ uint16_t nb_ops,
++ int slots_req);
++
++/**
++ * process crypto ops to be dequeued
++ *
++ * @param qp CCP crypto queue-pair
++ * @param op crypto ops table
++ * @param nb_ops requested no. of ops
++ * @return 0 on success otherwise -1
++ */
++int process_ops_to_dequeue(struct ccp_qp *qp,
++ struct rte_crypto_op **op,
++ uint16_t nb_ops);
++
+ #endif /* _CCP_CRYPTO_H_ */
+diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
+index 57bccf4..fee90e3 100644
+--- a/drivers/crypto/ccp/ccp_dev.c
++++ b/drivers/crypto/ccp/ccp_dev.c
+@@ -61,6 +61,33 @@ ccp_dev_start(struct rte_cryptodev *dev)
+ return 0;
+ }
+
++struct ccp_queue *
++ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
++{
++ int i, ret = 0;
++ struct ccp_device *dev;
++ struct ccp_private *priv = cdev->data->dev_private;
++
++ dev = TAILQ_NEXT(priv->last_dev, next);
++ if (unlikely(dev == NULL))
++ dev = TAILQ_FIRST(&ccp_list);
++ priv->last_dev = dev;
++ if (dev->qidx >= dev->cmd_q_count)
++ dev->qidx = 0;
++ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
++ if (ret >= slot_req)
++ return &dev->cmd_q[dev->qidx];
++ for (i = 0; i < dev->cmd_q_count; i++) {
++ dev->qidx++;
++ if (dev->qidx >= dev->cmd_q_count)
++ dev->qidx = 0;
++ ret = rte_atomic64_read(&dev->cmd_q[dev->qidx].free_slots);
++ if (ret >= slot_req)
++ return &dev->cmd_q[dev->qidx];
++ }
++ return NULL;
++}
++
+ static const struct rte_memzone *
+ ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+index a16ba81..cfb3b03 100644
+--- a/drivers/crypto/ccp/ccp_dev.h
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -445,4 +445,13 @@ int ccp_dev_start(struct rte_cryptodev *dev);
+ */
+ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+
++/**
++ * allocate a ccp command queue
++ *
++ * @dev rte crypto device
++ * @param slot_req number of required
++ * @return allotted CCP queue on success otherwise NULL
++ */
++struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
++
+ #endif /* _CCP_DEV_H_ */
+diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
+index cc35a97..ed6ca5d 100644
+--- a/drivers/crypto/ccp/rte_ccp_pmd.c
++++ b/drivers/crypto/ccp/rte_ccp_pmd.c
+@@ -38,6 +38,7 @@
+ #include <rte_dev.h>
+ #include <rte_malloc.h>
+
++#include "ccp_crypto.h"
+ #include "ccp_dev.h"
+ #include "ccp_pmd_private.h"
+
+@@ -47,23 +48,72 @@
+ static unsigned int ccp_pmd_init_done;
+ uint8_t ccp_cryptodev_driver_id;
+
++static struct ccp_session *
++get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
++{
++ struct ccp_session *sess = NULL;
++
++ if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
++ if (unlikely(op->sym->session == NULL))
++ return NULL;
++
++ sess = (struct ccp_session *)
++ get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++ }
++
++ return sess;
++}
++
+ static uint16_t
+-ccp_pmd_enqueue_burst(void *queue_pair __rte_unused,
+- struct rte_crypto_op **ops __rte_unused,
+- uint16_t nb_ops __rte_unused)
++ccp_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
++ uint16_t nb_ops)
+ {
+- uint16_t enq_cnt = 0;
++ struct ccp_session *sess = NULL;
++ struct ccp_qp *qp = queue_pair;
++ struct ccp_queue *cmd_q;
++ struct rte_cryptodev *dev = qp->dev;
++ uint16_t i, enq_cnt = 0, slots_req = 0;
++
++ if (nb_ops == 0)
++ return 0;
++
++ if (unlikely(rte_ring_full(qp->processed_pkts) != 0))
++ return 0;
++
++ for (i = 0; i < nb_ops; i++) {
++ sess = get_ccp_session(qp, ops[i]);
++ if (unlikely(sess == NULL) && (i == 0)) {
++ qp->qp_stats.enqueue_err_count++;
++ return 0;
++ } else if (sess == NULL) {
++ nb_ops = i;
++ break;
++ }
++ slots_req += ccp_compute_slot_count(sess);
++ }
++
++ cmd_q = ccp_allot_queue(dev, slots_req);
++ if (unlikely(cmd_q == NULL))
++ return 0;
+
++ enq_cnt = process_ops_to_enqueue(qp, ops, cmd_q, nb_ops, slots_req);
++ qp->qp_stats.enqueued_count += enq_cnt;
+ return enq_cnt;
+ }
+
+ static uint16_t
+-ccp_pmd_dequeue_burst(void *queue_pair __rte_unused,
+- struct rte_crypto_op **ops __rte_unused,
+- uint16_t nb_ops __rte_unused)
++ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
++ uint16_t nb_ops)
+ {
++ struct ccp_qp *qp = queue_pair;
+ uint16_t nb_dequeued = 0;
+
++ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
++
++ qp->qp_stats.dequeued_count += nb_dequeued;
++
+ return nb_dequeued;
+ }
+
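
Once the burst hooks are in place, the data path is driven entirely through the standard burst calls. A short illustrative poll loop follows; it is an assumption rather than code from the patch, with dev_id, queue 0 and the ops array as placeholders.

	/* enqueue lands in ccp_pmd_enqueue_burst(), dequeue in
	 * ccp_pmd_dequeue_burst() added by this patch
	 */
	uint16_t enq, deq = 0;

	enq = rte_cryptodev_enqueue_burst(dev_id, 0, ops, nb_ops);
	while (deq < enq)
		deq += rte_cryptodev_dequeue_burst(dev_id, 0,
						   &ops[deq], enq - deq);

Note that process_ops_to_dequeue() returns 0 while the hardware head pointer is still inside the batch window, so the dequeue side is expected to be polled repeatedly until the batch completes.
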
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-07-20-crypto-ccp-support-sessionless-operations.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-07-20-crypto-ccp-support-sessionless-operations.patch
new file mode 100644
index 00000000..44b75b0c
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-07-20-crypto-ccp-support-sessionless-operations.patch
@@ -0,0 +1,80 @@
+From patchwork Fri Mar 9 08:35:07 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,07/20] crypto/ccp: support sessionless operations
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35806
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-7-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:07 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/rte_ccp_pmd.c | 33 +++++++++++++++++++++++++++++++--
+ 1 file changed, 31 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
+index ed6ca5d..23d3af3 100644
+--- a/drivers/crypto/ccp/rte_ccp_pmd.c
++++ b/drivers/crypto/ccp/rte_ccp_pmd.c
+@@ -49,7 +49,7 @@ static unsigned int ccp_pmd_init_done;
+ uint8_t ccp_cryptodev_driver_id;
+
+ static struct ccp_session *
+-get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
++get_ccp_session(struct ccp_qp *qp, struct rte_crypto_op *op)
+ {
+ struct ccp_session *sess = NULL;
+
+@@ -61,6 +61,27 @@ get_ccp_session(struct ccp_qp *qp __rte_unused, struct rte_crypto_op *op)
+ get_session_private_data(
+ op->sym->session,
+ ccp_cryptodev_driver_id);
++ } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS) {
++ void *_sess;
++ void *_sess_private_data = NULL;
++
++ if (rte_mempool_get(qp->sess_mp, &_sess))
++ return NULL;
++ if (rte_mempool_get(qp->sess_mp, (void **)&_sess_private_data))
++ return NULL;
++
++ sess = (struct ccp_session *)_sess_private_data;
++
++ if (unlikely(ccp_set_session_parameters(sess,
++ op->sym->xform) != 0)) {
++ rte_mempool_put(qp->sess_mp, _sess);
++ rte_mempool_put(qp->sess_mp, _sess_private_data);
++ sess = NULL;
++ }
++ op->sym->session = (struct rte_cryptodev_sym_session *)_sess;
++ set_session_private_data(op->sym->session,
++ ccp_cryptodev_driver_id,
++ _sess_private_data);
+ }
+
+ return sess;
+@@ -108,10 +129,18 @@ ccp_pmd_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
+ uint16_t nb_ops)
+ {
+ struct ccp_qp *qp = queue_pair;
+- uint16_t nb_dequeued = 0;
++ uint16_t nb_dequeued = 0, i;
+
+ nb_dequeued = process_ops_to_dequeue(qp, ops, nb_ops);
+
++ /* Free session if a session-less crypto op */
++ for (i = 0; i < nb_dequeued; i++)
++ if (unlikely(ops[i]->sess_type ==
++ RTE_CRYPTO_OP_SESSIONLESS)) {
++ rte_mempool_put(qp->sess_mp,
++ ops[i]->sym->session);
++ ops[i]->sym->session = NULL;
++ }
+ qp->qp_stats.dequeued_count += nb_dequeued;
+
+ return nb_dequeued;
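
For the sessionless path added above, the application attaches the xform chain directly to the op instead of a session and lets the PMD build a temporary session from qp->sess_mp. A rough sketch, with the op pool, mbuf, xform and length all being illustrative assumptions:

	struct rte_crypto_op *op;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_SYMMETRIC);
	if (op == NULL)
		return -1;
	op->sess_type = RTE_CRYPTO_OP_SESSIONLESS;	/* mark explicitly */
	op->sym->m_src = mbuf;
	op->sym->xform = &cipher_xform;			/* no session attached */
	op->sym->cipher.data.offset = 0;
	op->sym->cipher.data.length = plaintext_len;

Both the session shell and its private data are drawn from qp->sess_mp in get_ccp_session(), so that mempool must be sized with the sessionless rate in mind.
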
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-08-20-crypto-ccp-support-stats-related-crypto-pmd-ops.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-08-20-crypto-ccp-support-stats-related-crypto-pmd-ops.patch
new file mode 100644
index 00000000..6dfdd020
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-08-20-crypto-ccp-support-stats-related-crypto-pmd-ops.patch
@@ -0,0 +1,71 @@
+From patchwork Fri Mar 9 08:35:08 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,08/20] crypto/ccp: support stats related crypto pmd ops
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35807
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-8-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:08 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_pmd_ops.c | 34 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 32 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index a02aa6f..d483a74 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -68,6 +68,36 @@ ccp_pmd_close(struct rte_cryptodev *dev __rte_unused)
+ }
+
+ static void
++ccp_pmd_stats_get(struct rte_cryptodev *dev,
++ struct rte_cryptodev_stats *stats)
++{
++ int qp_id;
++
++ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
++ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
++
++ stats->enqueued_count += qp->qp_stats.enqueued_count;
++ stats->dequeued_count += qp->qp_stats.dequeued_count;
++
++ stats->enqueue_err_count += qp->qp_stats.enqueue_err_count;
++ stats->dequeue_err_count += qp->qp_stats.dequeue_err_count;
++ }
++
++}
++
++static void
++ccp_pmd_stats_reset(struct rte_cryptodev *dev)
++{
++ int qp_id;
++
++ for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
++ struct ccp_qp *qp = dev->data->queue_pairs[qp_id];
++
++ memset(&qp->qp_stats, 0, sizeof(qp->qp_stats));
++ }
++}
++
++static void
+ ccp_pmd_info_get(struct rte_cryptodev *dev,
+ struct rte_cryptodev_info *dev_info)
+ {
+@@ -281,8 +311,8 @@ struct rte_cryptodev_ops ccp_ops = {
+ .dev_stop = ccp_pmd_stop,
+ .dev_close = ccp_pmd_close,
+
+- .stats_get = NULL,
+- .stats_reset = NULL,
++ .stats_get = ccp_pmd_stats_get,
++ .stats_reset = ccp_pmd_stats_reset,
+
+ .dev_infos_get = ccp_pmd_info_get,
+
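
The aggregated counters are then reachable through the usual stats calls; a small sketch follows (the device id and output format are placeholders):

	#include <stdio.h>
	#include <inttypes.h>

	struct rte_cryptodev_stats stats;

	if (rte_cryptodev_stats_get(dev_id, &stats) == 0)
		printf("enq %" PRIu64 " deq %" PRIu64 " enq_err %" PRIu64 "\n",
		       stats.enqueued_count, stats.dequeued_count,
		       stats.enqueue_err_count);
	rte_cryptodev_stats_reset(dev_id);	/* zeroes every per-qp qp_stats */

Since ccp_pmd_stats_get() only sums the per-queue-pair counters, the numbers are cumulative until a reset.
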
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-09-20-crypto-ccp-support-ccp-hwrng-feature.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-09-20-crypto-ccp-support-ccp-hwrng-feature.patch
new file mode 100644
index 00000000..2e8fe8c4
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-09-20-crypto-ccp-support-ccp-hwrng-feature.patch
@@ -0,0 +1,85 @@
+From patchwork Fri Mar 9 08:35:09 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,09/20] crypto/ccp: support ccp hwrng feature
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35808
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-9-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:09 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_dev.c | 20 ++++++++++++++++++++
+ drivers/crypto/ccp/ccp_dev.h | 11 +++++++++++
+ 2 files changed, 31 insertions(+)
+
+diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
+index fee90e3..d8c0ab4 100644
+--- a/drivers/crypto/ccp/ccp_dev.c
++++ b/drivers/crypto/ccp/ccp_dev.c
+@@ -88,6 +88,26 @@ ccp_allot_queue(struct rte_cryptodev *cdev, int slot_req)
+ return NULL;
+ }
+
++int
++ccp_read_hwrng(uint32_t *value)
++{
++ struct ccp_device *dev;
++
++ TAILQ_FOREACH(dev, &ccp_list, next) {
++ void *vaddr = (void *)(dev->pci.mem_resource[2].addr);
++
++ while (dev->hwrng_retries++ < CCP_MAX_TRNG_RETRIES) {
++ *value = CCP_READ_REG(vaddr, TRNG_OUT_REG);
++ if (*value) {
++ dev->hwrng_retries = 0;
++ return 0;
++ }
++ }
++ dev->hwrng_retries = 0;
++ }
++ return -1;
++}
++
+ static const struct rte_memzone *
+ ccp_queue_dma_zone_reserve(const char *queue_name,
+ uint32_t queue_size,
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+index cfb3b03..a5c9ef3 100644
+--- a/drivers/crypto/ccp/ccp_dev.h
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -47,6 +47,7 @@
+
+ /**< CCP sspecific */
+ #define MAX_HW_QUEUES 5
++#define CCP_MAX_TRNG_RETRIES 10
+
+ /**< CCP Register Mappings */
+ #define Q_MASK_REG 0x000
+@@ -223,6 +224,8 @@ struct ccp_device {
+ /**< protection for shared lsb region allocation */
+ int qidx;
+ /**< current queue index */
++ int hwrng_retries;
++ /**< retry counter for CCP TRNG */
+ } __rte_cache_aligned;
+
+ /**< CCP H/W engine related */
+@@ -454,4 +457,12 @@ int ccp_probe_devices(const struct rte_pci_id *ccp_id);
+ */
+ struct ccp_queue *ccp_allot_queue(struct rte_cryptodev *dev, int slot_req);
+
++/**
++ * read hwrng value
++ *
++ * @param trng_value data pointer to write RNG value
++ * @return 0 on success otherwise -1
++ */
++int ccp_read_hwrng(uint32_t *trng_value);
++
+ #endif /* _CCP_DEV_H_ */
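
ccp_read_hwrng() is an internal helper rather than a public API; a hypothetical in-driver caller (purely illustrative) would simply loop over it, accepting that it gives up after CCP_MAX_TRNG_RETRIES empty reads per device:

	uint32_t rnd[4];
	unsigned int i;

	for (i = 0; i < RTE_DIM(rnd); i++)
		if (ccp_read_hwrng(&rnd[i]) < 0)
			break;	/* no entropy available from any CCP TRNG */
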
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-10-20-crypto-ccp-support-aes-cipher-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-10-20-crypto-ccp-support-aes-cipher-algo.patch
new file mode 100644
index 00000000..b00686e6
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-10-20-crypto-ccp-support-aes-cipher-algo.patch
@@ -0,0 +1,449 @@
+From patchwork Fri Mar 9 08:35:10 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,10/20] crypto/ccp: support aes cipher algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35809
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-10-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:10 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 197 ++++++++++++++++++++++++++++++++++++++-
+ drivers/crypto/ccp/ccp_crypto.h | 13 +++
+ drivers/crypto/ccp/ccp_dev.h | 53 +++++++++++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 60 ++++++++++++
+ 4 files changed, 321 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index c17e84f..b097355 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -80,6 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+ {
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
++ size_t i;
+
+ cipher_xform = &xform->cipher;
+
+@@ -99,6 +100,21 @@ ccp_configure_session_cipher(struct ccp_session *sess,
+ sess->iv.length = cipher_xform->iv.length;
+
+ switch (cipher_xform->algo) {
++ case RTE_CRYPTO_CIPHER_AES_CTR:
++ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CTR;
++ sess->cipher.um.aes_mode = CCP_AES_MODE_CTR;
++ sess->cipher.engine = CCP_ENGINE_AES;
++ break;
++ case RTE_CRYPTO_CIPHER_AES_ECB:
++ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
++ sess->cipher.um.aes_mode = CCP_AES_MODE_ECB;
++ sess->cipher.engine = CCP_ENGINE_AES;
++ break;
++ case RTE_CRYPTO_CIPHER_AES_CBC:
++ sess->cipher.algo = CCP_CIPHER_ALGO_AES_CBC;
++ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
++ sess->cipher.engine = CCP_ENGINE_AES;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+@@ -106,10 +122,27 @@ ccp_configure_session_cipher(struct ccp_session *sess,
+
+
+ switch (sess->cipher.engine) {
++ case CCP_ENGINE_AES:
++ if (sess->cipher.key_length == 16)
++ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
++ else if (sess->cipher.key_length == 24)
++ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
++ else if (sess->cipher.key_length == 32)
++ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
++ else {
++ CCP_LOG_ERR("Invalid cipher key length");
++ return -1;
++ }
++ for (i = 0; i < sess->cipher.key_length ; i++)
++ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
++ sess->cipher.key[i];
++ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+ }
++ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
++ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+ }
+
+@@ -235,6 +268,18 @@ ccp_cipher_slot(struct ccp_session *session)
+ int count = 0;
+
+ switch (session->cipher.algo) {
++ case CCP_CIPHER_ALGO_AES_CBC:
++ count = 2;
++ /**< op + passthrough for iv */
++ break;
++ case CCP_CIPHER_ALGO_AES_ECB:
++ count = 1;
++ /**<only op*/
++ break;
++ case CCP_CIPHER_ALGO_AES_CTR:
++ count = 2;
++ /**< op + passthrough for iv */
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+@@ -297,10 +342,146 @@ ccp_compute_slot_count(struct ccp_session *session)
+ return count;
+ }
+
++static void
++ccp_perform_passthru(struct ccp_passthru *pst,
++ struct ccp_queue *cmd_q)
++{
++ struct ccp_desc *desc;
++ union ccp_function function;
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_PASSTHRU;
++
++ CCP_CMD_SOC(desc) = 0;
++ CCP_CMD_IOC(desc) = 0;
++ CCP_CMD_INIT(desc) = 0;
++ CCP_CMD_EOM(desc) = 0;
++ CCP_CMD_PROT(desc) = 0;
++
++ function.raw = 0;
++ CCP_PT_BYTESWAP(&function) = pst->byte_swap;
++ CCP_PT_BITWISE(&function) = pst->bit_mod;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = pst->len;
++
++ if (pst->dir) {
++ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(pst->src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
++ CCP_CMD_DST_HI(desc) = 0;
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
++
++ if (pst->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_key;
++ } else {
++
++ CCP_CMD_SRC_LO(desc) = (uint32_t)(pst->src_addr);
++ CCP_CMD_SRC_HI(desc) = 0;
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SB;
++
++ CCP_CMD_DST_LO(desc) = (uint32_t)(pst->dest_addr);
++ CCP_CMD_DST_HI(desc) = high32_value(pst->dest_addr);
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++ }
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++}
++
++static int
++ccp_perform_aes(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q,
++ struct ccp_batch_info *b_info)
++{
++ struct ccp_session *session;
++ union ccp_function function;
++ uint8_t *lsb_buf;
++ struct ccp_passthru pst = {0};
++ struct ccp_desc *desc;
++ phys_addr_t src_addr, dest_addr, key_addr;
++ uint8_t *iv;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++ function.raw = 0;
++
++ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
++ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB) {
++ if (session->cipher.um.aes_mode == CCP_AES_MODE_CTR) {
++ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE,
++ iv, session->iv.length);
++ pst.src_addr = (phys_addr_t)session->cipher.nonce_phys;
++ CCP_AES_SIZE(&function) = 0x1F;
++ } else {
++ lsb_buf =
++ &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
++ rte_memcpy(lsb_buf +
++ (CCP_SB_BYTES - session->iv.length),
++ iv, session->iv.length);
++ pst.src_addr = b_info->lsb_buf_phys +
++ (b_info->lsb_buf_idx * CCP_SB_BYTES);
++ b_info->lsb_buf_idx++;
++ }
++
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++ }
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->cipher.data.offset);
++ if (likely(op->sym->m_dst != NULL))
++ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
++ op->sym->cipher.data.offset);
++ else
++ dest_addr = src_addr;
++ key_addr = session->cipher.key_phys;
++
++ /* prepare desc for aes command */
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++
++ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
++ CCP_AES_MODE(&function) = session->cipher.um.aes_mode;
++ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
++ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ if (session->cipher.um.aes_mode != CCP_AES_MODE_ECB)
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++}
++
+ static inline int
+ ccp_crypto_cipher(struct rte_crypto_op *op,
+- struct ccp_queue *cmd_q __rte_unused,
+- struct ccp_batch_info *b_info __rte_unused)
++ struct ccp_queue *cmd_q,
++ struct ccp_batch_info *b_info)
+ {
+ int result = 0;
+ struct ccp_session *session;
+@@ -310,6 +491,18 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
+ ccp_cryptodev_driver_id);
+
+ switch (session->cipher.algo) {
++ case CCP_CIPHER_ALGO_AES_CBC:
++ result = ccp_perform_aes(op, cmd_q, b_info);
++ b_info->desccnt += 2;
++ break;
++ case CCP_CIPHER_ALGO_AES_CTR:
++ result = ccp_perform_aes(op, cmd_q, b_info);
++ b_info->desccnt += 2;
++ break;
++ case CCP_CIPHER_ALGO_AES_ECB:
++ result = ccp_perform_aes(op, cmd_q, b_info);
++ b_info->desccnt += 1;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index 4455497..614cd47 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -46,7 +46,20 @@
+
+ #include "ccp_dev.h"
+
++#define AES_BLOCK_SIZE 16
++#define CMAC_PAD_VALUE 0x80
++#define CTR_NONCE_SIZE 4
++#define CTR_IV_SIZE 8
+ #define CCP_SHA3_CTX_SIZE 200
++
++/**Macro helpers for CCP command creation*/
++#define CCP_AES_SIZE(p) ((p)->aes.size)
++#define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
++#define CCP_AES_MODE(p) ((p)->aes.mode)
++#define CCP_AES_TYPE(p) ((p)->aes.type)
++#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
++#define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
++
+ /**
+ * CCP supported AES modes
+ */
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+index a5c9ef3..759afc1 100644
+--- a/drivers/crypto/ccp/ccp_dev.h
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -48,6 +48,7 @@
+ /**< CCP sspecific */
+ #define MAX_HW_QUEUES 5
+ #define CCP_MAX_TRNG_RETRIES 10
++#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
+
+ /**< CCP Register Mappings */
+ #define Q_MASK_REG 0x000
+@@ -104,10 +105,52 @@
+ #define LSB_SIZE 16
+ #define LSB_ITEM_SIZE 32
+ #define SLSB_MAP_SIZE (MAX_LSB_CNT * LSB_SIZE)
++#define LSB_ENTRY_NUMBER(LSB_ADDR) (LSB_ADDR / LSB_ITEM_SIZE)
+
+ /* General CCP Defines */
+
+ #define CCP_SB_BYTES 32
++/* Word 0 */
++#define CCP_CMD_DW0(p) ((p)->dw0)
++#define CCP_CMD_SOC(p) (CCP_CMD_DW0(p).soc)
++#define CCP_CMD_IOC(p) (CCP_CMD_DW0(p).ioc)
++#define CCP_CMD_INIT(p) (CCP_CMD_DW0(p).init)
++#define CCP_CMD_EOM(p) (CCP_CMD_DW0(p).eom)
++#define CCP_CMD_FUNCTION(p) (CCP_CMD_DW0(p).function)
++#define CCP_CMD_ENGINE(p) (CCP_CMD_DW0(p).engine)
++#define CCP_CMD_PROT(p) (CCP_CMD_DW0(p).prot)
++
++/* Word 1 */
++#define CCP_CMD_DW1(p) ((p)->length)
++#define CCP_CMD_LEN(p) (CCP_CMD_DW1(p))
++
++/* Word 2 */
++#define CCP_CMD_DW2(p) ((p)->src_lo)
++#define CCP_CMD_SRC_LO(p) (CCP_CMD_DW2(p))
++
++/* Word 3 */
++#define CCP_CMD_DW3(p) ((p)->dw3)
++#define CCP_CMD_SRC_MEM(p) ((p)->dw3.src_mem)
++#define CCP_CMD_SRC_HI(p) ((p)->dw3.src_hi)
++#define CCP_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id)
++#define CCP_CMD_FIX_SRC(p) ((p)->dw3.fixed)
++
++/* Words 4/5 */
++#define CCP_CMD_DW4(p) ((p)->dw4)
++#define CCP_CMD_DST_LO(p) (CCP_CMD_DW4(p).dst_lo)
++#define CCP_CMD_DW5(p) ((p)->dw5.fields.dst_hi)
++#define CCP_CMD_DST_HI(p) (CCP_CMD_DW5(p))
++#define CCP_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem)
++#define CCP_CMD_FIX_DST(p) ((p)->dw5.fields.fixed)
++#define CCP_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo)
++#define CCP_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi)
++
++/* Word 6/7 */
++#define CCP_CMD_DW6(p) ((p)->key_lo)
++#define CCP_CMD_KEY_LO(p) (CCP_CMD_DW6(p))
++#define CCP_CMD_DW7(p) ((p)->dw7)
++#define CCP_CMD_KEY_HI(p) ((p)->dw7.key_hi)
++#define CCP_CMD_KEY_MEM(p) ((p)->dw7.key_mem)
+
+ /* bitmap */
+ enum {
+@@ -412,6 +455,16 @@ struct ccp_desc {
+ };
+
+ /**
++ * ccp memory type
++ */
++enum ccp_memtype {
++ CCP_MEMTYPE_SYSTEM = 0,
++ CCP_MEMTYPE_SB,
++ CCP_MEMTYPE_LOCAL,
++ CCP_MEMTYPE_LAST,
++};
++
++/**
+ * cmd id to follow order
+ */
+ enum ccp_cmd_order {
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index d483a74..5f56242 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -39,6 +39,66 @@
+ #include "ccp_crypto.h"
+
+ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
++ { /* AES ECB */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
++ {.cipher = {
++ .algo = RTE_CRYPTO_CIPHER_AES_ECB,
++ .block_size = 16,
++ .key_size = {
++ .min = 16,
++ .max = 32,
++ .increment = 8
++ },
++ .iv_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ }
++ }, }
++ }, }
++ },
++ { /* AES CBC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
++ {.cipher = {
++ .algo = RTE_CRYPTO_CIPHER_AES_CBC,
++ .block_size = 16,
++ .key_size = {
++ .min = 16,
++ .max = 32,
++ .increment = 8
++ },
++ .iv_size = {
++ .min = 16,
++ .max = 16,
++ .increment = 0
++ }
++ }, }
++ }, }
++ },
++ { /* AES CTR */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
++ {.cipher = {
++ .algo = RTE_CRYPTO_CIPHER_AES_CTR,
++ .block_size = 16,
++ .key_size = {
++ .min = 16,
++ .max = 32,
++ .increment = 8
++ },
++ .iv_size = {
++ .min = 16,
++ .max = 16,
++ .increment = 0
++ }
++ }, }
++ }, }
++ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+ };
+
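
A cipher transform matching the AES-CBC capability advertised above could be filled in as below. This is a sketch only: the key buffer, the 128-bit key size and the IV placement are assumptions, with the IV carried in the op's private area as DPDK test code conventionally does.

	#define IV_OFFSET (sizeof(struct rte_crypto_op) + \
			   sizeof(struct rte_crypto_sym_op))

	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
			.key = { .data = aes_key, .length = 16 },
			.iv = { .offset = IV_OFFSET, .length = 16 },
		},
	};

ccp_configure_session_cipher() byte-reverses the key into key_ccp[], and ECB is advertised with a zero-length IV because ccp_perform_aes() skips the IV pass-through in that mode.
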
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-11-20-crypto-ccp-support-3des-cipher-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-11-20-crypto-ccp-support-3des-cipher-algo.patch
new file mode 100644
index 00000000..b51879be
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-11-20-crypto-ccp-support-3des-cipher-algo.patch
@@ -0,0 +1,244 @@
+From patchwork Fri Mar 9 08:35:11 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,11/20] crypto/ccp: support 3des cipher algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35810
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-11-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:11 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 132 ++++++++++++++++++++++++++++++++++++++-
+ drivers/crypto/ccp/ccp_crypto.h | 3 +
+ drivers/crypto/ccp/ccp_pmd_ops.c | 20 ++++++
+ 3 files changed, 154 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index b097355..0660761 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -80,7 +80,7 @@ ccp_configure_session_cipher(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+ {
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+- size_t i;
++ size_t i, j, x;
+
+ cipher_xform = &xform->cipher;
+
+@@ -115,6 +115,11 @@ ccp_configure_session_cipher(struct ccp_session *sess,
+ sess->cipher.um.aes_mode = CCP_AES_MODE_CBC;
+ sess->cipher.engine = CCP_ENGINE_AES;
+ break;
++ case RTE_CRYPTO_CIPHER_3DES_CBC:
++ sess->cipher.algo = CCP_CIPHER_ALGO_3DES_CBC;
++ sess->cipher.um.des_mode = CCP_DES_MODE_CBC;
++ sess->cipher.engine = CCP_ENGINE_3DES;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo");
+ return -1;
+@@ -137,6 +142,20 @@ ccp_configure_session_cipher(struct ccp_session *sess,
+ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
+ sess->cipher.key[i];
+ break;
++ case CCP_ENGINE_3DES:
++ if (sess->cipher.key_length == 16)
++ sess->cipher.ut.des_type = CCP_DES_TYPE_128;
++ else if (sess->cipher.key_length == 24)
++ sess->cipher.ut.des_type = CCP_DES_TYPE_192;
++ else {
++ CCP_LOG_ERR("Invalid cipher key length");
++ return -1;
++ }
++ for (j = 0, x = 0; j < sess->cipher.key_length/8; j++, x += 8)
++ for (i = 0; i < 8; i++)
++ sess->cipher.key_ccp[(8 + x) - i - 1] =
++ sess->cipher.key[i + x];
++ break;
+ default:
+ CCP_LOG_ERR("Invalid CCP Engine");
+ return -ENOTSUP;
+@@ -280,6 +299,10 @@ ccp_cipher_slot(struct ccp_session *session)
+ count = 2;
+ /**< op + passthrough for iv */
+ break;
++ case CCP_CIPHER_ALGO_3DES_CBC:
++ count = 2;
++ /**< op + passthrough for iv */
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+@@ -478,6 +501,109 @@ ccp_perform_aes(struct rte_crypto_op *op,
+ return 0;
+ }
+
++static int
++ccp_perform_3des(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q,
++ struct ccp_batch_info *b_info)
++{
++ struct ccp_session *session;
++ union ccp_function function;
++ unsigned char *lsb_buf;
++ struct ccp_passthru pst;
++ struct ccp_desc *desc;
++ uint32_t tail;
++ uint8_t *iv;
++ phys_addr_t src_addr, dest_addr, key_addr;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
++ switch (session->cipher.um.des_mode) {
++ case CCP_DES_MODE_CBC:
++ lsb_buf = &(b_info->lsb_buf[b_info->lsb_buf_idx*CCP_SB_BYTES]);
++ b_info->lsb_buf_idx++;
++
++ rte_memcpy(lsb_buf + (CCP_SB_BYTES - session->iv.length),
++ iv, session->iv.length);
++
++ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *) lsb_buf);
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++ break;
++ case CCP_DES_MODE_CFB:
++ case CCP_DES_MODE_ECB:
++ CCP_LOG_ERR("Unsupported DES cipher mode");
++ return -ENOTSUP;
++ }
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->cipher.data.offset);
++ if (unlikely(op->sym->m_dst != NULL))
++ dest_addr =
++ rte_pktmbuf_mtophys_offset(op->sym->m_dst,
++ op->sym->cipher.data.offset);
++ else
++ dest_addr = src_addr;
++
++ key_addr = rte_mem_virt2phy(session->cipher.key_ccp);
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++
++ memset(desc, 0, Q_DESC_SIZE);
++
++ /* prepare desc for des command */
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_3DES;
++
++ CCP_CMD_SOC(desc) = 0;
++ CCP_CMD_IOC(desc) = 0;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_PROT(desc) = 0;
++
++ function.raw = 0;
++ CCP_DES_ENCRYPT(&function) = session->cipher.dir;
++ CCP_DES_MODE(&function) = session->cipher.um.des_mode;
++ CCP_DES_TYPE(&function) = session->cipher.ut.des_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = op->sym->cipher.data.length;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
++ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ if (session->cipher.um.des_mode)
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++
++ /* Write the new tail address back to the queue register */
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ /* Turn the queue back on using our cached control register */
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++}
++
+ static inline int
+ ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+@@ -503,6 +629,10 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
+ result = ccp_perform_aes(op, cmd_q, b_info);
+ b_info->desccnt += 1;
+ break;
++ case CCP_CIPHER_ALGO_3DES_CBC:
++ result = ccp_perform_3des(op, cmd_q, b_info);
++ b_info->desccnt += 2;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported cipher algo %d",
+ session->cipher.algo);
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index 614cd47..d528ec9 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -57,6 +57,9 @@
+ #define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt)
+ #define CCP_AES_MODE(p) ((p)->aes.mode)
+ #define CCP_AES_TYPE(p) ((p)->aes.type)
++#define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
++#define CCP_DES_MODE(p) ((p)->des.mode)
++#define CCP_DES_TYPE(p) ((p)->des.type)
+ #define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+ #define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index 5f56242..3a16be8 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -99,6 +99,26 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* 3DES CBC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
++ {.cipher = {
++ .algo = RTE_CRYPTO_CIPHER_3DES_CBC,
++ .block_size = 8,
++ .key_size = {
++ .min = 16,
++ .max = 24,
++ .increment = 8
++ },
++ .iv_size = {
++ .min = 8,
++ .max = 8,
++ .increment = 0
++ }
++ }, }
++ }, }
++ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+ };
+
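
The matching 3DES-CBC transform is analogous, with the smaller block and IV size from the capability entry (again a sketch; the 24-byte key buffer is an assumption and the IV_OFFSET convention from the AES sketch above is reused):

	struct rte_crypto_sym_xform des3_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.next = NULL,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_3DES_CBC,
			.key = { .data = des3_key, .length = 24 },
			.iv = { .offset = IV_OFFSET, .length = 8 },
		},
	};

Note that ccp_configure_session_cipher() performs the key byte swap per 8-byte word for 3DES, and the 16-byte two-key variant is accepted alongside the full 24-byte key.
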
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-12-20-crypto-ccp-support-aes-cmac-auth-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-12-20-crypto-ccp-support-aes-cmac-auth-algo.patch
new file mode 100644
index 00000000..b598e30e
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-12-20-crypto-ccp-support-aes-cmac-auth-algo.patch
@@ -0,0 +1,388 @@
+From patchwork Fri Mar 9 08:35:12 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,12/20] crypto/ccp: support aes-cmac auth algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35811
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-12-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:12 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 277 ++++++++++++++++++++++++++++++++++++++-
+ drivers/crypto/ccp/ccp_pmd_ops.c | 20 +++
+ 2 files changed, 295 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index 0660761..6e593d8 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -36,6 +36,8 @@
+ #include <sys/queue.h>
+ #include <sys/types.h>
+ #include <unistd.h>
++#include <openssl/cmac.h> /*sub key apis*/
++#include <openssl/evp.h> /*sub key apis*/
+
+ #include <rte_hexdump.h>
+ #include <rte_memzone.h>
+@@ -74,6 +76,84 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+ return res;
+ }
+
++/* prepare temporary keys K1 and K2 */
++static void prepare_key(unsigned char *k, unsigned char *l, int bl)
++{
++ int i;
++ /* Shift block to left, including carry */
++ for (i = 0; i < bl; i++) {
++ k[i] = l[i] << 1;
++ if (i < bl - 1 && l[i + 1] & 0x80)
++ k[i] |= 1;
++ }
++ /* If MSB set fixup with R */
++ if (l[0] & 0x80)
++ k[bl - 1] ^= bl == 16 ? 0x87 : 0x1b;
++}
++
++/* subkeys K1 and K2 generation for CMAC */
++static int
++generate_cmac_subkeys(struct ccp_session *sess)
++{
++ const EVP_CIPHER *algo;
++ EVP_CIPHER_CTX *ctx;
++ unsigned char *ccp_ctx;
++ size_t i;
++ int dstlen, totlen;
++ unsigned char zero_iv[AES_BLOCK_SIZE] = {0};
++ unsigned char dst[2 * AES_BLOCK_SIZE] = {0};
++ unsigned char k1[AES_BLOCK_SIZE] = {0};
++ unsigned char k2[AES_BLOCK_SIZE] = {0};
++
++ if (sess->auth.ut.aes_type == CCP_AES_TYPE_128)
++ algo = EVP_aes_128_cbc();
++ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_192)
++ algo = EVP_aes_192_cbc();
++ else if (sess->auth.ut.aes_type == CCP_AES_TYPE_256)
++ algo = EVP_aes_256_cbc();
++ else {
++ CCP_LOG_ERR("Invalid CMAC type length");
++ return -1;
++ }
++
++ ctx = EVP_CIPHER_CTX_new();
++ if (!ctx) {
++ CCP_LOG_ERR("ctx creation failed");
++ return -1;
++ }
++ if (EVP_EncryptInit(ctx, algo, (unsigned char *)sess->auth.key,
++ (unsigned char *)zero_iv) <= 0)
++ goto key_generate_err;
++ if (EVP_CIPHER_CTX_set_padding(ctx, 0) <= 0)
++ goto key_generate_err;
++ if (EVP_EncryptUpdate(ctx, dst, &dstlen, zero_iv,
++ AES_BLOCK_SIZE) <= 0)
++ goto key_generate_err;
++ if (EVP_EncryptFinal_ex(ctx, dst + dstlen, &totlen) <= 0)
++ goto key_generate_err;
++
++ memset(sess->auth.pre_compute, 0, CCP_SB_BYTES * 2);
++
++ ccp_ctx = (unsigned char *)(sess->auth.pre_compute + CCP_SB_BYTES - 1);
++ prepare_key(k1, dst, AES_BLOCK_SIZE);
++ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
++ *ccp_ctx = k1[i];
++
++ ccp_ctx = (unsigned char *)(sess->auth.pre_compute +
++ (2 * CCP_SB_BYTES) - 1);
++ prepare_key(k2, k1, AES_BLOCK_SIZE);
++ for (i = 0; i < AES_BLOCK_SIZE; i++, ccp_ctx--)
++ *ccp_ctx = k2[i];
++
++ EVP_CIPHER_CTX_free(ctx);
++
++ return 0;
++
++key_generate_err:
++ CCP_LOG_ERR("CMAC Init failed");
++ return -1;
++}
++
+ /* configure session */
+ static int
+ ccp_configure_session_cipher(struct ccp_session *sess,
+@@ -170,6 +250,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+ {
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
++ size_t i;
+
+ auth_xform = &xform->auth;
+
+@@ -179,6 +260,33 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
++ case RTE_CRYPTO_AUTH_AES_CMAC:
++ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
++ sess->auth.engine = CCP_ENGINE_AES;
++ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
++ sess->auth.key_length = auth_xform->key.length;
++ /**<padding and hash result*/
++ sess->auth.ctx_len = CCP_SB_BYTES << 1;
++ sess->auth.offset = AES_BLOCK_SIZE;
++ sess->auth.block_size = AES_BLOCK_SIZE;
++ if (sess->auth.key_length == 16)
++ sess->auth.ut.aes_type = CCP_AES_TYPE_128;
++ else if (sess->auth.key_length == 24)
++ sess->auth.ut.aes_type = CCP_AES_TYPE_192;
++ else if (sess->auth.key_length == 32)
++ sess->auth.ut.aes_type = CCP_AES_TYPE_256;
++ else {
++ CCP_LOG_ERR("Invalid CMAC key length");
++ return -1;
++ }
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ sess->auth.key_length);
++ for (i = 0; i < sess->auth.key_length; i++)
++ sess->auth.key_ccp[sess->auth.key_length - i - 1] =
++ sess->auth.key[i];
++ if (generate_cmac_subkeys(sess))
++ return -1;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported hash algo");
+ return -ENOTSUP;
+@@ -316,6 +424,15 @@ ccp_auth_slot(struct ccp_session *session)
+ int count = 0;
+
+ switch (session->auth.algo) {
++ case CCP_AUTH_ALGO_AES_CMAC:
++ count = 4;
++ /**
++ * op
++ * extra descriptor in padding case
++ * (k1/k2(255:128) with iv(127:0))
++ * Retrieve result
++ */
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+@@ -415,6 +532,158 @@ ccp_perform_passthru(struct ccp_passthru *pst,
+ }
+
+ static int
++ccp_perform_aes_cmac(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q)
++{
++ struct ccp_session *session;
++ union ccp_function function;
++ struct ccp_passthru pst;
++ struct ccp_desc *desc;
++ uint32_t tail;
++ uint8_t *src_tb, *append_ptr, *ctx_addr;
++ phys_addr_t src_addr, dest_addr, key_addr;
++ int length, non_align_len;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++ key_addr = rte_mem_virt2phy(session->auth.key_ccp);
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->auth.data.offset);
++ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
++ session->auth.ctx_len);
++ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
++
++ function.raw = 0;
++ CCP_AES_ENCRYPT(&function) = CCP_CIPHER_DIR_ENCRYPT;
++ CCP_AES_MODE(&function) = session->auth.um.aes_mode;
++ CCP_AES_TYPE(&function) = session->auth.ut.aes_type;
++
++ if (op->sym->auth.data.length % session->auth.block_size == 0) {
++
++ ctx_addr = session->auth.pre_compute;
++ memset(ctx_addr, 0, AES_BLOCK_SIZE);
++ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ /* prepare desc for aes-cmac command */
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++
++ tail =
++ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++ } else {
++ ctx_addr = session->auth.pre_compute + CCP_SB_BYTES;
++ memset(ctx_addr, 0, AES_BLOCK_SIZE);
++ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ length = (op->sym->auth.data.length / AES_BLOCK_SIZE);
++ length *= AES_BLOCK_SIZE;
++ non_align_len = op->sym->auth.data.length - length;
++ /* prepare desc for aes-cmac command */
++ /*Command 1*/
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = length;
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ /*Command 2*/
++ append_ptr = append_ptr + CCP_SB_BYTES;
++ memset(append_ptr, 0, AES_BLOCK_SIZE);
++ src_tb = rte_pktmbuf_mtod_offset(op->sym->m_src,
++ uint8_t *,
++ op->sym->auth.data.offset +
++ length);
++ rte_memcpy(append_ptr, src_tb, non_align_len);
++ append_ptr[non_align_len] = CMAC_PAD_VALUE;
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)(dest_addr + CCP_SB_BYTES));
++ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr + CCP_SB_BYTES);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++ tail =
++ (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++ }
++ /* Retrieve result */
++ pst.dest_addr = dest_addr;
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++}
++
++static int
+ ccp_perform_aes(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+ struct ccp_batch_info *b_info)
+@@ -643,8 +912,8 @@ ccp_crypto_cipher(struct rte_crypto_op *op,
+
+ static inline int
+ ccp_crypto_auth(struct rte_crypto_op *op,
+- struct ccp_queue *cmd_q __rte_unused,
+- struct ccp_batch_info *b_info __rte_unused)
++ struct ccp_queue *cmd_q,
++ struct ccp_batch_info *b_info)
+ {
+
+ int result = 0;
+@@ -655,6 +924,10 @@ ccp_crypto_auth(struct rte_crypto_op *op,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
++ case CCP_AUTH_ALGO_AES_CMAC:
++ result = ccp_perform_aes_cmac(op, cmd_q);
++ b_info->desccnt += 4;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported auth algo %d",
+ session->auth.algo);
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index 3a16be8..1fb6a6d 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -39,6 +39,26 @@
+ #include "ccp_crypto.h"
+
+ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
++ { /*AES-CMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_AES_CMAC,
++ .block_size = 16,
++ .key_size = {
++ .min = 16,
++ .max = 32,
++ .increment = 8
++ },
++ .digest_size = {
++ .min = 16,
++ .max = 16,
++ .increment = 0
++ },
++ }, }
++ }, }
++ },
+ { /* AES ECB */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
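Note: the AES-CMAC patch above wires the new ccp_perform_aes_cmac() path into ccp_crypto_auth() and advertises the algorithm in ccp_pmd_capabilities[]. The following minimal sketch is not part of the patch; the helper name, key buffer and GENERATE direction are illustrative assumptions. It only shows how an application built against this generation of the cryptodev API could request the advertised capability through an auth transform.

#include <stdint.h>
#include <string.h>
#include <rte_crypto_sym.h>

/* Illustrative helper (not part of the patch): fill an AES-CMAC auth
 * transform matching the capability above (16-byte block and digest,
 * 16/24/32-byte key). cmac_key/cmac_key_len come from the caller. */
static void
make_cmac_xform(struct rte_crypto_sym_xform *xform,
		uint8_t *cmac_key, uint16_t cmac_key_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	xform->next = NULL;
	xform->auth.algo = RTE_CRYPTO_AUTH_AES_CMAC;
	xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	xform->auth.key.data = cmac_key;
	xform->auth.key.length = cmac_key_len;	/* 16, 24 or 32 bytes */
	xform->auth.digest_length = 16;		/* full CMAC tag */
}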
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-13-20-crypto-ccp-support-aes-gcm-aead-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-13-20-crypto-ccp-support-aes-gcm-aead-algo.patch
new file mode 100644
index 00000000..036dcc17
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-13-20-crypto-ccp-support-aes-gcm-aead-algo.patch
@@ -0,0 +1,357 @@
+From patchwork Fri Mar 9 08:35:13 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,13/20] crypto/ccp: support aes-gcm aead algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35812
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-13-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:13 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 235 ++++++++++++++++++++++++++++++++++++++-
+ drivers/crypto/ccp/ccp_pmd_ops.c | 30 +++++
+ 2 files changed, 261 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index 6e593d8..ad9fa8e 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -299,6 +299,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
+ const struct rte_crypto_sym_xform *xform)
+ {
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
++ size_t i;
+
+ aead_xform = &xform->aead;
+
+@@ -313,6 +314,7 @@ ccp_configure_session_aead(struct ccp_session *sess,
+ sess->cipher.dir = CCP_CIPHER_DIR_DECRYPT;
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ }
++ sess->aead_algo = aead_xform->algo;
+ sess->auth.aad_length = aead_xform->aad_length;
+ sess->auth.digest_length = aead_xform->digest_length;
+
+@@ -321,10 +323,37 @@ ccp_configure_session_aead(struct ccp_session *sess,
+ sess->iv.length = aead_xform->iv.length;
+
+ switch (aead_xform->algo) {
++ case RTE_CRYPTO_AEAD_AES_GCM:
++ sess->cipher.algo = CCP_CIPHER_ALGO_AES_GCM;
++ sess->cipher.um.aes_mode = CCP_AES_MODE_GCTR;
++ sess->cipher.engine = CCP_ENGINE_AES;
++ if (sess->cipher.key_length == 16)
++ sess->cipher.ut.aes_type = CCP_AES_TYPE_128;
++ else if (sess->cipher.key_length == 24)
++ sess->cipher.ut.aes_type = CCP_AES_TYPE_192;
++ else if (sess->cipher.key_length == 32)
++ sess->cipher.ut.aes_type = CCP_AES_TYPE_256;
++ else {
++ CCP_LOG_ERR("Invalid aead key length");
++ return -1;
++ }
++ for (i = 0; i < sess->cipher.key_length; i++)
++ sess->cipher.key_ccp[sess->cipher.key_length - i - 1] =
++ sess->cipher.key[i];
++ sess->auth.algo = CCP_AUTH_ALGO_AES_GCM;
++ sess->auth.engine = CCP_ENGINE_AES;
++ sess->auth.um.aes_mode = CCP_AES_MODE_GHASH;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = 0;
++ sess->auth.block_size = AES_BLOCK_SIZE;
++ sess->cmd_id = CCP_CMD_COMBINED;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo");
+ return -ENOTSUP;
+ }
++ sess->cipher.nonce_phys = rte_mem_virt2phy(sess->cipher.nonce);
++ sess->cipher.key_phys = rte_mem_virt2phy(sess->cipher.key_ccp);
+ return 0;
+ }
+
+@@ -447,10 +476,27 @@ ccp_aead_slot(struct ccp_session *session)
+ int count = 0;
+
+ switch (session->aead_algo) {
++ case RTE_CRYPTO_AEAD_AES_GCM:
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+ }
++ switch (session->auth.algo) {
++ case CCP_AUTH_ALGO_AES_GCM:
++ count = 5;
++ /**
++ * 1. Passthru iv
++ * 2. Hash AAD
++ * 3. GCTR
++ * 4. Reload passthru
++ * 5. Hash Final tag
++ */
++ break;
++ default:
++ CCP_LOG_ERR("Unsupported combined auth ALGO %d",
++ session->auth.algo);
++ }
+ return count;
+ }
+
+@@ -873,6 +919,179 @@ ccp_perform_3des(struct rte_crypto_op *op,
+ return 0;
+ }
+
++static int
++ccp_perform_aes_gcm(struct rte_crypto_op *op, struct ccp_queue *cmd_q)
++{
++ struct ccp_session *session;
++ union ccp_function function;
++ uint8_t *iv;
++ struct ccp_passthru pst;
++ struct ccp_desc *desc;
++ uint32_t tail;
++ uint64_t *temp;
++ phys_addr_t src_addr, dest_addr, key_addr, aad_addr;
++ phys_addr_t digest_dest_addr;
++ int length, non_align_len;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++ iv = rte_crypto_op_ctod_offset(op, uint8_t *, session->iv.offset);
++ key_addr = session->cipher.key_phys;
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->aead.data.offset);
++ if (unlikely(op->sym->m_dst != NULL))
++ dest_addr = rte_pktmbuf_mtophys_offset(op->sym->m_dst,
++ op->sym->aead.data.offset);
++ else
++ dest_addr = src_addr;
++ rte_pktmbuf_append(op->sym->m_src, session->auth.ctx_len);
++ digest_dest_addr = op->sym->aead.digest.phys_addr;
++ temp = (uint64_t *)(op->sym->aead.digest.data + AES_BLOCK_SIZE);
++ *temp++ = rte_bswap64(session->auth.aad_length << 3);
++ *temp = rte_bswap64(op->sym->aead.data.length << 3);
++
++ non_align_len = op->sym->aead.data.length % AES_BLOCK_SIZE;
++ length = CCP_ALIGN(op->sym->aead.data.length, AES_BLOCK_SIZE);
++
++ aad_addr = op->sym->aead.aad.phys_addr;
++
++ /* CMD1 IV Passthru */
++ rte_memcpy(session->cipher.nonce + AES_BLOCK_SIZE, iv,
++ session->iv.length);
++ pst.src_addr = session->cipher.nonce_phys;
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ /* CMD2 GHASH-AAD */
++ function.raw = 0;
++ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_AAD;
++ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
++ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = session->auth.aad_length;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)aad_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(aad_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ /* CMD3 : GCTR Plain text */
++ function.raw = 0;
++ CCP_AES_ENCRYPT(&function) = session->cipher.dir;
++ CCP_AES_MODE(&function) = CCP_AES_MODE_GCTR;
++ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
++ if (non_align_len == 0)
++ CCP_AES_SIZE(&function) = (AES_BLOCK_SIZE << 3) - 1;
++ else
++ CCP_AES_SIZE(&function) = (non_align_len << 3) - 1;
++
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = length;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
++ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
++	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ /* CMD4 : PT to copy IV */
++ pst.src_addr = session->cipher.nonce_phys;
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_iv * CCP_SB_BYTES);
++ pst.len = AES_BLOCK_SIZE;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ /* CMD5 : GHASH-Final */
++ function.raw = 0;
++ CCP_AES_ENCRYPT(&function) = CCP_AES_MODE_GHASH_FINAL;
++ CCP_AES_MODE(&function) = CCP_AES_MODE_GHASH;
++ CCP_AES_TYPE(&function) = session->cipher.ut.aes_type;
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_AES;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++ /* Last block (AAD_len || PT_len)*/
++ CCP_CMD_LEN(desc) = AES_BLOCK_SIZE;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)digest_dest_addr + AES_BLOCK_SIZE);
++ CCP_CMD_SRC_HI(desc) = high32_value(digest_dest_addr + AES_BLOCK_SIZE);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = ((uint32_t)digest_dest_addr);
++ CCP_CMD_DST_HI(desc) = high32_value(digest_dest_addr);
++	CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)key_addr);
++ CCP_CMD_KEY_HI(desc) = high32_value(key_addr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_iv;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++}
++
+ static inline int
+ ccp_crypto_cipher(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q,
+@@ -939,17 +1158,25 @@ ccp_crypto_auth(struct rte_crypto_op *op,
+
+ static inline int
+ ccp_crypto_aead(struct rte_crypto_op *op,
+- struct ccp_queue *cmd_q __rte_unused,
+- struct ccp_batch_info *b_info __rte_unused)
++ struct ccp_queue *cmd_q,
++ struct ccp_batch_info *b_info)
+ {
+ int result = 0;
+ struct ccp_session *session;
+
+ session = (struct ccp_session *)get_session_private_data(
+- op->sym->session,
++ op->sym->session,
+ ccp_cryptodev_driver_id);
+
+- switch (session->aead_algo) {
++ switch (session->auth.algo) {
++ case CCP_AUTH_ALGO_AES_GCM:
++ if (session->cipher.algo != CCP_CIPHER_ALGO_AES_GCM) {
++ CCP_LOG_ERR("Incorrect chain order");
++ return -1;
++ }
++ result = ccp_perform_aes_gcm(op, cmd_q);
++ b_info->desccnt += 5;
++ break;
+ default:
+ CCP_LOG_ERR("Unsupported aead algo %d",
+ session->aead_algo);
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index 1fb6a6d..24f577a 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -139,6 +139,36 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* AES GCM */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AEAD,
++ {.aead = {
++ .algo = RTE_CRYPTO_AEAD_AES_GCM,
++ .block_size = 16,
++ .key_size = {
++ .min = 16,
++ .max = 32,
++ .increment = 8
++ },
++ .digest_size = {
++ .min = 16,
++ .max = 16,
++ .increment = 0
++ },
++ .aad_size = {
++ .min = 0,
++ .max = 65535,
++ .increment = 1
++ },
++ .iv_size = {
++ .min = 12,
++ .max = 16,
++ .increment = 4
++ },
++ }, }
++ }, }
++ },
+ RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST()
+ };
+
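Note: the AES-GCM patch above implements the five-descriptor sequence (IV passthrough, GHASH-AAD, GCTR over the plaintext, IV reload, GHASH-final) and exposes the AEAD capability. As a usage illustration only (the IV offset placement and the key/AAD lengths are assumptions, not defined by the patch), an application would describe an AES-GCM operation to this PMD with an AEAD transform such as:

#include <stdint.h>
#include <string.h>
#include <rte_crypto.h>
#include <rte_crypto_sym.h>

/* Illustrative helper (not part of the patch): AEAD transform for the
 * AES-GCM capability above. IV_OFFSET_ASSUMED and the lengths below are
 * caller-chosen placeholders. */
#define IV_OFFSET_ASSUMED (sizeof(struct rte_crypto_op) + \
			   sizeof(struct rte_crypto_sym_op))

static void
make_gcm_xform(struct rte_crypto_sym_xform *xform,
	       uint8_t *key, uint16_t key_len, uint16_t aad_len)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = key_len;	/* 16, 24 or 32 */
	xform->aead.iv.offset = IV_OFFSET_ASSUMED;	/* IV stored after the op */
	xform->aead.iv.length = 12;		/* 12..16 per capability */
	xform->aead.digest_length = 16;
	xform->aead.aad_length = aad_len;
}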
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-14-20-crypto-ccp-support-sha1-authentication-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-14-20-crypto-ccp-support-sha1-authentication-algo.patch
new file mode 100644
index 00000000..6114237e
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-14-20-crypto-ccp-support-sha1-authentication-algo.patch
@@ -0,0 +1,531 @@
+From patchwork Fri Mar 9 08:35:14 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,14/20] crypto/ccp: support sha1 authentication algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35813
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-14-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:14 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 367 +++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/ccp_crypto.h | 23 +++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 42 +++++
+ 3 files changed, 432 insertions(+)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index ad9fa8e..c575dc1 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -36,6 +36,7 @@
+ #include <sys/queue.h>
+ #include <sys/types.h>
+ #include <unistd.h>
++#include <openssl/sha.h>
+ #include <openssl/cmac.h> /*sub key apis*/
+ #include <openssl/evp.h> /*sub key apis*/
+
+@@ -52,6 +53,14 @@
+ #include "ccp_pci.h"
+ #include "ccp_pmd_private.h"
+
++/* SHA initial context values */
++static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
++ SHA1_H4, SHA1_H3,
++ SHA1_H2, SHA1_H1,
++ SHA1_H0, 0x0U,
++ 0x0U, 0x0U,
++};
++
+ static enum ccp_cmd_order
+ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+ {
+@@ -76,6 +85,59 @@ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+ return res;
+ }
+
++/* partial hash using openssl */
++static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
++{
++ SHA_CTX ctx;
++
++ if (!SHA1_Init(&ctx))
++ return -EFAULT;
++ SHA1_Transform(&ctx, data_in);
++ rte_memcpy(data_out, &ctx, SHA_DIGEST_LENGTH);
++ return 0;
++}
++
++static int generate_partial_hash(struct ccp_session *sess)
++{
++
++ uint8_t ipad[sess->auth.block_size];
++ uint8_t opad[sess->auth.block_size];
++ uint8_t *ipad_t, *opad_t;
++ uint32_t *hash_value_be32, hash_temp32[8];
++ int i, count;
++
++ opad_t = ipad_t = (uint8_t *)sess->auth.key;
++
++ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
++
++ /* considering key size is always equal to block size of algorithm */
++ for (i = 0; i < sess->auth.block_size; i++) {
++ ipad[i] = (ipad_t[i] ^ HMAC_IPAD_VALUE);
++ opad[i] = (opad_t[i] ^ HMAC_OPAD_VALUE);
++ }
++
++ switch (sess->auth.algo) {
++ case CCP_AUTH_ALGO_SHA1_HMAC:
++ count = SHA1_DIGEST_SIZE >> 2;
++
++ if (partial_hash_sha1(ipad, (uint8_t *)hash_temp32))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be32++)
++ *hash_value_be32 = hash_temp32[count - 1 - i];
++
++ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha1(opad, (uint8_t *)hash_temp32))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be32++)
++ *hash_value_be32 = hash_temp32[count - 1 - i];
++ return 0;
++ default:
++ CCP_LOG_ERR("Invalid auth algo");
++ return -1;
++ }
++}
++
+ /* prepare temporary keys K1 and K2 */
+ static void prepare_key(unsigned char *k, unsigned char *l, int bl)
+ {
+@@ -260,6 +322,31 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
++ case RTE_CRYPTO_AUTH_SHA1:
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
++ sess->auth.ctx = (void *)ccp_sha1_init;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA1_HMAC:
++ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
++ return -1;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_1;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
++ sess->auth.block_size = SHA1_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+@@ -453,6 +540,13 @@ ccp_auth_slot(struct ccp_session *session)
+ int count = 0;
+
+ switch (session->auth.algo) {
++ case CCP_AUTH_ALGO_SHA1:
++ count = 3;
++ /**< op + lsb passthrough cpy to/from*/
++ break;
++ case CCP_AUTH_ALGO_SHA1_HMAC:
++ count = 6;
++ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+@@ -578,6 +672,271 @@ ccp_perform_passthru(struct ccp_passthru *pst,
+ }
+
+ static int
++ccp_perform_hmac(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q)
++{
++
++ struct ccp_session *session;
++ union ccp_function function;
++ struct ccp_desc *desc;
++ uint32_t tail;
++ phys_addr_t src_addr, dest_addr, dest_addr_t;
++ struct ccp_passthru pst;
++ uint64_t auth_msg_bits;
++ void *append_ptr;
++ uint8_t *addr;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++ addr = session->auth.pre_compute;
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->auth.data.offset);
++ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
++ session->auth.ctx_len);
++ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
++ dest_addr_t = dest_addr;
++
++ /** Load PHash1 to LSB*/
++ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.len = session->auth.ctx_len;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ /**sha engine command descriptor for IntermediateHash*/
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
++
++ CCP_CMD_SOC(desc) = 0;
++ CCP_CMD_IOC(desc) = 0;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_PROT(desc) = 0;
++
++ function.raw = 0;
++ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
++ auth_msg_bits = (op->sym->auth.data.length +
++ session->auth.block_size) * 8;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
++ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
++ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ /* Intermediate Hash value retrieve */
++ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
++ (session->auth.ut.sha_type == CCP_SHA_TYPE_512)) {
++
++ pst.src_addr =
++ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr_t;
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ } else {
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr_t;
++ pst.len = session->auth.ctx_len;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ }
++
++ /** Load PHash2 to LSB*/
++ addr += session->auth.ctx_len;
++ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)addr);
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.len = session->auth.ctx_len;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ /**sha engine command descriptor for FinalHash*/
++ dest_addr_t += session->auth.offset;
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
++
++ CCP_CMD_SOC(desc) = 0;
++ CCP_CMD_IOC(desc) = 0;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_PROT(desc) = 0;
++
++ function.raw = 0;
++ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = (session->auth.ctx_len -
++ session->auth.offset);
++ auth_msg_bits = (session->auth.block_size +
++ session->auth.ctx_len -
++ session->auth.offset) * 8;
++
++ CCP_CMD_SRC_LO(desc) = (uint32_t)(dest_addr_t);
++ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
++ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
++ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ /* Retrieve hmac output */
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr;
++ pst.len = session->auth.ctx_len;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
++ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ else
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++
++}
++
++static int
++ccp_perform_sha(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q)
++{
++ struct ccp_session *session;
++ union ccp_function function;
++ struct ccp_desc *desc;
++ uint32_t tail;
++ phys_addr_t src_addr, dest_addr;
++ struct ccp_passthru pst;
++ void *append_ptr;
++ uint64_t auth_msg_bits;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->auth.data.offset);
++
++ append_ptr = (void *)rte_pktmbuf_append(op->sym->m_src,
++ session->auth.ctx_len);
++ dest_addr = (phys_addr_t)rte_mem_virt2phy(append_ptr);
++
++ /** Passthru sha context*/
++
++ pst.src_addr = (phys_addr_t)rte_mem_virt2phy((void *)
++ session->auth.ctx);
++ pst.dest_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.len = session->auth.ctx_len;
++ pst.dir = 1;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ /**prepare sha command descriptor*/
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
++
++ CCP_CMD_SOC(desc) = 0;
++ CCP_CMD_IOC(desc) = 0;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++ CCP_CMD_PROT(desc) = 0;
++
++ function.raw = 0;
++ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
++ auth_msg_bits = op->sym->auth.data.length * 8;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_LSB_ID(desc) = cmd_q->sb_sha;
++ CCP_CMD_SHA_LO(desc) = ((uint32_t)auth_msg_bits);
++ CCP_CMD_SHA_HI(desc) = high32_value(auth_msg_bits);
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ /* Hash value retrieve */
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr;
++ pst.len = session->auth.ctx_len;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ if ((session->auth.ut.sha_type == CCP_SHA_TYPE_384) ||
++ (session->auth.ut.sha_type == CCP_SHA_TYPE_512))
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
++ else
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++
++}
++
++static int
+ ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+ {
+@@ -1143,6 +1502,14 @@ ccp_crypto_auth(struct rte_crypto_op *op,
+ ccp_cryptodev_driver_id);
+
+ switch (session->auth.algo) {
++ case CCP_AUTH_ALGO_SHA1:
++ result = ccp_perform_sha(op, cmd_q);
++ b_info->desccnt += 3;
++ break;
++ case CCP_AUTH_ALGO_SHA1_HMAC:
++ result = ccp_perform_hmac(op, cmd_q);
++ b_info->desccnt += 6;
++ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index d528ec9..42179de 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -60,9 +60,32 @@
+ #define CCP_DES_ENCRYPT(p) ((p)->des.encrypt)
+ #define CCP_DES_MODE(p) ((p)->des.mode)
+ #define CCP_DES_TYPE(p) ((p)->des.type)
++#define CCP_SHA_TYPE(p) ((p)->sha.type)
+ #define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap)
+ #define CCP_PT_BITWISE(p) ((p)->pt.bitwise)
+
++/* HMAC */
++#define HMAC_IPAD_VALUE 0x36
++#define HMAC_OPAD_VALUE 0x5c
++
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++#define MD5_DIGEST_SIZE 16
++#define MD5_BLOCK_SIZE 64
++#endif
++
++/* SHA */
++#define SHA_COMMON_DIGEST_SIZE 32
++#define SHA1_DIGEST_SIZE 20
++#define SHA1_BLOCK_SIZE 64
++
++/* SHA LSB initialization values */
++
++#define SHA1_H0 0x67452301UL
++#define SHA1_H1 0xefcdab89UL
++#define SHA1_H2 0x98badcfeUL
++#define SHA1_H3 0x10325476UL
++#define SHA1_H4 0xc3d2e1f0UL
++
+ /**
+ * CCP supported AES modes
+ */
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index 24f577a..6adef1c 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -39,6 +39,48 @@
+ #include "ccp_crypto.h"
+
+ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
++ { /* SHA1 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA1,
++ .block_size = 64,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 20,
++ .max = 20,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA1 HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
++ .block_size = 64,
++ .key_size = {
++ .min = 1,
++ .max = 64,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 20,
++ .max = 20,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
+ { /*AES-CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-15-20-crypto-ccp-support-sha2-family-authentication-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-15-20-crypto-ccp-support-sha2-family-authentication-algo.patch
new file mode 100644
index 00000000..da30fb84
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-15-20-crypto-ccp-support-sha2-family-authentication-algo.patch
@@ -0,0 +1,608 @@
+From patchwork Fri Mar 9 08:35:15 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 15/20] crypto/ccp: support sha2 family authentication algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35814
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-15-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:15 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 270 +++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/ccp_crypto.h | 48 +++++++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 168 ++++++++++++++++++++++++
+ 3 files changed, 486 insertions(+)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index c575dc1..410e8bf 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -61,6 +61,34 @@ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ 0x0U, 0x0U,
+ };
+
++uint32_t ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
++ SHA224_H7, SHA224_H6,
++ SHA224_H5, SHA224_H4,
++ SHA224_H3, SHA224_H2,
++ SHA224_H1, SHA224_H0,
++};
++
++uint32_t ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(uint32_t)] = {
++ SHA256_H7, SHA256_H6,
++ SHA256_H5, SHA256_H4,
++ SHA256_H3, SHA256_H2,
++ SHA256_H1, SHA256_H0,
++};
++
++uint64_t ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
++ SHA384_H7, SHA384_H6,
++ SHA384_H5, SHA384_H4,
++ SHA384_H3, SHA384_H2,
++ SHA384_H1, SHA384_H0,
++};
++
++uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
++ SHA512_H7, SHA512_H6,
++ SHA512_H5, SHA512_H4,
++ SHA512_H3, SHA512_H2,
++ SHA512_H1, SHA512_H0,
++};
++
+ static enum ccp_cmd_order
+ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+ {
+@@ -97,6 +125,54 @@ static int partial_hash_sha1(uint8_t *data_in, uint8_t *data_out)
+ return 0;
+ }
+
++static int partial_hash_sha224(uint8_t *data_in, uint8_t *data_out)
++{
++ SHA256_CTX ctx;
++
++ if (!SHA224_Init(&ctx))
++ return -EFAULT;
++ SHA256_Transform(&ctx, data_in);
++ rte_memcpy(data_out, &ctx,
++ SHA256_DIGEST_LENGTH);
++ return 0;
++}
++
++static int partial_hash_sha256(uint8_t *data_in, uint8_t *data_out)
++{
++ SHA256_CTX ctx;
++
++ if (!SHA256_Init(&ctx))
++ return -EFAULT;
++ SHA256_Transform(&ctx, data_in);
++ rte_memcpy(data_out, &ctx,
++ SHA256_DIGEST_LENGTH);
++ return 0;
++}
++
++static int partial_hash_sha384(uint8_t *data_in, uint8_t *data_out)
++{
++ SHA512_CTX ctx;
++
++ if (!SHA384_Init(&ctx))
++ return -EFAULT;
++ SHA512_Transform(&ctx, data_in);
++ rte_memcpy(data_out, &ctx,
++ SHA512_DIGEST_LENGTH);
++ return 0;
++}
++
++static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
++{
++ SHA512_CTX ctx;
++
++ if (!SHA512_Init(&ctx))
++ return -EFAULT;
++ SHA512_Transform(&ctx, data_in);
++ rte_memcpy(data_out, &ctx,
++ SHA512_DIGEST_LENGTH);
++ return 0;
++}
++
+ static int generate_partial_hash(struct ccp_session *sess)
+ {
+
+@@ -104,11 +180,13 @@ static int generate_partial_hash(struct ccp_session *sess)
+ uint8_t opad[sess->auth.block_size];
+ uint8_t *ipad_t, *opad_t;
+ uint32_t *hash_value_be32, hash_temp32[8];
++ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute);
++ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute);
+
+ /* considering key size is always equal to block size of algorithm */
+ for (i = 0; i < sess->auth.block_size; i++) {
+@@ -132,6 +210,66 @@ static int generate_partial_hash(struct ccp_session *sess)
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
++ case CCP_AUTH_ALGO_SHA224_HMAC:
++ count = SHA256_DIGEST_SIZE >> 2;
++
++ if (partial_hash_sha224(ipad, (uint8_t *)hash_temp32))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be32++)
++ *hash_value_be32 = hash_temp32[count - 1 - i];
++
++ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha224(opad, (uint8_t *)hash_temp32))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be32++)
++ *hash_value_be32 = hash_temp32[count - 1 - i];
++ return 0;
++ case CCP_AUTH_ALGO_SHA256_HMAC:
++ count = SHA256_DIGEST_SIZE >> 2;
++
++ if (partial_hash_sha256(ipad, (uint8_t *)hash_temp32))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be32++)
++ *hash_value_be32 = hash_temp32[count - 1 - i];
++
++ hash_value_be32 = (uint32_t *)((uint8_t *)sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha256(opad, (uint8_t *)hash_temp32))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be32++)
++ *hash_value_be32 = hash_temp32[count - 1 - i];
++ return 0;
++ case CCP_AUTH_ALGO_SHA384_HMAC:
++ count = SHA512_DIGEST_SIZE >> 3;
++
++ if (partial_hash_sha384(ipad, (uint8_t *)hash_temp64))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be64++)
++ *hash_value_be64 = hash_temp64[count - 1 - i];
++
++ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha384(opad, (uint8_t *)hash_temp64))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be64++)
++ *hash_value_be64 = hash_temp64[count - 1 - i];
++ return 0;
++ case CCP_AUTH_ALGO_SHA512_HMAC:
++ count = SHA512_DIGEST_SIZE >> 3;
++
++ if (partial_hash_sha512(ipad, (uint8_t *)hash_temp64))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be64++)
++ *hash_value_be64 = hash_temp64[count - 1 - i];
++
++ hash_value_be64 = (uint64_t *)((uint8_t *)sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha512(opad, (uint8_t *)hash_temp64))
++ return -1;
++ for (i = 0; i < count; i++, hash_value_be64++)
++ *hash_value_be64 = hash_temp64[count - 1 - i];
++ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
+@@ -347,6 +485,107 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
++ case RTE_CRYPTO_AUTH_SHA224:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
++ sess->auth.ctx = (void *)ccp_sha224_init;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA224_HMAC:
++ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_224;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
++ sess->auth.block_size = SHA224_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
++ case RTE_CRYPTO_AUTH_SHA256:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
++ sess->auth.ctx = (void *)ccp_sha256_init;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA256_HMAC:
++ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_256;
++ sess->auth.ctx_len = CCP_SB_BYTES;
++ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
++ sess->auth.block_size = SHA256_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
++ case RTE_CRYPTO_AUTH_SHA384:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
++ sess->auth.ctx = (void *)ccp_sha384_init;
++ sess->auth.ctx_len = CCP_SB_BYTES << 1;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA384_HMAC:
++ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_384;
++ sess->auth.ctx_len = CCP_SB_BYTES << 1;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
++ sess->auth.block_size = SHA384_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
++ case RTE_CRYPTO_AUTH_SHA512:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
++ sess->auth.ctx = (void *)ccp_sha512_init;
++ sess->auth.ctx_len = CCP_SB_BYTES << 1;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA512_HMAC:
++ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA_TYPE_512;
++ sess->auth.ctx_len = CCP_SB_BYTES << 1;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
++ sess->auth.block_size = SHA512_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, sess->auth.ctx_len << 1);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
++
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+@@ -541,12 +780,32 @@ ccp_auth_slot(struct ccp_session *session)
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
++ case CCP_AUTH_ALGO_SHA224:
++ case CCP_AUTH_ALGO_SHA256:
++ case CCP_AUTH_ALGO_SHA384:
++ case CCP_AUTH_ALGO_SHA512:
+ count = 3;
+ /**< op + lsb passthrough cpy to/from*/
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
++ case CCP_AUTH_ALGO_SHA224_HMAC:
++ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = 6;
+ break;
++ case CCP_AUTH_ALGO_SHA384_HMAC:
++ case CCP_AUTH_ALGO_SHA512_HMAC:
++ count = 7;
++ /**
++ * 1. Load PHash1 = H(k ^ ipad); to LSB
++		 * 2. generate IHash = H(hash on message with PHash1
++		 * as init values);
++		 * 3. Retrieve IHash 2 slots for 384/512
++		 * 4. Load PHash2 = H(k ^ opad); to LSB
++		 * 5. generate FHash = H(hash on IHash with PHash2
++		 * as init value);
++ * 6. Retrieve HMAC output from LSB to host memory
++ */
++ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+@@ -1503,13 +1762,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
+
+ switch (session->auth.algo) {
+ case CCP_AUTH_ALGO_SHA1:
++ case CCP_AUTH_ALGO_SHA224:
++ case CCP_AUTH_ALGO_SHA256:
++ case CCP_AUTH_ALGO_SHA384:
++ case CCP_AUTH_ALGO_SHA512:
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
+ case CCP_AUTH_ALGO_SHA1_HMAC:
++ case CCP_AUTH_ALGO_SHA224_HMAC:
++ case CCP_AUTH_ALGO_SHA256_HMAC:
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ break;
++ case CCP_AUTH_ALGO_SHA384_HMAC:
++ case CCP_AUTH_ALGO_SHA512_HMAC:
++ result = ccp_perform_hmac(op, cmd_q);
++ b_info->desccnt += 7;
++ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index 42179de..ca1c1a8 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -78,6 +78,18 @@
+ #define SHA1_DIGEST_SIZE 20
+ #define SHA1_BLOCK_SIZE 64
+
++#define SHA224_DIGEST_SIZE 28
++#define SHA224_BLOCK_SIZE 64
++
++#define SHA256_DIGEST_SIZE 32
++#define SHA256_BLOCK_SIZE 64
++
++#define SHA384_DIGEST_SIZE 48
++#define SHA384_BLOCK_SIZE 128
++
++#define SHA512_DIGEST_SIZE 64
++#define SHA512_BLOCK_SIZE 128
++
+ /* SHA LSB initialization values */
+
+ #define SHA1_H0 0x67452301UL
+@@ -86,6 +98,42 @@
+ #define SHA1_H3 0x10325476UL
+ #define SHA1_H4 0xc3d2e1f0UL
+
++#define SHA224_H0 0xc1059ed8UL
++#define SHA224_H1 0x367cd507UL
++#define SHA224_H2 0x3070dd17UL
++#define SHA224_H3 0xf70e5939UL
++#define SHA224_H4 0xffc00b31UL
++#define SHA224_H5 0x68581511UL
++#define SHA224_H6 0x64f98fa7UL
++#define SHA224_H7 0xbefa4fa4UL
++
++#define SHA256_H0 0x6a09e667UL
++#define SHA256_H1 0xbb67ae85UL
++#define SHA256_H2 0x3c6ef372UL
++#define SHA256_H3 0xa54ff53aUL
++#define SHA256_H4 0x510e527fUL
++#define SHA256_H5 0x9b05688cUL
++#define SHA256_H6 0x1f83d9abUL
++#define SHA256_H7 0x5be0cd19UL
++
++#define SHA384_H0 0xcbbb9d5dc1059ed8ULL
++#define SHA384_H1 0x629a292a367cd507ULL
++#define SHA384_H2 0x9159015a3070dd17ULL
++#define SHA384_H3 0x152fecd8f70e5939ULL
++#define SHA384_H4 0x67332667ffc00b31ULL
++#define SHA384_H5 0x8eb44a8768581511ULL
++#define SHA384_H6 0xdb0c2e0d64f98fa7ULL
++#define SHA384_H7 0x47b5481dbefa4fa4ULL
++
++#define SHA512_H0 0x6a09e667f3bcc908ULL
++#define SHA512_H1 0xbb67ae8584caa73bULL
++#define SHA512_H2 0x3c6ef372fe94f82bULL
++#define SHA512_H3 0xa54ff53a5f1d36f1ULL
++#define SHA512_H4 0x510e527fade682d1ULL
++#define SHA512_H5 0x9b05688c2b3e6c1fULL
++#define SHA512_H6 0x1f83d9abfb41bd6bULL
++#define SHA512_H7 0x5be0cd19137e2179ULL
++
+ /**
+ * CCP supported AES modes
+ */
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index 6adef1c..ab6199f 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -81,6 +81,174 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* SHA224 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA224,
++ .block_size = 64,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 28,
++ .max = 28,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA224 HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA224_HMAC,
++ .block_size = 64,
++ .key_size = {
++ .min = 1,
++ .max = 64,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 28,
++ .max = 28,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA256 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA256,
++ .block_size = 64,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 32,
++ .max = 32,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA256 HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA256_HMAC,
++ .block_size = 64,
++ .key_size = {
++ .min = 1,
++ .max = 64,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 32,
++ .max = 32,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA384 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA384,
++ .block_size = 128,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 48,
++ .max = 48,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA384 HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA384_HMAC,
++ .block_size = 128,
++ .key_size = {
++ .min = 1,
++ .max = 128,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 48,
++ .max = 48,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA512 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA512,
++ .block_size = 128,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 64,
++ .max = 64,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA512 HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA512_HMAC,
++ .block_size = 128,
++ .key_size = {
++ .min = 1,
++ .max = 128,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 64,
++ .max = 64,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
+ { /*AES-CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
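Note: the SHA-2 patch above stores each algorithm's initial hash values word-reversed (H7..H0 for SHA-224/256, likewise for SHA-384/512), and generate_partial_hash() reverses the 32- or 64-bit words of the OpenSSL partial state before it is loaded into a CCP LSB slot. The pattern is shown standalone below for clarity; the helper is illustrative and not present in the patch (a 64-bit variant would be identical with uint64_t).

#include <stdint.h>

/* Illustrative only: the word-reversal pattern the patch applies when
 * preparing SHA-2 state for a CCP LSB slot. "in" holds the hash words
 * H0..H(count-1) in natural order; "out" receives H(count-1)..H0,
 * matching tables such as ccp_sha256_init above. */
static void
reverse_sha2_words32(const uint32_t *in, uint32_t *out, int count)
{
	int i;

	for (i = 0; i < count; i++)
		out[i] = in[count - 1 - i];
}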
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-16-20-crypto-ccp-support-sha3-family-authentication-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-16-20-crypto-ccp-support-sha3-family-authentication-algo.patch
new file mode 100644
index 00000000..055f511f
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-16-20-crypto-ccp-support-sha3-family-authentication-algo.patch
@@ -0,0 +1,1067 @@
+From patchwork Fri Mar 9 08:35:16 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 16/20] crypto/ccp: support sha3 family authentication algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35815
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-16-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:16 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ drivers/crypto/ccp/ccp_crypto.c | 667 +++++++++++++++++++++++++++++++++-
+ drivers/crypto/ccp/ccp_crypto.h | 22 ++
+ drivers/crypto/ccp/ccp_pmd_ops.c | 168 +++++++++
+ lib/librte_cryptodev/rte_crypto_sym.h | 17 +
+ 4 files changed, 873 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index 410e8bf..cb63bc6 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -89,6 +89,74 @@ uint64_t ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(uint64_t)] = {
+ SHA512_H1, SHA512_H0,
+ };
+
++#if defined(_MSC_VER)
++#define SHA3_CONST(x) x
++#else
++#define SHA3_CONST(x) x##L
++#endif
++
++/** 'Words' here refers to uint64_t */
++#define SHA3_KECCAK_SPONGE_WORDS \
++ (((1600) / 8) / sizeof(uint64_t))
++typedef struct sha3_context_ {
++ uint64_t saved;
++ /**
++ * The portion of the input message that we
++ * didn't consume yet
++ */
++ union {
++ uint64_t s[SHA3_KECCAK_SPONGE_WORDS];
++ /* Keccak's state */
++ uint8_t sb[SHA3_KECCAK_SPONGE_WORDS * 8];
++ /**total 200 ctx size**/
++ };
++ unsigned int byteIndex;
++ /**
++ * 0..7--the next byte after the set one
++ * (starts from 0; 0--none are buffered)
++ */
++ unsigned int wordIndex;
++ /**
++ * 0..24--the next word to integrate input
++ * (starts from 0)
++ */
++ unsigned int capacityWords;
++ /**
++ * the double size of the hash output in
++ * words (e.g. 16 for Keccak 512)
++ */
++} sha3_context;
++
++#ifndef SHA3_ROTL64
++#define SHA3_ROTL64(x, y) \
++ (((x) << (y)) | ((x) >> ((sizeof(uint64_t)*8) - (y))))
++#endif
++
++static const uint64_t keccakf_rndc[24] = {
++ SHA3_CONST(0x0000000000000001UL), SHA3_CONST(0x0000000000008082UL),
++ SHA3_CONST(0x800000000000808aUL), SHA3_CONST(0x8000000080008000UL),
++ SHA3_CONST(0x000000000000808bUL), SHA3_CONST(0x0000000080000001UL),
++ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008009UL),
++ SHA3_CONST(0x000000000000008aUL), SHA3_CONST(0x0000000000000088UL),
++ SHA3_CONST(0x0000000080008009UL), SHA3_CONST(0x000000008000000aUL),
++ SHA3_CONST(0x000000008000808bUL), SHA3_CONST(0x800000000000008bUL),
++ SHA3_CONST(0x8000000000008089UL), SHA3_CONST(0x8000000000008003UL),
++ SHA3_CONST(0x8000000000008002UL), SHA3_CONST(0x8000000000000080UL),
++ SHA3_CONST(0x000000000000800aUL), SHA3_CONST(0x800000008000000aUL),
++ SHA3_CONST(0x8000000080008081UL), SHA3_CONST(0x8000000000008080UL),
++ SHA3_CONST(0x0000000080000001UL), SHA3_CONST(0x8000000080008008UL)
++};
++
++static const unsigned int keccakf_rotc[24] = {
++ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 2, 14, 27, 41, 56, 8, 25, 43, 62,
++ 18, 39, 61, 20, 44
++};
++
++static const unsigned int keccakf_piln[24] = {
++ 10, 7, 11, 17, 18, 3, 5, 16, 8, 21, 24, 4, 15, 23, 19, 13, 12, 2, 20,
++ 14, 22, 9, 6, 1
++};
++
+ static enum ccp_cmd_order
+ ccp_get_cmd_id(const struct rte_crypto_sym_xform *xform)
+ {
+@@ -173,6 +241,223 @@ static int partial_hash_sha512(uint8_t *data_in, uint8_t *data_out)
+ return 0;
+ }
+
++static void
++keccakf(uint64_t s[25])
++{
++ int i, j, round;
++ uint64_t t, bc[5];
++#define KECCAK_ROUNDS 24
++
++ for (round = 0; round < KECCAK_ROUNDS; round++) {
++
++ /* Theta */
++ for (i = 0; i < 5; i++)
++ bc[i] = s[i] ^ s[i + 5] ^ s[i + 10] ^ s[i + 15] ^
++ s[i + 20];
++
++ for (i = 0; i < 5; i++) {
++ t = bc[(i + 4) % 5] ^ SHA3_ROTL64(bc[(i + 1) % 5], 1);
++ for (j = 0; j < 25; j += 5)
++ s[j + i] ^= t;
++ }
++
++ /* Rho Pi */
++ t = s[1];
++ for (i = 0; i < 24; i++) {
++ j = keccakf_piln[i];
++ bc[0] = s[j];
++ s[j] = SHA3_ROTL64(t, keccakf_rotc[i]);
++ t = bc[0];
++ }
++
++ /* Chi */
++ for (j = 0; j < 25; j += 5) {
++ for (i = 0; i < 5; i++)
++ bc[i] = s[j + i];
++ for (i = 0; i < 5; i++)
++ s[j + i] ^= (~bc[(i + 1) % 5]) &
++ bc[(i + 2) % 5];
++ }
++
++ /* Iota */
++ s[0] ^= keccakf_rndc[round];
++ }
++}
++
++static void
++sha3_Init224(void *priv)
++{
++ sha3_context *ctx = (sha3_context *) priv;
++
++ memset(ctx, 0, sizeof(*ctx));
++ ctx->capacityWords = 2 * 224 / (8 * sizeof(uint64_t));
++}
++
++static void
++sha3_Init256(void *priv)
++{
++ sha3_context *ctx = (sha3_context *) priv;
++
++ memset(ctx, 0, sizeof(*ctx));
++ ctx->capacityWords = 2 * 256 / (8 * sizeof(uint64_t));
++}
++
++static void
++sha3_Init384(void *priv)
++{
++ sha3_context *ctx = (sha3_context *) priv;
++
++ memset(ctx, 0, sizeof(*ctx));
++ ctx->capacityWords = 2 * 384 / (8 * sizeof(uint64_t));
++}
++
++static void
++sha3_Init512(void *priv)
++{
++ sha3_context *ctx = (sha3_context *) priv;
++
++ memset(ctx, 0, sizeof(*ctx));
++ ctx->capacityWords = 2 * 512 / (8 * sizeof(uint64_t));
++}
++
++
++/* Absorb message bytes into the Keccak state: buffer a partial word,
++ * XOR complete 64-bit words into the state and run keccakf() whenever
++ * the rate portion of the sponge is full. No padding/finalization is
++ * done here; callers hash exactly one full block for the precompute.
++ */
++static void
++sha3_Update(void *priv, void const *bufIn, size_t len)
++{
++ sha3_context *ctx = (sha3_context *) priv;
++ unsigned int old_tail = (8 - ctx->byteIndex) & 7;
++ size_t words;
++ unsigned int tail;
++ size_t i;
++ const uint8_t *buf = bufIn;
++
++ if (len < old_tail) {
++ while (len--)
++ ctx->saved |= (uint64_t) (*(buf++)) <<
++ ((ctx->byteIndex++) * 8);
++ return;
++ }
++
++ if (old_tail) {
++ len -= old_tail;
++ while (old_tail--)
++ ctx->saved |= (uint64_t) (*(buf++)) <<
++ ((ctx->byteIndex++) * 8);
++
++ ctx->s[ctx->wordIndex] ^= ctx->saved;
++ ctx->byteIndex = 0;
++ ctx->saved = 0;
++ if (++ctx->wordIndex ==
++ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
++ keccakf(ctx->s);
++ ctx->wordIndex = 0;
++ }
++ }
++
++ words = len / sizeof(uint64_t);
++ tail = len - words * sizeof(uint64_t);
++
++ for (i = 0; i < words; i++, buf += sizeof(uint64_t)) {
++ const uint64_t t = (uint64_t) (buf[0]) |
++ ((uint64_t) (buf[1]) << 8 * 1) |
++ ((uint64_t) (buf[2]) << 8 * 2) |
++ ((uint64_t) (buf[3]) << 8 * 3) |
++ ((uint64_t) (buf[4]) << 8 * 4) |
++ ((uint64_t) (buf[5]) << 8 * 5) |
++ ((uint64_t) (buf[6]) << 8 * 6) |
++ ((uint64_t) (buf[7]) << 8 * 7);
++ ctx->s[ctx->wordIndex] ^= t;
++ if (++ctx->wordIndex ==
++ (SHA3_KECCAK_SPONGE_WORDS - ctx->capacityWords)) {
++ keccakf(ctx->s);
++ ctx->wordIndex = 0;
++ }
++ }
++
++ while (tail--)
++ ctx->saved |= (uint64_t) (*(buf++)) << ((ctx->byteIndex++) * 8);
++}
++
++int partial_hash_sha3_224(uint8_t *data_in, uint8_t *data_out)
++{
++ sha3_context *ctx;
++ int i;
++
++ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
++ if (!ctx) {
++ CCP_LOG_ERR("sha3-ctx creation failed");
++ return -ENOMEM;
++ }
++ sha3_Init224(ctx);
++ sha3_Update(ctx, data_in, SHA3_224_BLOCK_SIZE);
++ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
++ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
++ rte_free(ctx);
++
++ return 0;
++}
++
++int partial_hash_sha3_256(uint8_t *data_in, uint8_t *data_out)
++{
++ sha3_context *ctx;
++ int i;
++
++ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
++ if (!ctx) {
++ CCP_LOG_ERR("sha3-ctx creation failed");
++ return -ENOMEM;
++ }
++ sha3_Init256(ctx);
++ sha3_Update(ctx, data_in, SHA3_256_BLOCK_SIZE);
++ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
++ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
++ rte_free(ctx);
++
++ return 0;
++}
++
++int partial_hash_sha3_384(uint8_t *data_in, uint8_t *data_out)
++{
++ sha3_context *ctx;
++ int i;
++
++ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
++ if (!ctx) {
++ CCP_LOG_ERR("sha3-ctx creation failed");
++ return -ENOMEM;
++ }
++ sha3_Init384(ctx);
++ sha3_Update(ctx, data_in, SHA3_384_BLOCK_SIZE);
++ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
++ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
++ rte_free(ctx);
++
++ return 0;
++}
++
++int partial_hash_sha3_512(uint8_t *data_in, uint8_t *data_out)
++{
++ sha3_context *ctx;
++ int i;
++
++ ctx = rte_zmalloc("sha3-ctx", sizeof(sha3_context), 0);
++ if (!ctx) {
++ CCP_LOG_ERR("sha3-ctx creation failed");
++ return -ENOMEM;
++ }
++ sha3_Init512(ctx);
++ sha3_Update(ctx, data_in, SHA3_512_BLOCK_SIZE);
++ for (i = 0; i < CCP_SHA3_CTX_SIZE; i++, data_out++)
++ *data_out = ctx->sb[CCP_SHA3_CTX_SIZE - i - 1];
++ rte_free(ctx);
++
++ return 0;
++}
++
+ static int generate_partial_hash(struct ccp_session *sess)
+ {
+
+@@ -182,6 +467,7 @@ static int generate_partial_hash(struct ccp_session *sess)
+ uint32_t *hash_value_be32, hash_temp32[8];
+ uint64_t *hash_value_be64, hash_temp64[8];
+ int i, count;
++ uint8_t *hash_value_sha3;
+
+ opad_t = ipad_t = (uint8_t *)sess->auth.key;
+
+@@ -225,6 +511,16 @@ static int generate_partial_hash(struct ccp_session *sess)
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
++ case CCP_AUTH_ALGO_SHA3_224_HMAC:
++ hash_value_sha3 = sess->auth.pre_compute;
++ if (partial_hash_sha3_224(ipad, hash_value_sha3))
++ return -1;
++
++ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha3_224(opad, hash_value_sha3))
++ return -1;
++ return 0;
+ case CCP_AUTH_ALGO_SHA256_HMAC:
+ count = SHA256_DIGEST_SIZE >> 2;
+
+@@ -240,6 +536,16 @@ static int generate_partial_hash(struct ccp_session *sess)
+ for (i = 0; i < count; i++, hash_value_be32++)
+ *hash_value_be32 = hash_temp32[count - 1 - i];
+ return 0;
++ case CCP_AUTH_ALGO_SHA3_256_HMAC:
++ hash_value_sha3 = sess->auth.pre_compute;
++ if (partial_hash_sha3_256(ipad, hash_value_sha3))
++ return -1;
++
++ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha3_256(opad, hash_value_sha3))
++ return -1;
++ return 0;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+@@ -255,6 +561,16 @@ static int generate_partial_hash(struct ccp_session *sess)
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
++ case CCP_AUTH_ALGO_SHA3_384_HMAC:
++ hash_value_sha3 = sess->auth.pre_compute;
++ if (partial_hash_sha3_384(ipad, hash_value_sha3))
++ return -1;
++
++ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha3_384(opad, hash_value_sha3))
++ return -1;
++ return 0;
+ case CCP_AUTH_ALGO_SHA512_HMAC:
+ count = SHA512_DIGEST_SIZE >> 3;
+
+@@ -270,6 +586,16 @@ static int generate_partial_hash(struct ccp_session *sess)
+ for (i = 0; i < count; i++, hash_value_be64++)
+ *hash_value_be64 = hash_temp64[count - 1 - i];
+ return 0;
++ case CCP_AUTH_ALGO_SHA3_512_HMAC:
++ hash_value_sha3 = sess->auth.pre_compute;
++ if (partial_hash_sha3_512(ipad, hash_value_sha3))
++ return -1;
++
++ hash_value_sha3 = (uint8_t *)(sess->auth.pre_compute
++ + sess->auth.ctx_len);
++ if (partial_hash_sha3_512(opad, hash_value_sha3))
++ return -1;
++ return 0;
+ default:
+ CCP_LOG_ERR("Invalid auth algo");
+ return -1;
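For reference, the ipad/opad blocks consumed by generate_partial_hash() above follow the standard HMAC construction (RFC 2104): the key is zero-padded to the hash block size and XORed with the 0x36/0x5c constants. A minimal, self-contained sketch of that derivation (illustrative only; the function name is hypothetical):

    #include <stdint.h>
    #include <string.h>

    /* Derive the HMAC inner/outer pad blocks from a key no longer than
     * the block size (e.g. 136 bytes for SHA3-256). */
    static void hmac_derive_pads(const uint8_t *key, size_t key_len,
                                 size_t block_size,
                                 uint8_t *ipad, uint8_t *opad)
    {
            size_t i;

            memset(ipad, 0x36, block_size);
            memset(opad, 0x5c, block_size);
            for (i = 0; i < key_len; i++) {
                    ipad[i] ^= key[i];
                    opad[i] ^= key[i];
            }
    }

Note that the session-setup code below rejects keys longer than the block size instead of pre-hashing them, so key_len never exceeds block_size here.
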
+@@ -510,6 +836,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
++ case RTE_CRYPTO_AUTH_SHA3_224:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA3_224_HMAC:
++ if (auth_xform->key.length > SHA3_224_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_224;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA224_DIGEST_SIZE;
++ sess->auth.block_size = SHA3_224_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
+ case RTE_CRYPTO_AUTH_SHA256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256;
+ sess->auth.engine = CCP_ENGINE_SHA;
+@@ -535,6 +885,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
++ case RTE_CRYPTO_AUTH_SHA3_256:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA3_256_HMAC:
++ if (auth_xform->key.length > SHA3_256_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_256;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA256_DIGEST_SIZE;
++ sess->auth.block_size = SHA3_256_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
+ case RTE_CRYPTO_AUTH_SHA384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384;
+ sess->auth.engine = CCP_ENGINE_SHA;
+@@ -560,6 +934,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
++ case RTE_CRYPTO_AUTH_SHA3_384:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA3_384_HMAC:
++ if (auth_xform->key.length > SHA3_384_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_384;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA384_DIGEST_SIZE;
++ sess->auth.block_size = SHA3_384_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
+ case RTE_CRYPTO_AUTH_SHA512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512;
+ sess->auth.engine = CCP_ENGINE_SHA;
+@@ -585,7 +983,30 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ if (generate_partial_hash(sess))
+ return -1;
+ break;
+-
++ case RTE_CRYPTO_AUTH_SHA3_512:
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
++ break;
++ case RTE_CRYPTO_AUTH_SHA3_512_HMAC:
++ if (auth_xform->key.length > SHA3_512_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512_HMAC;
++ sess->auth.engine = CCP_ENGINE_SHA;
++ sess->auth.ut.sha_type = CCP_SHA3_TYPE_512;
++ sess->auth.ctx_len = CCP_SHA3_CTX_SIZE;
++ sess->auth.offset = CCP_SHA3_CTX_SIZE - SHA512_DIGEST_SIZE;
++ sess->auth.block_size = SHA3_512_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ memset(sess->auth.pre_compute, 0, 2 * sess->auth.ctx_len);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ if (generate_partial_hash(sess))
++ return -1;
++ break;
+ case RTE_CRYPTO_AUTH_AES_CMAC:
+ sess->auth.algo = CCP_AUTH_ALGO_AES_CMAC;
+ sess->auth.engine = CCP_ENGINE_AES;
+@@ -806,6 +1227,26 @@ ccp_auth_slot(struct ccp_session *session)
+ * 6. Retrieve HMAC output from LSB to host memory
+ */
+ break;
++ case CCP_AUTH_ALGO_SHA3_224:
++ case CCP_AUTH_ALGO_SHA3_256:
++ case CCP_AUTH_ALGO_SHA3_384:
++ case CCP_AUTH_ALGO_SHA3_512:
++ count = 1;
++ /**< single op; ctx and dst are in host memory */
++ break;
++ case CCP_AUTH_ALGO_SHA3_224_HMAC:
++ case CCP_AUTH_ALGO_SHA3_256_HMAC:
++ count = 3;
++ break;
++ case CCP_AUTH_ALGO_SHA3_384_HMAC:
++ case CCP_AUTH_ALGO_SHA3_512_HMAC:
++ count = 4;
++ /**
++ * 1. Op to perform the intermediate hash (Ihash)
++ * 2-3. Retrieve Ihash from LSB to host memory (two passthrough copies)
++ * 4. Op to perform the final hash
++ */
++ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ count = 4;
+ /**
+@@ -1196,6 +1637,213 @@ ccp_perform_sha(struct rte_crypto_op *op,
+ }
+
+ static int
++ccp_perform_sha3_hmac(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q)
++{
++ struct ccp_session *session;
++ struct ccp_passthru pst;
++ union ccp_function function;
++ struct ccp_desc *desc;
++ uint8_t *append_ptr;
++ uint32_t tail;
++ phys_addr_t src_addr, dest_addr, ctx_paddr, dest_addr_t;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->auth.data.offset);
++ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
++ session->auth.ctx_len);
++ if (!append_ptr) {
++ CCP_LOG_ERR("CCP MBUF append failed\n");
++ return -1;
++ }
++ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
++ dest_addr_t = dest_addr + (session->auth.ctx_len / 2);
++ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void
++ *)session->auth.pre_compute);
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ /* desc1 for SHA3-Ihash operation */
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++
++ function.raw = 0;
++ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = (cmd_q->sb_sha * CCP_SB_BYTES);
++ CCP_CMD_DST_HI(desc) = 0;
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SB;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
++ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ /* Intermediate Hash value retrieve */
++ if ((session->auth.ut.sha_type == CCP_SHA3_TYPE_384) ||
++ (session->auth.ut.sha_type == CCP_SHA3_TYPE_512)) {
++
++ pst.src_addr =
++ (phys_addr_t)((cmd_q->sb_sha + 1) * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr_t;
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr_t + CCP_SB_BYTES;
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++
++ } else {
++ pst.src_addr = (phys_addr_t)(cmd_q->sb_sha * CCP_SB_BYTES);
++ pst.dest_addr = dest_addr_t;
++ pst.len = CCP_SB_BYTES;
++ pst.dir = 0;
++ pst.bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
++ pst.byte_swap = CCP_PASSTHRU_BYTESWAP_256BIT;
++ ccp_perform_passthru(&pst, cmd_q);
++ }
++
++ /* SHA engine command descriptor for the final hash */
++ ctx_paddr += CCP_SHA3_CTX_SIZE;
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++
++ function.raw = 0;
++ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ if (session->auth.ut.sha_type == CCP_SHA3_TYPE_224) {
++ dest_addr_t += (CCP_SB_BYTES - SHA224_DIGEST_SIZE);
++ CCP_CMD_LEN(desc) = SHA224_DIGEST_SIZE;
++ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_256) {
++ CCP_CMD_LEN(desc) = SHA256_DIGEST_SIZE;
++ } else if (session->auth.ut.sha_type == CCP_SHA3_TYPE_384) {
++ dest_addr_t += (2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE);
++ CCP_CMD_LEN(desc) = SHA384_DIGEST_SIZE;
++ } else {
++ CCP_CMD_LEN(desc) = SHA512_DIGEST_SIZE;
++ }
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)dest_addr_t);
++ CCP_CMD_SRC_HI(desc) = high32_value(dest_addr_t);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = (uint32_t)dest_addr;
++ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
++ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++}
++
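The split above, one LSB-to-host passthrough copy for SHA3-224/256 and two for SHA3-384/512, follows from the 32-byte LSB slot size: a 48- or 64-byte intermediate digest spans two slots. A tiny illustrative helper (not in the patch; it assumes CCP_SB_BYTES is the 32-byte slot size used throughout the driver):

    /* Number of LSB-to-host passthrough copies needed for a digest. */
    static unsigned int lsb_copies_needed(unsigned int digest_size)
    {
            return (digest_size + CCP_SB_BYTES - 1) / CCP_SB_BYTES;
    }

With SHA384_DIGEST_SIZE (48) or SHA512_DIGEST_SIZE (64) this evaluates to 2, which matches the descriptor counts used by ccp_auth_slot() and ccp_crypto_auth() in this patch.
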
++static int
++ccp_perform_sha3(struct rte_crypto_op *op,
++ struct ccp_queue *cmd_q)
++{
++ struct ccp_session *session;
++ union ccp_function function;
++ struct ccp_desc *desc;
++ uint8_t *ctx_addr, *append_ptr;
++ uint32_t tail;
++ phys_addr_t src_addr, dest_addr, ctx_paddr;
++
++ session = (struct ccp_session *)get_session_private_data(
++ op->sym->session,
++ ccp_cryptodev_driver_id);
++
++ src_addr = rte_pktmbuf_mtophys_offset(op->sym->m_src,
++ op->sym->auth.data.offset);
++ append_ptr = (uint8_t *)rte_pktmbuf_append(op->sym->m_src,
++ session->auth.ctx_len);
++ if (!append_ptr) {
++ CCP_LOG_ERR("CCP MBUF append failed\n");
++ return -1;
++ }
++ dest_addr = (phys_addr_t)rte_mem_virt2phy((void *)append_ptr);
++ ctx_addr = session->auth.sha3_ctx;
++ ctx_paddr = (phys_addr_t)rte_mem_virt2phy((void *)ctx_addr);
++
++ desc = &cmd_q->qbase_desc[cmd_q->qidx];
++ memset(desc, 0, Q_DESC_SIZE);
++
++ /* prepare desc for SHA3 operation */
++ CCP_CMD_ENGINE(desc) = CCP_ENGINE_SHA;
++ CCP_CMD_INIT(desc) = 1;
++ CCP_CMD_EOM(desc) = 1;
++
++ function.raw = 0;
++ CCP_SHA_TYPE(&function) = session->auth.ut.sha_type;
++ CCP_CMD_FUNCTION(desc) = function.raw;
++
++ CCP_CMD_LEN(desc) = op->sym->auth.data.length;
++
++ CCP_CMD_SRC_LO(desc) = ((uint32_t)src_addr);
++ CCP_CMD_SRC_HI(desc) = high32_value(src_addr);
++ CCP_CMD_SRC_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_DST_LO(desc) = ((uint32_t)dest_addr);
++ CCP_CMD_DST_HI(desc) = high32_value(dest_addr);
++ CCP_CMD_DST_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ CCP_CMD_KEY_LO(desc) = ((uint32_t)ctx_paddr);
++ CCP_CMD_KEY_HI(desc) = high32_value(ctx_paddr);
++ CCP_CMD_KEY_MEM(desc) = CCP_MEMTYPE_SYSTEM;
++
++ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;
++
++ rte_wmb();
++
++ tail = (uint32_t)(cmd_q->qbase_phys_addr + cmd_q->qidx * Q_DESC_SIZE);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_TAIL_LO_BASE, tail);
++ CCP_WRITE_REG(cmd_q->reg_base, CMD_Q_CONTROL_BASE,
++ cmd_q->qcontrol | CMD_Q_RUN);
++
++ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
++ return 0;
++}
++
++static int
+ ccp_perform_aes_cmac(struct rte_crypto_op *op,
+ struct ccp_queue *cmd_q)
+ {
+@@ -1780,6 +2428,23 @@ ccp_crypto_auth(struct rte_crypto_op *op,
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ break;
++ case CCP_AUTH_ALGO_SHA3_224:
++ case CCP_AUTH_ALGO_SHA3_256:
++ case CCP_AUTH_ALGO_SHA3_384:
++ case CCP_AUTH_ALGO_SHA3_512:
++ result = ccp_perform_sha3(op, cmd_q);
++ b_info->desccnt += 1;
++ break;
++ case CCP_AUTH_ALGO_SHA3_224_HMAC:
++ case CCP_AUTH_ALGO_SHA3_256_HMAC:
++ result = ccp_perform_sha3_hmac(op, cmd_q);
++ b_info->desccnt += 3;
++ break;
++ case CCP_AUTH_ALGO_SHA3_384_HMAC:
++ case CCP_AUTH_ALGO_SHA3_512_HMAC:
++ result = ccp_perform_sha3_hmac(op, cmd_q);
++ b_info->desccnt += 4;
++ break;
+ case CCP_AUTH_ALGO_AES_CMAC:
+ result = ccp_perform_aes_cmac(op, cmd_q);
+ b_info->desccnt += 4;
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index ca1c1a8..8459b71 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -80,15 +80,19 @@
+
+ #define SHA224_DIGEST_SIZE 28
+ #define SHA224_BLOCK_SIZE 64
++#define SHA3_224_BLOCK_SIZE 144
+
+ #define SHA256_DIGEST_SIZE 32
+ #define SHA256_BLOCK_SIZE 64
++#define SHA3_256_BLOCK_SIZE 136
+
+ #define SHA384_DIGEST_SIZE 48
+ #define SHA384_BLOCK_SIZE 128
++#define SHA3_384_BLOCK_SIZE 104
+
+ #define SHA512_DIGEST_SIZE 64
+ #define SHA512_BLOCK_SIZE 128
++#define SHA3_512_BLOCK_SIZE 72
+
+ /* SHA LSB initialization values */
+
+@@ -386,4 +390,22 @@ int process_ops_to_dequeue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ uint16_t nb_ops);
+
++
++/**
++ * APIs for SHA3 partial hash generation
++ * @param data_in pointer to the input block over which the partial hash is computed
++ * @param data_out buffer to which the partial hash result is written in CCP big-endian format
++ */
++int partial_hash_sha3_224(uint8_t *data_in,
++ uint8_t *data_out);
++
++int partial_hash_sha3_256(uint8_t *data_in,
++ uint8_t *data_out);
++
++int partial_hash_sha3_384(uint8_t *data_in,
++ uint8_t *data_out);
++
++int partial_hash_sha3_512(uint8_t *data_in,
++ uint8_t *data_out);
++
+ #endif /* _CCP_CRYPTO_H_ */
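A hypothetical caller of the declarations above (illustrative only; it mirrors what ccp_configure_session_auth() does in ccp_crypto.c): the inner-hash state is written to the start of the session precompute buffer and the outer-hash state at an offset of CCP_SHA3_CTX_SIZE.

    /* Sketch: fill a 2 * CCP_SHA3_CTX_SIZE precompute buffer for SHA3-256 HMAC. */
    static int fill_sha3_256_precompute(uint8_t *ipad, uint8_t *opad,
                                        uint8_t *pre_compute)
    {
            if (partial_hash_sha3_256(ipad, pre_compute))
                    return -1;
            return partial_hash_sha3_256(opad, pre_compute + CCP_SHA3_CTX_SIZE);
    }
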
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index ab6199f..bb59d15 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -123,6 +123,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* SHA3-224 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_224,
++ .block_size = 144,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 28,
++ .max = 28,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA3-224-HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_224_HMAC,
++ .block_size = 144,
++ .key_size = {
++ .min = 1,
++ .max = 144,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 28,
++ .max = 28,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
+ { /* SHA256 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+@@ -165,6 +207,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* SHA3-256 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_256,
++ .block_size = 136,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 32,
++ .max = 32,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA3-256-HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
++ .block_size = 136,
++ .key_size = {
++ .min = 1,
++ .max = 136,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 32,
++ .max = 32,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
+ { /* SHA384 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+@@ -207,6 +291,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* SHA3-384 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_384,
++ .block_size = 104,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 48,
++ .max = 48,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA3-384-HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_384_HMAC,
++ .block_size = 104,
++ .key_size = {
++ .min = 1,
++ .max = 104,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 48,
++ .max = 48,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
+ { /* SHA512 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+@@ -249,6 +375,48 @@ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
+ }, }
+ }, }
+ },
++ { /* SHA3-512 */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_512,
++ .block_size = 72,
++ .key_size = {
++ .min = 0,
++ .max = 0,
++ .increment = 0
++ },
++ .digest_size = {
++ .min = 64,
++ .max = 64,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++ { /* SHA3-512-HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_SHA3_512_HMAC,
++ .block_size = 72,
++ .key_size = {
++ .min = 1,
++ .max = 72,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 64,
++ .max = 64,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
+ { /*AES-CMAC */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+diff --git a/lib/librte_cryptodev/rte_crypto_sym.h b/lib/librte_cryptodev/rte_crypto_sym.h
+index 60797e9..eb5afc5 100644
+--- a/lib/librte_cryptodev/rte_crypto_sym.h
++++ b/lib/librte_cryptodev/rte_crypto_sym.h
+@@ -245,6 +245,23 @@ enum rte_crypto_auth_algorithm {
+ RTE_CRYPTO_AUTH_ZUC_EIA3,
+ /**< ZUC algorithm in EIA3 mode */
+
++ RTE_CRYPTO_AUTH_SHA3_224,
++ /**< 224 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_224_HMAC,
++ /**< HMAC using 224 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_256,
++ /**< 256 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_256_HMAC,
++ /**< HMAC using 256 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_384,
++ /**< 384 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_384_HMAC,
++ /**< HMAC using 384 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_512,
++ /**< 512 bit SHA3 algorithm. */
++ RTE_CRYPTO_AUTH_SHA3_512_HMAC,
++ /**< HMAC using 512 bit SHA3 algorithm. */
++
+ RTE_CRYPTO_AUTH_LIST_END
+ };
+
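To show how an application would request the algorithms added above, here is a hedged configuration example (field names follow the rte_cryptodev symmetric API as used elsewhere in this patch set; the key buffer is a placeholder):

    #include <rte_crypto_sym.h>

    static uint8_t hmac_key[64];    /* placeholder key material */

    static const struct rte_crypto_sym_xform sha3_hmac_xform = {
            .type = RTE_CRYPTO_SYM_XFORM_AUTH,
            .next = NULL,
            .auth = {
                    .op = RTE_CRYPTO_AUTH_OP_GENERATE,
                    .algo = RTE_CRYPTO_AUTH_SHA3_256_HMAC,
                    .key = { .data = hmac_key, .length = sizeof(hmac_key) },
                    .digest_length = 32,    /* SHA3-256 digest size */
            },
    };

The key length stays within the 136-byte SHA3-256 block size that both the capability table and the session-setup check above enforce.
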
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-17-20-crypto-ccp-support-cpu-based-md5-and-sha2-family-authentication-algo.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-17-20-crypto-ccp-support-cpu-based-md5-and-sha2-family-authentication-algo.patch
new file mode 100644
index 00000000..c7756b4e
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-17-20-crypto-ccp-support-cpu-based-md5-and-sha2-family-authentication-algo.patch
@@ -0,0 +1,626 @@
+From patchwork Fri Mar 9 08:35:17 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 17/20] crypto/ccp: support cpu based md5 and sha2 family
+ authentication algo
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35816
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-17-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:17 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ config/common_base | 1 +
+ drivers/crypto/ccp/ccp_crypto.c | 282 ++++++++++++++++++++++++++++++++++-
+ drivers/crypto/ccp/ccp_crypto.h | 5 +-
+ drivers/crypto/ccp/ccp_pmd_ops.c | 23 +++
+ drivers/crypto/ccp/ccp_pmd_private.h | 10 ++
+ 5 files changed, 316 insertions(+), 5 deletions(-)
+
+diff --git a/config/common_base b/config/common_base
+index 28237f0..65e34ae 100644
+--- a/config/common_base
++++ b/config/common_base
+@@ -532,6 +532,7 @@ CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO=y
+ # Compile PMD for AMD CCP crypto device
+ #
+ CONFIG_RTE_LIBRTE_PMD_CCP=n
++CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=n
+
+ #
+ # Compile PMD for Marvell Crypto device
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index cb63bc6..133db76 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -53,6 +53,12 @@
+ #include "ccp_pci.h"
+ #include "ccp_pmd_private.h"
+
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++#include <openssl/conf.h>
++#include <openssl/err.h>
++#include <openssl/hmac.h>
++#endif
++
+ /* SHA initial context values */
+ static uint32_t ccp_sha1_init[SHA_COMMON_DIGEST_SIZE / sizeof(uint32_t)] = {
+ SHA1_H4, SHA1_H3,
+@@ -786,6 +792,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ else
+ sess->auth.op = CCP_AUTH_OP_VERIFY;
+ switch (auth_xform->algo) {
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ case RTE_CRYPTO_AUTH_MD5_HMAC:
++ sess->auth.algo = CCP_AUTH_ALGO_MD5_HMAC;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - MD5_DIGEST_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ sess->auth.block_size = MD5_BLOCK_SIZE;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++ break;
++#endif
+ case RTE_CRYPTO_AUTH_SHA1:
+ sess->auth.engine = CCP_ENGINE_SHA;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA1;
+@@ -795,6 +812,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA1_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA1_HMAC;
++ sess->auth.offset = CCP_SB_BYTES - SHA1_DIGEST_SIZE;
++ sess->auth.block_size = SHA1_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++#else
+ if (auth_xform->key.length > SHA1_BLOCK_SIZE)
+ return -1;
+ sess->auth.engine = CCP_ENGINE_SHA;
+@@ -810,6 +838,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
++#endif
+ break;
+ case RTE_CRYPTO_AUTH_SHA224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224;
+@@ -820,6 +849,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA224_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
++ sess->auth.offset = CCP_SB_BYTES - SHA224_DIGEST_SIZE;
++ sess->auth.block_size = SHA224_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++#else
+ if (auth_xform->key.length > SHA224_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA224_HMAC;
+@@ -835,6 +875,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
++#endif
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_224:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_224;
+@@ -869,6 +910,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA256_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
++ sess->auth.offset = CCP_SB_BYTES - SHA256_DIGEST_SIZE;
++ sess->auth.block_size = SHA256_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++#else
+ if (auth_xform->key.length > SHA256_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA256_HMAC;
+@@ -884,6 +936,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
++#endif
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_256:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_256;
+@@ -918,6 +971,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA384_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA384_DIGEST_SIZE;
++ sess->auth.block_size = SHA384_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++#else
+ if (auth_xform->key.length > SHA384_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA384_HMAC;
+@@ -933,6 +997,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
++#endif
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_384:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_384;
+@@ -967,6 +1032,17 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
+ break;
+ case RTE_CRYPTO_AUTH_SHA512_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
++ return -1;
++ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
++ sess->auth.offset = (CCP_SB_BYTES << 1) - SHA512_DIGEST_SIZE;
++ sess->auth.block_size = SHA512_BLOCK_SIZE;
++ sess->auth.key_length = auth_xform->key.length;
++ memset(sess->auth.key, 0, sess->auth.block_size);
++ rte_memcpy(sess->auth.key, auth_xform->key.data,
++ auth_xform->key.length);
++#else
+ if (auth_xform->key.length > SHA512_BLOCK_SIZE)
+ return -1;
+ sess->auth.algo = CCP_AUTH_ALGO_SHA512_HMAC;
+@@ -982,6 +1058,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ auth_xform->key.length);
+ if (generate_partial_hash(sess))
+ return -1;
++#endif
+ break;
+ case RTE_CRYPTO_AUTH_SHA3_512:
+ sess->auth.algo = CCP_AUTH_ALGO_SHA3_512;
+@@ -1012,7 +1089,7 @@ ccp_configure_session_auth(struct ccp_session *sess,
+ sess->auth.engine = CCP_ENGINE_AES;
+ sess->auth.um.aes_mode = CCP_AES_MODE_CMAC;
+ sess->auth.key_length = auth_xform->key.length;
+- /**<padding and hash result*/
++ /* padding and hash result */
+ sess->auth.ctx_len = CCP_SB_BYTES << 1;
+ sess->auth.offset = AES_BLOCK_SIZE;
+ sess->auth.block_size = AES_BLOCK_SIZE;
+@@ -1208,14 +1285,22 @@ ccp_auth_slot(struct ccp_session *session)
+ count = 3;
+ /**< op + lsb passthrough cpy to/from*/
+ break;
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ case CCP_AUTH_ALGO_MD5_HMAC:
++ break;
++#endif
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
++#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ count = 6;
++#endif
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
++#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
+ count = 7;
++#endif
+ /**
+ * 1. Load PHash1 = H(k ^ ipad); to LSB
+ * 2. generate IHash = H(hash on message with PHash1
+@@ -1322,6 +1407,122 @@ ccp_compute_slot_count(struct ccp_session *session)
+ return count;
+ }
+
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++static uint8_t
++algo_select(int sessalgo,
++ const EVP_MD **algo)
++{
++ int res = 0;
++
++ switch (sessalgo) {
++ case CCP_AUTH_ALGO_MD5_HMAC:
++ *algo = EVP_md5();
++ break;
++ case CCP_AUTH_ALGO_SHA1_HMAC:
++ *algo = EVP_sha1();
++ break;
++ case CCP_AUTH_ALGO_SHA224_HMAC:
++ *algo = EVP_sha224();
++ break;
++ case CCP_AUTH_ALGO_SHA256_HMAC:
++ *algo = EVP_sha256();
++ break;
++ case CCP_AUTH_ALGO_SHA384_HMAC:
++ *algo = EVP_sha384();
++ break;
++ case CCP_AUTH_ALGO_SHA512_HMAC:
++ *algo = EVP_sha512();
++ break;
++ default:
++ res = -EINVAL;
++ break;
++ }
++ return res;
++}
++
++static int
++process_cpu_auth_hmac(uint8_t *src, uint8_t *dst,
++ __rte_unused uint8_t *iv,
++ EVP_PKEY *pkey,
++ int srclen,
++ EVP_MD_CTX *ctx,
++ const EVP_MD *algo,
++ uint16_t d_len)
++{
++ size_t dstlen;
++ unsigned char temp_dst[64];
++
++ if (EVP_DigestSignInit(ctx, NULL, algo, NULL, pkey) <= 0)
++ goto process_auth_err;
++
++ if (EVP_DigestSignUpdate(ctx, (char *)src, srclen) <= 0)
++ goto process_auth_err;
++
++ if (EVP_DigestSignFinal(ctx, temp_dst, &dstlen) <= 0)
++ goto process_auth_err;
++
++ memcpy(dst, temp_dst, d_len);
++ return 0;
++process_auth_err:
++ CCP_LOG_ERR("Process cpu auth failed");
++ return -EINVAL;
++}
++
++static int cpu_crypto_auth(struct ccp_qp *qp,
++ struct rte_crypto_op *op,
++ struct ccp_session *sess,
++ EVP_MD_CTX *ctx)
++{
++ uint8_t *src, *dst;
++ int srclen, status;
++ struct rte_mbuf *mbuf_src, *mbuf_dst;
++ const EVP_MD *algo = NULL;
++ EVP_PKEY *pkey;
++
++ algo_select(sess->auth.algo, &algo);
++ pkey = EVP_PKEY_new_mac_key(EVP_PKEY_HMAC, NULL, sess->auth.key,
++ sess->auth.key_length);
++ mbuf_src = op->sym->m_src;
++ mbuf_dst = op->sym->m_dst ? op->sym->m_dst : op->sym->m_src;
++ srclen = op->sym->auth.data.length;
++ src = rte_pktmbuf_mtod_offset(mbuf_src, uint8_t *,
++ op->sym->auth.data.offset);
++
++ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
++ dst = qp->temp_digest;
++ } else {
++ dst = op->sym->auth.digest.data;
++ if (dst == NULL) {
++ dst = rte_pktmbuf_mtod_offset(mbuf_dst, uint8_t *,
++ op->sym->auth.data.offset +
++ sess->auth.digest_length);
++ }
++ }
++ status = process_cpu_auth_hmac(src, dst, NULL,
++ pkey, srclen,
++ ctx,
++ algo,
++ sess->auth.digest_length);
++ if (status) {
++ op->status = RTE_CRYPTO_OP_STATUS_ERROR;
++ return status;
++ }
++
++ if (sess->auth.op == CCP_AUTH_OP_VERIFY) {
++ if (memcmp(dst, op->sym->auth.digest.data,
++ sess->auth.digest_length) != 0) {
++ op->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
++ } else {
++ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++ }
++ } else {
++ op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++ }
++ EVP_PKEY_free(pkey);
++ return 0;
++}
++#endif
++
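One hedged observation on the verify path above: memcmp() returns as soon as a byte differs, so digest comparisons are usually done in constant time (OpenSSL ships CRYPTO_memcmp for exactly this). An illustrative stand-in, not part of the patch:

    #include <stdint.h>
    #include <stddef.h>

    /* Constant-time digest comparison: accumulate differences instead of
     * returning early; the result is 0 only when all bytes match. */
    static int digest_cmp_ct(const uint8_t *a, const uint8_t *b, size_t len)
    {
            uint8_t diff = 0;
            size_t i;

            for (i = 0; i < len; i++)
                    diff |= (uint8_t)(a[i] ^ b[i]);
            return diff != 0;
    }
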
+ static void
+ ccp_perform_passthru(struct ccp_passthru *pst,
+ struct ccp_queue *cmd_q)
+@@ -2417,14 +2618,24 @@ ccp_crypto_auth(struct rte_crypto_op *op,
+ result = ccp_perform_sha(op, cmd_q);
+ b_info->desccnt += 3;
+ break;
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ case CCP_AUTH_ALGO_MD5_HMAC:
++ break;
++#endif
+ case CCP_AUTH_ALGO_SHA1_HMAC:
+ case CCP_AUTH_ALGO_SHA224_HMAC:
+ case CCP_AUTH_ALGO_SHA256_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ break;
++#endif
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 6;
+ break;
+ case CCP_AUTH_ALGO_SHA384_HMAC:
+ case CCP_AUTH_ALGO_SHA512_HMAC:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ break;
++#endif
+ result = ccp_perform_hmac(op, cmd_q);
+ b_info->desccnt += 7;
+ break;
+@@ -2488,7 +2699,7 @@ ccp_crypto_aead(struct rte_crypto_op *op,
+ }
+
+ int
+-process_ops_to_enqueue(const struct ccp_qp *qp,
++process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+@@ -2497,11 +2708,22 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
+ int i, result = 0;
+ struct ccp_batch_info *b_info;
+ struct ccp_session *session;
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ EVP_MD_CTX *auth_ctx = NULL;
++#endif
+
+ if (rte_mempool_get(qp->batch_mp, (void **)&b_info)) {
+ CCP_LOG_ERR("batch info allocation failed");
+ return 0;
+ }
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ auth_ctx = EVP_MD_CTX_create();
++ if (unlikely(!auth_ctx)) {
++ CCP_LOG_ERR("Unable to create auth ctx");
++ return 0;
++ }
++ b_info->auth_ctr = 0;
++#endif
+ /* populate batch info necessary for dequeue */
+ b_info->op_idx = 0;
+ b_info->lsb_buf_idx = 0;
+@@ -2523,6 +2745,11 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
+ break;
+ case CCP_CMD_AUTH:
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ b_info->auth_ctr++;
++ result = cpu_crypto_auth(qp, op[i],
++ session, auth_ctx);
++#endif
+ break;
+ case CCP_CMD_CIPHER_HASH:
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+@@ -2532,6 +2759,12 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
+ break;
+ case CCP_CMD_HASH_CIPHER:
+ result = ccp_crypto_auth(op[i], cmd_q, b_info);
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ result = cpu_crypto_auth(qp, op[i],
++ session, auth_ctx);
++ if (op[i]->status != RTE_CRYPTO_OP_STATUS_SUCCESS)
++ continue;
++#endif
+ if (result)
+ break;
+ result = ccp_crypto_cipher(op[i], cmd_q, b_info);
+@@ -2565,6 +2798,9 @@ process_ops_to_enqueue(const struct ccp_qp *qp,
+
+ rte_ring_enqueue(qp->processed_pkts, (void *)b_info);
+
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ EVP_MD_CTX_destroy(auth_ctx);
++#endif
+ return i;
+ }
+
+@@ -2633,13 +2869,27 @@ static inline void ccp_auth_dq_prepare(struct rte_crypto_op *op)
+ }
+
+ static int
+-ccp_prepare_ops(struct rte_crypto_op **op_d,
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ccp_prepare_ops(struct ccp_qp *qp,
++#else
++ccp_prepare_ops(struct ccp_qp *qp __rte_unused,
++#endif
++ struct rte_crypto_op **op_d,
+ struct ccp_batch_info *b_info,
+ uint16_t nb_ops)
+ {
+ int i, min_ops;
+ struct ccp_session *session;
+
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ EVP_MD_CTX *auth_ctx = NULL;
++
++ auth_ctx = EVP_MD_CTX_create();
++ if (unlikely(!auth_ctx)) {
++ CCP_LOG_ERR("Unable to create auth ctx");
++ return 0;
++ }
++#endif
+ min_ops = RTE_MIN(nb_ops, b_info->opcnt);
+
+ for (i = 0; i < min_ops; i++) {
+@@ -2652,8 +2902,25 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
+ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ break;
+ case CCP_CMD_AUTH:
++#ifndef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ ccp_auth_dq_prepare(op_d[i]);
++#endif
++ break;
+ case CCP_CMD_CIPHER_HASH:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ cpu_crypto_auth(qp, op_d[i],
++ session, auth_ctx);
++#else
++ ccp_auth_dq_prepare(op_d[i]);
++#endif
++ break;
+ case CCP_CMD_HASH_CIPHER:
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ op_d[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
++#else
++ ccp_auth_dq_prepare(op_d[i]);
++#endif
++ break;
+ case CCP_CMD_COMBINED:
+ ccp_auth_dq_prepare(op_d[i]);
+ break;
+@@ -2662,6 +2929,9 @@ ccp_prepare_ops(struct rte_crypto_op **op_d,
+ }
+ }
+
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ EVP_MD_CTX_destroy(auth_ctx);
++#endif
+ b_info->opcnt -= min_ops;
+ return min_ops;
+ }
+@@ -2681,6 +2951,10 @@ process_ops_to_dequeue(struct ccp_qp *qp,
+ } else if (rte_ring_dequeue(qp->processed_pkts,
+ (void **)&b_info))
+ return 0;
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ if (b_info->auth_ctr == b_info->opcnt)
++ goto success;
++#endif
+ cur_head_offset = CCP_READ_REG(b_info->cmd_q->reg_base,
+ CMD_Q_HEAD_LO_BASE);
+
+@@ -2700,7 +2974,7 @@ process_ops_to_dequeue(struct ccp_qp *qp,
+
+
+ success:
+- nb_ops = ccp_prepare_ops(op, b_info, nb_ops);
++ nb_ops = ccp_prepare_ops(qp, op, b_info, nb_ops);
+ rte_atomic64_add(&b_info->cmd_q->free_slots, b_info->desccnt);
+ b_info->desccnt = 0;
+ if (b_info->opcnt > 0) {
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index 8459b71..f526329 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -94,6 +94,9 @@
+ #define SHA512_BLOCK_SIZE 128
+ #define SHA3_512_BLOCK_SIZE 72
+
++/* Maximum length for digest */
++#define DIGEST_LENGTH_MAX 64
++
+ /* SHA LSB initialization values */
+
+ #define SHA1_H0 0x67452301UL
+@@ -372,7 +375,7 @@ int ccp_compute_slot_count(struct ccp_session *session);
+ * @param nb_ops No. of ops to be submitted
+ * @return 0 on success otherwise -1
+ */
+-int process_ops_to_enqueue(const struct ccp_qp *qp,
++int process_ops_to_enqueue(struct ccp_qp *qp,
+ struct rte_crypto_op **op,
+ struct ccp_queue *cmd_q,
+ uint16_t nb_ops,
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index bb59d15..1b67070 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -39,6 +39,29 @@
+ #include "ccp_crypto.h"
+
+ static const struct rte_cryptodev_capabilities ccp_pmd_capabilities[] = {
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ { /* MD5 HMAC */
++ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
++ {.sym = {
++ .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH,
++ {.auth = {
++ .algo = RTE_CRYPTO_AUTH_MD5_HMAC,
++ .block_size = 64,
++ .key_size = {
++ .min = 1,
++ .max = 64,
++ .increment = 1
++ },
++ .digest_size = {
++ .min = 16,
++ .max = 16,
++ .increment = 0
++ },
++ .aad_size = { 0 }
++ }, }
++ }, }
++ },
++#endif
+ { /* SHA1 */
+ .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ {.sym = {
+diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
+index d278a8c..cd9f6ae 100644
+--- a/drivers/crypto/ccp/ccp_pmd_private.h
++++ b/drivers/crypto/ccp/ccp_pmd_private.h
+@@ -32,6 +32,7 @@
+ #define _CCP_PMD_PRIVATE_H_
+
+ #include <rte_cryptodev.h>
++#include "ccp_crypto.h"
+
+ #define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+@@ -87,6 +88,10 @@ struct ccp_batch_info {
+ phys_addr_t lsb_buf_phys;
+ /**< LSB intermediate buf for passthru */
+ int lsb_buf_idx;
++#ifdef RTE_LIBRTE_PMD_CCP_CPU_AUTH
++ uint16_t auth_ctr;
++ /**< auth only ops batch */
++#endif
+ } __rte_cache_aligned;
+
+ /**< CCP crypto queue pair */
+@@ -107,6 +112,11 @@ struct ccp_qp {
+ /**< Store ops pulled out of queue */
+ struct rte_cryptodev *dev;
+ /**< rte crypto device to which this qp belongs */
++ uint8_t temp_digest[DIGEST_LENGTH_MAX];
++ /**< Buffer used to store the digest generated
++ * by the driver when verifying a digest provided
++ * by the user (using authentication verify operation)
++ */
+ } __rte_cache_aligned;
+
+
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-18-20-test-crypto-add-test-for-AMD-CCP-crypto-poll-mode.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-18-20-test-crypto-add-test-for-AMD-CCP-crypto-poll-mode.patch
new file mode 100644
index 00000000..66cc2d63
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-18-20-test-crypto-add-test-for-AMD-CCP-crypto-poll-mode.patch
@@ -0,0 +1,942 @@
+From patchwork Fri Mar 9 08:35:18 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev, v4,
+ 18/20] test/crypto: add test for AMD CCP crypto poll mode
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35817
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-18-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:18 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ test/test/test_cryptodev.c | 161 +++++++++++++++++++++++++++
+ test/test/test_cryptodev.h | 1 +
+ test/test/test_cryptodev_aes_test_vectors.h | 93 ++++++++++------
+ test/test/test_cryptodev_blockcipher.c | 9 +-
+ test/test/test_cryptodev_blockcipher.h | 1 +
+ test/test/test_cryptodev_des_test_vectors.h | 42 ++++---
+ test/test/test_cryptodev_hash_test_vectors.h | 60 ++++++----
+ 7 files changed, 301 insertions(+), 66 deletions(-)
+
+diff --git a/test/test/test_cryptodev.c b/test/test/test_cryptodev.c
+index 1417482..d1d7925 100644
+--- a/test/test/test_cryptodev.c
++++ b/test/test/test_cryptodev.c
+@@ -338,6 +338,23 @@ testsuite_setup(void)
+ }
+ }
+
++ /* Create a CCP device if required */
++ if (gbl_driver_id == rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD))) {
++ nb_devs = rte_cryptodev_device_count_by_driver(
++ rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD)));
++ if (nb_devs < 1) {
++ ret = rte_vdev_init(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD),
++ NULL);
++
++ TEST_ASSERT(ret == 0, "Failed to create "
++ "instance of pmd : %s",
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
++ }
++ }
++
+ #ifdef RTE_LIBRTE_PMD_CRYPTO_SCHEDULER
+ if (gbl_driver_id == rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD))) {
+@@ -1727,6 +1744,44 @@ test_AES_cipheronly_openssl_all(void)
+ }
+
+ static int
++test_AES_chain_ccp_all(void)
++{
++ struct crypto_testsuite_params *ts_params = &testsuite_params;
++ int status;
++
++ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
++ ts_params->op_mpool,
++ ts_params->session_mpool,
++ ts_params->valid_devs[0],
++ rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
++ BLKCIPHER_AES_CHAIN_TYPE);
++
++ TEST_ASSERT_EQUAL(status, 0, "Test failed");
++
++ return TEST_SUCCESS;
++}
++
++static int
++test_AES_cipheronly_ccp_all(void)
++{
++ struct crypto_testsuite_params *ts_params = &testsuite_params;
++ int status;
++
++ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
++ ts_params->op_mpool,
++ ts_params->session_mpool,
++ ts_params->valid_devs[0],
++ rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
++ BLKCIPHER_AES_CIPHERONLY_TYPE);
++
++ TEST_ASSERT_EQUAL(status, 0, "Test failed");
++
++ return TEST_SUCCESS;
++}
++
++static int
+ test_AES_chain_qat_all(void)
+ {
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+@@ -1898,6 +1953,25 @@ test_authonly_openssl_all(void)
+ }
+
+ static int
++test_authonly_ccp_all(void)
++{
++ struct crypto_testsuite_params *ts_params = &testsuite_params;
++ int status;
++
++ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
++ ts_params->op_mpool,
++ ts_params->session_mpool,
++ ts_params->valid_devs[0],
++ rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
++ BLKCIPHER_AUTHONLY_TYPE);
++
++ TEST_ASSERT_EQUAL(status, 0, "Test failed");
++
++ return TEST_SUCCESS;
++}
++
++static int
+ test_AES_chain_armv8_all(void)
+ {
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+@@ -4973,6 +5047,44 @@ test_3DES_cipheronly_dpaa2_sec_all(void)
+ }
+
+ static int
++test_3DES_chain_ccp_all(void)
++{
++ struct crypto_testsuite_params *ts_params = &testsuite_params;
++ int status;
++
++ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
++ ts_params->op_mpool,
++ ts_params->session_mpool,
++ ts_params->valid_devs[0],
++ rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
++ BLKCIPHER_3DES_CHAIN_TYPE);
++
++ TEST_ASSERT_EQUAL(status, 0, "Test failed");
++
++ return TEST_SUCCESS;
++}
++
++static int
++test_3DES_cipheronly_ccp_all(void)
++{
++ struct crypto_testsuite_params *ts_params = &testsuite_params;
++ int status;
++
++ status = test_blockcipher_all_tests(ts_params->mbuf_pool,
++ ts_params->op_mpool,
++ ts_params->session_mpool,
++ ts_params->valid_devs[0],
++ rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD)),
++ BLKCIPHER_3DES_CIPHERONLY_TYPE);
++
++ TEST_ASSERT_EQUAL(status, 0, "Test failed");
++
++ return TEST_SUCCESS;
++}
++
++static int
+ test_3DES_cipheronly_qat_all(void)
+ {
+ struct crypto_testsuite_params *ts_params = &testsuite_params;
+@@ -9646,6 +9758,38 @@ static struct unit_test_suite cryptodev_mrvl_testsuite = {
+ }
+ };
+
++static struct unit_test_suite cryptodev_ccp_testsuite = {
++ .suite_name = "Crypto Device CCP Unit Test Suite",
++ .setup = testsuite_setup,
++ .teardown = testsuite_teardown,
++ .unit_test_cases = {
++ TEST_CASE_ST(ut_setup, ut_teardown, test_multi_session),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ test_multi_session_random_usage),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ test_AES_chain_ccp_all),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ test_AES_cipheronly_ccp_all),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ test_3DES_chain_ccp_all),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ test_3DES_cipheronly_ccp_all),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ test_authonly_ccp_all),
++
++ /** Negative tests */
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ authentication_verify_HMAC_SHA1_fail_data_corrupt),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ authentication_verify_HMAC_SHA1_fail_tag_corrupt),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ auth_decryption_AES128CBC_HMAC_SHA1_fail_data_corrupt),
++ TEST_CASE_ST(ut_setup, ut_teardown,
++ auth_decryption_AES128CBC_HMAC_SHA1_fail_tag_corrupt),
++
++ TEST_CASES_END() /**< NULL terminate unit test array */
++ }
++};
+
+ static int
+ test_cryptodev_qat(void /*argv __rte_unused, int argc __rte_unused*/)
+@@ -9867,6 +10011,22 @@ test_cryptodev_dpaa_sec(void /*argv __rte_unused, int argc __rte_unused*/)
+ return unit_test_suite_runner(&cryptodev_dpaa_sec_testsuite);
+ }
+
++static int
++test_cryptodev_ccp(void)
++{
++ gbl_driver_id = rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
++
++ if (gbl_driver_id == -1) {
++ RTE_LOG(ERR, USER1, "CCP PMD must be loaded. Check if "
++ "CONFIG_RTE_LIBRTE_PMD_CCP is enabled "
++ "in config file to run this testsuite.\n");
++ return TEST_FAILED;
++ }
++
++ return unit_test_suite_runner(&cryptodev_ccp_testsuite);
++}
++
+ REGISTER_TEST_COMMAND(cryptodev_qat_autotest, test_cryptodev_qat);
+ REGISTER_TEST_COMMAND(cryptodev_aesni_mb_autotest, test_cryptodev_aesni_mb);
+ REGISTER_TEST_COMMAND(cryptodev_openssl_autotest, test_cryptodev_openssl);
+@@ -9879,3 +10039,4 @@ REGISTER_TEST_COMMAND(cryptodev_sw_armv8_autotest, test_cryptodev_armv8);
+ REGISTER_TEST_COMMAND(cryptodev_sw_mrvl_autotest, test_cryptodev_mrvl);
+ REGISTER_TEST_COMMAND(cryptodev_dpaa2_sec_autotest, test_cryptodev_dpaa2_sec);
+ REGISTER_TEST_COMMAND(cryptodev_dpaa_sec_autotest, test_cryptodev_dpaa_sec);
++REGISTER_TEST_COMMAND(cryptodev_ccp_autotest, test_cryptodev_ccp);
+diff --git a/test/test/test_cryptodev.h b/test/test/test_cryptodev.h
+index 8cdc087..d45fb7b 100644
+--- a/test/test/test_cryptodev.h
++++ b/test/test/test_cryptodev.h
+@@ -61,6 +61,7 @@
+ #define CRYPTODEV_NAME_DPAA2_SEC_PMD crypto_dpaa2_sec
+ #define CRYPTODEV_NAME_SCHEDULER_PMD crypto_scheduler
+ #define CRYPTODEV_NAME_MRVL_PMD crypto_mrvl
++#define CRYPTODEV_NAME_CCP_PMD crypto_ccp
+
+ /**
+ * Write (spread) data from buffer to mbuf data
+diff --git a/test/test/test_cryptodev_aes_test_vectors.h b/test/test/test_cryptodev_aes_test_vectors.h
+index 3577ef4..6f2422a 100644
+--- a/test/test/test_cryptodev_aes_test_vectors.h
++++ b/test/test/test_cryptodev_aes_test_vectors.h
+@@ -1171,7 +1171,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CTR HMAC-SHA1 Decryption Digest "
+@@ -1184,7 +1185,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-192-CTR XCBC Encryption Digest",
+@@ -1223,7 +1225,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-256-CTR HMAC-SHA1 Decryption Digest "
+@@ -1236,7 +1239,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest",
+@@ -1249,7 +1253,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
+@@ -1285,7 +1290,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Decryption Digest "
+@@ -1315,7 +1321,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA256 Encryption Digest "
+@@ -1337,7 +1344,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA256 Decryption Digest "
+@@ -1357,7 +1365,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
+@@ -1366,7 +1375,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_MB |
+- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Encryption Digest "
+@@ -1390,7 +1400,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA512 Decryption Digest "
+@@ -1455,7 +1466,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA224 Decryption Digest "
+@@ -1467,7 +1479,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA384 Encryption Digest",
+@@ -1479,7 +1492,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA384 Decryption Digest "
+@@ -1492,7 +1506,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC HMAC-SHA1 Encryption Digest "
+@@ -1501,7 +1516,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr =
+@@ -1511,7 +1527,8 @@ static const struct blockcipher_test_case aes_chain_test_cases[] = {
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_ARMV8 |
+- BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ };
+
+@@ -1526,7 +1543,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CBC Decryption",
+@@ -1538,7 +1556,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-192-CBC Encryption",
+@@ -1549,7 +1568,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-192-CBC Encryption Scater gather",
+@@ -1570,7 +1590,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-192-CBC Decryption Scatter Gather",
+@@ -1590,7 +1611,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-256-CBC Decryption",
+@@ -1602,7 +1624,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-256-CBC OOP Encryption",
+@@ -1612,7 +1635,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-256-CBC OOP Decryption",
+@@ -1622,7 +1646,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CTR Encryption",
+@@ -1634,7 +1659,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CTR Decryption",
+@@ -1646,7 +1672,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-192-CTR Encryption",
+@@ -1657,7 +1684,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-192-CTR Decryption",
+@@ -1668,7 +1696,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_MB |
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-256-CTR Encryption",
+@@ -1680,7 +1709,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-256-CTR Decryption",
+@@ -1692,7 +1722,8 @@ static const struct blockcipher_test_case aes_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "AES-128-CTR Encryption (12-byte IV)",
+diff --git a/test/test/test_cryptodev_blockcipher.c b/test/test/test_cryptodev_blockcipher.c
+index ed06618..5835b3e 100644
+--- a/test/test/test_cryptodev_blockcipher.c
++++ b/test/test/test_cryptodev_blockcipher.c
+@@ -54,6 +54,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
+
+ int openssl_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
++ int ccp_pmd = rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+ int scheduler_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_SCHEDULER_PMD));
+ int armv8_pmd = rte_cryptodev_driver_id_get(
+@@ -94,7 +96,8 @@ test_blockcipher_one_case(const struct blockcipher_test_case *t,
+ driver_id == qat_pmd ||
+ driver_id == openssl_pmd ||
+ driver_id == armv8_pmd ||
+- driver_id == mrvl_pmd) { /* Fall through */
++ driver_id == mrvl_pmd ||
++ driver_id == ccp_pmd) { /* Fall through */
+ digest_len = tdata->digest.len;
+ } else if (driver_id == aesni_mb_pmd ||
+ driver_id == scheduler_pmd) {
+@@ -555,6 +558,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
+
+ int openssl_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_OPENSSL_PMD));
++ int ccp_pmd = rte_cryptodev_driver_id_get(
++ RTE_STR(CRYPTODEV_NAME_CCP_PMD));
+ int dpaa2_sec_pmd = rte_cryptodev_driver_id_get(
+ RTE_STR(CRYPTODEV_NAME_DPAA2_SEC_PMD));
+ int dpaa_sec_pmd = rte_cryptodev_driver_id_get(
+@@ -627,6 +632,8 @@ test_blockcipher_all_tests(struct rte_mempool *mbuf_pool,
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER;
+ else if (driver_id == dpaa2_sec_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC;
++ else if (driver_id == ccp_pmd)
++ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_CCP;
+ else if (driver_id == dpaa_sec_pmd)
+ target_pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC;
+ else if (driver_id == mrvl_pmd)
+diff --git a/test/test/test_cryptodev_blockcipher.h b/test/test/test_cryptodev_blockcipher.h
+index edbdaab..93ef0ae 100644
+--- a/test/test/test_cryptodev_blockcipher.h
++++ b/test/test/test_cryptodev_blockcipher.h
+@@ -27,6 +27,7 @@
+ #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC 0x0020 /* DPAA2_SEC flag */
+ #define BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC 0x0040 /* DPAA_SEC flag */
+ #define BLOCKCIPHER_TEST_TARGET_PMD_MRVL 0x0080 /* Marvell flag */
++#define BLOCKCIPHER_TEST_TARGET_PMD_CCP 0x0100 /* CCP flag */
+
+ #define BLOCKCIPHER_TEST_OP_CIPHER (BLOCKCIPHER_TEST_OP_ENCRYPT | \
+ BLOCKCIPHER_TEST_OP_DECRYPT)
+diff --git a/test/test/test_cryptodev_des_test_vectors.h b/test/test/test_cryptodev_des_test_vectors.h
+index 0be809e..a30317c 100644
+--- a/test/test/test_cryptodev_des_test_vectors.h
++++ b/test/test/test_cryptodev_des_test_vectors.h
+@@ -1044,7 +1044,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-128-CBC HMAC-SHA1 Decryption Digest Verify",
+@@ -1053,19 +1054,22 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-128-CBC SHA1 Encryption Digest",
+ .test_data = &triple_des128cbc_sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-128-CBC SHA1 Decryption Digest Verify",
+ .test_data = &triple_des128cbc_sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-192-CBC HMAC-SHA1 Encryption Digest",
+@@ -1075,7 +1079,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-192-CBC HMAC-SHA1 Decryption Digest Verify",
+@@ -1085,21 +1090,24 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-192-CBC SHA1 Encryption Digest",
+ .test_data = &triple_des192cbc_sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-192-CBC SHA1 Decryption Digest Verify",
+ .test_data = &triple_des192cbc_sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-128-CTR HMAC-SHA1 Encryption Digest",
+@@ -1180,7 +1188,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
+ .test_data = &triple_des128cbc_hmac_sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_ENC_AUTH_GEN,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr =
+@@ -1189,7 +1198,8 @@ static const struct blockcipher_test_case triple_des_chain_test_cases[] = {
+ .test_data = &triple_des128cbc_hmac_sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY_DEC,
+ .feature_mask = BLOCKCIPHER_TEST_FEATURE_SESSIONLESS,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ };
+
+@@ -1201,7 +1211,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-128-CBC Decryption",
+@@ -1210,7 +1221,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
+ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC
++ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-192-CBC Encryption",
+@@ -1220,7 +1232,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-192-CBC Decryption",
+@@ -1230,7 +1243,8 @@ static const struct blockcipher_test_case triple_des_cipheronly_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_MRVL
++ BLOCKCIPHER_TEST_TARGET_PMD_MRVL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "3DES-128-CTR Encryption",
+diff --git a/test/test/test_cryptodev_hash_test_vectors.h b/test/test/test_cryptodev_hash_test_vectors.h
+index 93dacb7..6b882ae 100644
+--- a/test/test/test_cryptodev_hash_test_vectors.h
++++ b/test/test/test_cryptodev_hash_test_vectors.h
+@@ -358,13 +358,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ .test_descr = "SHA1 Digest",
+ .test_data = &sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA1 Digest Verify",
+ .test_data = &sha1_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA1 Digest",
+@@ -375,7 +377,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA1 Digest Scatter Gather",
+@@ -394,7 +397,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA1 Digest Verify Scatter Gather",
+@@ -408,13 +412,15 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ .test_descr = "SHA224 Digest",
+ .test_data = &sha224_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA224 Digest Verify",
+ .test_data = &sha224_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA224 Digest",
+@@ -425,7 +431,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA224 Digest Verify",
+@@ -436,19 +443,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA256 Digest",
+ .test_data = &sha256_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA256 Digest Verify",
+ .test_data = &sha256_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA256 Digest",
+@@ -459,7 +469,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA256 Digest Verify",
+@@ -470,19 +481,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA384 Digest",
+ .test_data = &sha384_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA384 Digest Verify",
+ .test_data = &sha384_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA384 Digest",
+@@ -493,7 +507,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA384 Digest Verify",
+@@ -504,19 +519,22 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA512 Digest",
+ .test_data = &sha512_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_GEN,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "SHA512 Digest Verify",
+ .test_data = &sha512_test_vector,
+ .op_mask = BLOCKCIPHER_TEST_OP_AUTH_VERIFY,
+- .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL
++ .pmd_mask = BLOCKCIPHER_TEST_TARGET_PMD_OPENSSL |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA512 Digest",
+@@ -527,7 +545,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ {
+ .test_descr = "HMAC-SHA512 Digest Verify",
+@@ -538,7 +557,8 @@ static const struct blockcipher_test_case hash_test_cases[] = {
+ BLOCKCIPHER_TEST_TARGET_PMD_SCHEDULER |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA2_SEC |
+ BLOCKCIPHER_TEST_TARGET_PMD_DPAA_SEC |
+- BLOCKCIPHER_TEST_TARGET_PMD_QAT
++ BLOCKCIPHER_TEST_TARGET_PMD_QAT |
++ BLOCKCIPHER_TEST_TARGET_PMD_CCP
+ },
+ };
+
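For context on the pmd_mask hunks above: test_blockcipher_all_tests() maps the
driver under test to a single BLOCKCIPHER_TEST_TARGET_PMD_* bit (the new branch
selects BLOCKCIPHER_TEST_TARGET_PMD_CCP), and a test vector is executed only
when that bit is present in the case's pmd_mask. A minimal standalone sketch of
that gating follows; the trimmed struct, the PMD_* macros and the 0x0100 value
are assumptions for illustration, not the upstream definitions.

    /* Sketch only: how a per-PMD target bit gates block-cipher test cases. */
    #include <stdint.h>
    #include <stdio.h>

    struct tc {
        const char *test_descr;
        uint32_t pmd_mask;          /* OR of per-PMD target bits */
    };

    #define PMD_OPENSSL 0x0004      /* hypothetical bits for this example */
    #define PMD_CCP     0x0100

    int main(void)
    {
        const struct tc cases[] = {
            { "SHA1 Digest",         PMD_OPENSSL | PMD_CCP },
            { "AES-256-CBC OOP Enc", PMD_OPENSSL },
        };
        uint32_t target = PMD_CCP;  /* driver_id resolved to the CCP PMD */

        for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
            if (cases[i].pmd_mask & target) /* skip unclaimed vectors */
                printf("running: %s\n", cases[i].test_descr);
        return 0;
    }

Only the first case prints here, which mirrors how OR-ing the CCP bit into a
vector's pmd_mask in the patch opts that vector in for the CCP PMD run.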
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-19-20-doc-add-document-for-AMD-CCP-crypto-poll-mode-driver.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-19-20-doc-add-document-for-AMD-CCP-crypto-poll-mode-driver.patch
new file mode 100644
index 00000000..a40809d3
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-19-20-doc-add-document-for-AMD-CCP-crypto-poll-mode-driver.patch
@@ -0,0 +1,263 @@
+From patchwork Fri Mar 9 08:35:19 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 8bit
+Subject: [dpdk-dev, v4,
+ 19/20] doc: add document for AMD CCP crypto poll mode driver
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35818
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-19-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:19 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/cryptodevs/ccp.rst | 127 +++++++++++++++++++++++++++++
+ doc/guides/cryptodevs/features/ccp.ini | 57 +++++++++++++
+ doc/guides/cryptodevs/features/default.ini | 12 +++
+ doc/guides/cryptodevs/index.rst | 1 +
+ 4 files changed, 197 insertions(+)
+ create mode 100644 doc/guides/cryptodevs/ccp.rst
+ create mode 100644 doc/guides/cryptodevs/features/ccp.ini
+
+diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
+new file mode 100644
+index 0000000..59b9281
+--- /dev/null
++++ b/doc/guides/cryptodevs/ccp.rst
+@@ -0,0 +1,127 @@
++.. Copyright(c) 2017 Advanced Micro Devices, Inc.
++ All rights reserved.
++
++ Redistribution and use in source and binary forms, with or without
++ modification, are permitted provided that the following conditions
++ are met:
++
++ * Redistributions of source code must retain the above copyright
++ notice, this list of conditions and the following disclaimer.
++ * Redistributions in binary form must reproduce the above copyright
++ notice, this list of conditions and the following disclaimer in the
++ documentation and/or other materials provided with the distribution.
++ * Neither the name of the copyright holder nor the names of its
++ contributors may be used to endorse or promote products derived from
++ this software without specific prior written permission.
++
++ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
++ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
++ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
++ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
++ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
++ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
++ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
++ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++
++AMD CCP Poll Mode Driver
++========================
++
++This code provides the initial implementation of the ccp poll mode driver.
++The CCP poll mode driver library (librte_pmd_ccp) implements support for
++AMD’s cryptographic co-processor (CCP). The CCP PMD is a virtual crypto
++poll mode driver which schedules crypto operations to one or more available
++CCP hardware engines on the platform. The CCP PMD provides poll mode crypto
++driver support for the following hardware accelerator devices::
++
++ AMD Cryptographic Co-processor (0x1456)
++ AMD Cryptographic Co-processor (0x1468)
++
++Features
++--------
++
++CCP crypto PMD has support for:
++
++Cipher algorithms:
++
++* ``RTE_CRYPTO_CIPHER_AES_CBC``
++* ``RTE_CRYPTO_CIPHER_AES_ECB``
++* ``RTE_CRYPTO_CIPHER_AES_CTR``
++* ``RTE_CRYPTO_CIPHER_3DES_CBC``
++
++Hash algorithms:
++
++* ``RTE_CRYPTO_AUTH_SHA1``
++* ``RTE_CRYPTO_AUTH_SHA1_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA224``
++* ``RTE_CRYPTO_AUTH_SHA224_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA256``
++* ``RTE_CRYPTO_AUTH_SHA256_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA384``
++* ``RTE_CRYPTO_AUTH_SHA384_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA512``
++* ``RTE_CRYPTO_AUTH_SHA512_HMAC``
++* ``RTE_CRYPTO_AUTH_MD5_HMAC``
++* ``RTE_CRYPTO_AUTH_AES_CMAC``
++* ``RTE_CRYPTO_AUTH_SHA3_224``
++* ``RTE_CRYPTO_AUTH_SHA3_224_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA3_256``
++* ``RTE_CRYPTO_AUTH_SHA3_256_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA3_384``
++* ``RTE_CRYPTO_AUTH_SHA3_384_HMAC``
++* ``RTE_CRYPTO_AUTH_SHA3_512``
++* ``RTE_CRYPTO_AUTH_SHA3_512_HMAC``
++
++AEAD algorithms:
++
++* ``RTE_CRYPTO_AEAD_AES_GCM``
++
++Installation
++------------
++
++To compile the CCP PMD, it has to be enabled in the config/common_base file.
++* ``CONFIG_RTE_LIBRTE_PMD_CCP=y``
++
++The CCP PMD also supports computing authentication on the CPU with the cipher
++offloaded to the CCP. To enable this feature, set the following option.
++* ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y``
++
++This code was verified on Ubuntu 16.04.
++
++Initialization
++--------------
++
++Bind the CCP devices to the DPDK UIO driver module before running the CCP PMD.
++e.g. for the 0x1456 device::
++
++ cd to the top-level DPDK directory
++ modprobe uio
++ insmod ./build/kmod/igb_uio.ko
++ echo "1022 1456" > /sys/bus/pci/drivers/igb_uio/new_id
++
++Another way to bind the CCP devices to the DPDK UIO driver is to use the ``dpdk-devbind.py`` script.
++The following command assumes a ``BDF`` of ``0000:09:00.2``::
++
++ cd to the top-level DPDK directory
++ ./usertools/dpdk-devbind.py -b igb_uio 0000:09:00.2
++
++To verify real traffic, the l2fwd-crypto example can be used with the following command:
++
++.. code-block:: console
++
++ sudo ./build/l2fwd-crypto -l 1 -n 4 --vdev "crypto_ccp" -- -p 0x1
++ --chain CIPHER_HASH --cipher_op ENCRYPT --cipher_algo AES_CBC
++ --cipher_key 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:0f
++ --iv 00:01:02:03:04:05:06:07:08:09:0a:0b:0c:0d:0e:ff
++ --auth_op GENERATE --auth_algo SHA1_HMAC
++ --auth_key 11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
++ :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
++ :11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11:11
++
++Limitations
++-----------
++
++* Chained mbufs are not supported
++* MD5_HMAC is supported only if ``CONFIG_RTE_LIBRTE_PMD_CCP_CPU_AUTH=y`` is enabled in the configuration
+diff --git a/doc/guides/cryptodevs/features/ccp.ini b/doc/guides/cryptodevs/features/ccp.ini
+new file mode 100644
+index 0000000..add4bd8
+--- /dev/null
++++ b/doc/guides/cryptodevs/features/ccp.ini
+@@ -0,0 +1,57 @@
++;
++; Supported features of the 'ccp' crypto poll mode driver.
++;
++; Refer to default.ini for the full list of available PMD features.
++;
++[Features]
++Symmetric crypto = Y
++Sym operation chaining = Y
++HW Accelerated = Y
++
++;
++; Supported crypto algorithms of the 'ccp' crypto driver.
++;
++[Cipher]
++AES CBC (128) = Y
++AES CBC (192) = Y
++AES CBC (256) = Y
++AES ECB (128) = Y
++AES ECB (192) = Y
++AES ECB (256) = Y
++AES CTR (128) = Y
++AES CTR (192) = Y
++AES CTR (256) = Y
++3DES CBC = Y
++
++;
++; Supported authentication algorithms of the 'ccp' crypto driver.
++;
++[Auth]
++MD5 HMAC = Y
++SHA1 = Y
++SHA1 HMAC = Y
++SHA224 = Y
++SHA224 HMAC = Y
++SHA256 = Y
++SHA256 HMAC = Y
++SHA384 = Y
++SHA384 HMAC = Y
++SHA512 = Y
++SHA512 HMAC = Y
++AES CMAC = Y
++SHA3_224 = Y
++SHA3_224 HMAC = Y
++SHA3_256 = Y
++SHA3_256 HMAC = Y
++SHA3_384 = Y
++SHA3_384 HMAC = Y
++SHA3_512 = Y
++SHA3_512 HMAC = Y
++
++;
++; Supported AEAD algorithms of the 'ccp' crypto driver.
++;
++[AEAD]
++AES GCM (128) = Y
++AES GCM (192) = Y
++AES GCM (256) = Y
+diff --git a/doc/guides/cryptodevs/features/default.ini b/doc/guides/cryptodevs/features/default.ini
+index 728ce3b..aa1ca31 100644
+--- a/doc/guides/cryptodevs/features/default.ini
++++ b/doc/guides/cryptodevs/features/default.ini
+@@ -28,6 +28,9 @@ NULL =
+ AES CBC (128) =
+ AES CBC (192) =
+ AES CBC (256) =
++AES ECB (128) =
++AES ECB (192) =
++AES ECB (256) =
+ AES CTR (128) =
+ AES CTR (192) =
+ AES CTR (256) =
+@@ -62,6 +65,15 @@ AES GMAC =
+ SNOW3G UIA2 =
+ KASUMI F9 =
+ ZUC EIA3 =
++AES CMAC =
++SHA3_224 =
++SHA3_224 HMAC =
++SHA3_256 =
++SHA3_256 HMAC =
++SHA3_384 =
++SHA3_384 HMAC =
++SHA3_512 =
++SHA3_512 HMAC =
+
+ ;
+ ; Supported AEAD algorithms of a default crypto driver.
+diff --git a/doc/guides/cryptodevs/index.rst b/doc/guides/cryptodevs/index.rst
+index 558c926..8a921dd 100644
+--- a/doc/guides/cryptodevs/index.rst
++++ b/doc/guides/cryptodevs/index.rst
+@@ -13,6 +13,7 @@ Crypto Device Drivers
+ aesni_mb
+ aesni_gcm
+ armv8
++ ccp
+ dpaa2_sec
+ dpaa_sec
+ kasumi
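As a companion to the Initialization section of the guide added above: besides
binding the 0x1456/0x1468 devices to igb_uio and passing --vdev "crypto_ccp" to
an application such as l2fwd-crypto, the vdev can also be created
programmatically. A minimal sketch, assuming the 18.02-era APIs used elsewhere
in this series (rte_vdev_init, rte_cryptodev_get_dev_id,
rte_cryptodev_driver_id_get) and assuming the resulting cryptodev is named
"crypto_ccp"; error handling is trimmed.

    #include <stdio.h>
    #include <rte_eal.h>
    #include <rte_bus_vdev.h>
    #include <rte_cryptodev.h>

    int main(int argc, char **argv)
    {
        if (rte_eal_init(argc, argv) < 0)
            return -1;

        /* Same effect as --vdev "crypto_ccp" on the command line. */
        if (rte_vdev_init("crypto_ccp", NULL) != 0) {
            printf("crypto_ccp vdev not available (is CONFIG_RTE_LIBRTE_PMD_CCP enabled?)\n");
            return -1;
        }

        int dev_id = rte_cryptodev_get_dev_id("crypto_ccp");
        int drv_id = rte_cryptodev_driver_id_get("crypto_ccp");
        printf("ccp cryptodev id %d, driver id %d\n", dev_id, drv_id);
        return 0;
    }

The driver id printed here is the same value the cryptodev unit tests compare
against ccp_pmd in test_cryptodev_blockcipher.c.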
diff --git a/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-20-20-crypto-ccp-moved-license-headers-to-SPDX-format.patch b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-20-20-crypto-ccp-moved-license-headers-to-SPDX-format.patch
new file mode 100644
index 00000000..43d8130b
--- /dev/null
+++ b/common/dpdk/recipes-extended/dpdk/dpdk/dpdk-dev-v4-20-20-crypto-ccp-moved-license-headers-to-SPDX-format.patch
@@ -0,0 +1,446 @@
+From patchwork Fri Mar 9 08:35:20 2018
+Content-Type: text/plain; charset="utf-8"
+MIME-Version: 1.0
+Content-Transfer-Encoding: 7bit
+Subject: [dpdk-dev,v4,20/20] crypto/ccp: moved license headers to SPDX format
+From: Ravi Kumar <ravi1.kumar@amd.com>
+X-Patchwork-Id: 35819
+X-Patchwork-Delegate: pablo.de.lara.guarch@intel.com
+Message-Id: <1520584520-130522-20-git-send-email-Ravi1.kumar@amd.com>
+List-Id: dev.dpdk.org
+To: dev@dpdk.org
+Cc: pablo.de.lara.guarch@intel.com
+Date: Fri, 9 Mar 2018 03:35:20 -0500
+
+Signed-off-by: Ravi Kumar <Ravi1.kumar@amd.com>
+---
+ doc/guides/cryptodevs/ccp.rst | 29 ++---------------------------
+ drivers/crypto/ccp/Makefile | 30 ++----------------------------
+ drivers/crypto/ccp/ccp_crypto.c | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_crypto.h | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_dev.c | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_dev.h | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_pci.c | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_pci.h | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_pmd_ops.c | 29 ++---------------------------
+ drivers/crypto/ccp/ccp_pmd_private.h | 29 ++---------------------------
+ drivers/crypto/ccp/rte_ccp_pmd.c | 29 ++---------------------------
+ 11 files changed, 22 insertions(+), 298 deletions(-)
+
+diff --git a/doc/guides/cryptodevs/ccp.rst b/doc/guides/cryptodevs/ccp.rst
+index 59b9281..9c0e428 100644
+--- a/doc/guides/cryptodevs/ccp.rst
++++ b/doc/guides/cryptodevs/ccp.rst
+@@ -1,30 +1,5 @@
+-.. Copyright(c) 2017 Advanced Micro Devices, Inc.
+- All rights reserved.
+-
+- Redistribution and use in source and binary forms, with or without
+- modification, are permitted provided that the following conditions
+- are met:
+-
+- * Redistributions of source code must retain the above copyright
+- notice, this list of conditions and the following disclaimer.
+- * Redistributions in binary form must reproduce the above copyright
+- notice, this list of conditions and the following disclaimer in the
+- documentation and/or other materials provided with the distribution.
+- * Neither the name of the copyright holder nor the names of its
+- contributors may be used to endorse or promote products derived from
+- this software without specific prior written permission.
+-
+- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++.. Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ SPDX-License-Identifier: BSD-3-Clause
+
+ AMD CCP Poll Mode Driver
+ ========================
+diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
+index 5241465..1475a6c 100644
+--- a/drivers/crypto/ccp/Makefile
++++ b/drivers/crypto/ccp/Makefile
+@@ -1,31 +1,5 @@
+-#
+-# Copyright(c) 2018 Advanced Micro Devices, Inc.
+-# All rights reserved.
+-#
+-# Redistribution and use in source and binary forms, with or without
+-# modification, are permitted provided that the following conditions
+-# are met:
+-#
+-# * Redistributions of source code must retain the above copyright
+-# notice, this list of conditions and the following disclaimer.
+-# * Redistributions in binary form must reproduce the above copyright
+-# notice, this list of conditions and the following disclaimer in the
+-# documentation and/or other materials provided with the distribution.
+-# * Neither the name of the copyright holder nor the names of its
+-# contributors may be used to endorse or promote products derived from
+-# this software without specific prior written permission.
+-#
+-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+-# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++# Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++# SPDX-License-Identifier: BSD-3-Clause
+
+ include $(RTE_SDK)/mk/rte.vars.mk
+
+diff --git a/drivers/crypto/ccp/ccp_crypto.c b/drivers/crypto/ccp/ccp_crypto.c
+index 133db76..615a8c5 100644
+--- a/drivers/crypto/ccp/ccp_crypto.c
++++ b/drivers/crypto/ccp/ccp_crypto.c
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include <dirent.h>
+diff --git a/drivers/crypto/ccp/ccp_crypto.h b/drivers/crypto/ccp/ccp_crypto.h
+index f526329..6a4fa69 100644
+--- a/drivers/crypto/ccp/ccp_crypto.h
++++ b/drivers/crypto/ccp/ccp_crypto.h
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef _CCP_CRYPTO_H_
+diff --git a/drivers/crypto/ccp/ccp_dev.c b/drivers/crypto/ccp/ccp_dev.c
+index d8c0ab4..7ad1227 100644
+--- a/drivers/crypto/ccp/ccp_dev.c
++++ b/drivers/crypto/ccp/ccp_dev.c
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include <dirent.h>
+diff --git a/drivers/crypto/ccp/ccp_dev.h b/drivers/crypto/ccp/ccp_dev.h
+index 759afc1..6f3ad5b 100644
+--- a/drivers/crypto/ccp/ccp_dev.h
++++ b/drivers/crypto/ccp/ccp_dev.h
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef _CCP_DEV_H_
+diff --git a/drivers/crypto/ccp/ccp_pci.c b/drivers/crypto/ccp/ccp_pci.c
+index ddf4b49..d5e0eeb 100644
+--- a/drivers/crypto/ccp/ccp_pci.c
++++ b/drivers/crypto/ccp/ccp_pci.c
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include <dirent.h>
+diff --git a/drivers/crypto/ccp/ccp_pci.h b/drivers/crypto/ccp/ccp_pci.h
+index a4c09c8..8f98976 100644
+--- a/drivers/crypto/ccp/ccp_pci.h
++++ b/drivers/crypto/ccp/ccp_pci.h
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef _CCP_PCI_H_
+diff --git a/drivers/crypto/ccp/ccp_pmd_ops.c b/drivers/crypto/ccp/ccp_pmd_ops.c
+index 1b67070..9acde00 100644
+--- a/drivers/crypto/ccp/ccp_pmd_ops.c
++++ b/drivers/crypto/ccp/ccp_pmd_ops.c
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include <string.h>
+diff --git a/drivers/crypto/ccp/ccp_pmd_private.h b/drivers/crypto/ccp/ccp_pmd_private.h
+index cd9f6ae..6199849 100644
+--- a/drivers/crypto/ccp/ccp_pmd_private.h
++++ b/drivers/crypto/ccp/ccp_pmd_private.h
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #ifndef _CCP_PMD_PRIVATE_H_
+diff --git a/drivers/crypto/ccp/rte_ccp_pmd.c b/drivers/crypto/ccp/rte_ccp_pmd.c
+index 23d3af3..bb4d241 100644
+--- a/drivers/crypto/ccp/rte_ccp_pmd.c
++++ b/drivers/crypto/ccp/rte_ccp_pmd.c
+@@ -1,31 +1,6 @@
+ /*-
+- * Copyright(c) 2018 Advanced Micro Devices, Inc.
+- * All rights reserved.
+- *
+- * Redistribution and use in source and binary forms, with or without
+- * modification, are permitted provided that the following conditions
+- * are met:
+- *
+- * * Redistributions of source code must retain the above copyright
+- * notice, this list of conditions and the following disclaimer.
+- * * Redistributions in binary form must reproduce the above copyright
+- * notice, this list of conditions and the following disclaimer in the
+- * documentation and/or other materials provided with the distribution.
+- * * Neither the name of the copyright holder nor the names of its
+- * contributors may be used to endorse or promote products derived from
+- * this software without specific prior written permission.
+- *
+- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+- * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
++ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+ #include <rte_bus_pci.h>