aboutsummaryrefslogtreecommitdiffstats
path: root/meta-v1000
diff options
context:
space:
mode:
Diffstat (limited to 'meta-v1000')
-rw-r--r--meta-v1000/conf/machine/v1000.conf5
-rw-r--r--meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools/0001-obey-CMAKE_INSTALL_LIBDIR.patch41
-rw-r--r--meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools_git.bb1
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/0001-Use-wayland-scanner-in-the-path.patch37
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/0001-winsys-svga-drm-Include-sys-types.h.patch34
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/0002-hardware-gloat.patch51
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/disable-asm-on-non-gcc.patch29
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/llvm-config-version.patch43
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/replace_glibc_check_with_linux.patch26
-rw-r--r--meta-v1000/recipes-graphics/mesa/files/vulkan-mkdir.patch35
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0001-Use-wayland-scanner-in-the-path.patch37
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0001-configure.ac-for-llvm-config-to-report-correct-libdi.patch56
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0001-fix-building-with-flex-2.6.2.patch72
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0001-winsys-svga-drm-Include-sys-types.h.patch34
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0002-configure.ac-fix-the-llvm-version-correctly.patch38
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0002-hardware-gloat.patch51
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/0003-strip-llvm-ldflags.patch33
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/disable-asm-on-non-gcc.patch29
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/llvm-config-version.patch43
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/replace_glibc_check_with_linux.patch26
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa/vulkan-mkdir.patch35
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa_17.3.%.bbappend11
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bb22
-rw-r--r--meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bbappend67
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware.bb28
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/LICENSE51
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_ce.binbin0 -> 8832 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_gpu_info.binbin0 -> 316 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_me.binbin0 -> 17024 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec.binbin0 -> 262784 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec2.binbin0 -> 262784 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_pfp.binbin0 -> 21120 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_rlc.binbin0 -> 26436 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_sdma.binbin0 -> 16896 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_vcn.binbin0 -> 339296 bytes
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2912-drm-amdkfd-Reset-process-queues-if-it-VM_FAULTs.patch31
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2913-drm-amdkfd-Support-registering-third-pary-device-mem.patch71
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2914-drm-amdkfd-Address-kernel-warning.patch26
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2915-drm-amdkfd-Handle-MEM_VIOL-in-trap-handler.patch66
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2916-drm-amd-Add-mqd-as-parameter-in-kfd2kgd.hqd_destroy-.patch177
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2917-drm-amdkfd-Fix-a-bug-that-process-cleanup-is-not-don.patch46
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2918-drm-amdkfd-Fix-a-bug-that-vmid-is-released-before.patch164
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2919-drm-amd-Implement-parallel-memory-mapping-on-mGPUs.patch101
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2920-drm-amdkfd-gfx9-preempt-queues-after-VM_FAULT.patch31
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2921-drm-amd-pp-Read-the-maximum-clock-frequency-from.patch36
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2922-AMD-XGBE-support.patch11852
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2923-amd-xgbe-clocksource-Use-a-plain-u64-instead-of.patch145
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2924-amd-xgbe-Fix-IRQ-processing-when-running-in-single.patch42
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2925-amd-xgbe-Update-PCI-support-to-use-new-IRQ.patch246
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2926-amd-xgbe-Add-a-hardware-quirk-for-register.patch107
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2927-amd-xgbe-Check-xgbe_init-return-code.patch54
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2928-amd-xgbe-Stop-the-PHY-before-releasing-interrupts.patch43
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2929-amd-xgbe-Be-sure-to-set-MDIO-modes-on-device.patch76
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2930-amd-xgbe-Don-t-overwrite-SFP-PHY-mod_absent.patch34
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2931-net-busy-poll-allow-preemption-in-sk_busy_loop.patch268
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2932-net-busy-poll-return-busypolling-status-to.patch109
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2933-net-remove-__napi_complete-All-__napi_complete.patch47
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2934-amd-xgbe-Enable-IRQs-only-if-napi_complete_done.patch54
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2935-amd-xgbe-Fix-the-ECC-related-bit-position.patch69
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2936-net-ethernet-update-drivers-to-make-both-SW-and.patch40
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2937-amd-xgbe-use-PAGE_ALLOC_COSTLY_ORDER-in.patch51
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2938-amd-xgbe-Simplify-mailbox-interface-rate-change.patch289
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2939-amd-xgbe-Fix-SFP-PHY-supported-advertised-settings.patch170
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2940-amd-xgbe-Use-the-proper-register-during-PTP.patch36
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2941-amd-xgbe-Add-a-check-for-an-skb-in-the-timestamp.patch47
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2942-amd-xgbe-Prevent-looping-forever-if-timestamp.patch65
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2943-amd-xgbe-Handle-return-code-from-software-reset.patch38
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2944-amd-xgbe-Fixes-for-working-with-PHYs-that-support.patch52
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2945-amd-xgbe-Limit-the-I2C-error-messages-that-are.patch55
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2946-amd-xgbe-Re-issue-interrupt-if-interrupt-status.patch368
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2947-amd-xgbe-Add-NUMA-affinity-support-for-memory.patch933
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2948-amd-xgbe-Add-NUMA-affinity-support-for-IRQ-hints.patch109
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2949-amd-xgbe-Prepare-for-more-fine-grained-cache.patch172
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2950-amd-xgbe-Simplify-the-burst-length-settings.patch223
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2951-amd-xgbe-Adjust-register-settings-to-improve.patch212
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2952-amd-xgbe-fix-spelling-mistake-avialable.patch32
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2953-drivers-net-add-missing-interrupt.h-include.patch33
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2954-amd-xgbe-Set-the-MDIO-mode-for-10000Base-T.patch34
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2955-amd-xgbe-Set-the-MII-control-width-for-the-MAC.patch47
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2956-amd-xgbe-Be-sure-driver-shuts-down-cleanly-on.patch61
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2957-amd-xgbe-Update-TSO-packet-statistics-accuracy.patch35
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2958-amd-xgbe-Add-support-to-handle-device-renaming.patch305
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2959-amd-xgbe-Add-additional-dynamic-debug-messages.patch35
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2960-amd-xgbe-Optimize-DMA-channel-interrupt-enablement.patch230
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2961-amd-xgbe-Add-hardware-features-debug-output.patch116
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2962-amd-xgbe-Add-per-queue-Tx-and-Rx-statistics.patch168
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2963-amd-xgbe-Convert-to-using-the-new-link-mode.patch1349
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2964-net-ethtool-Add-macro-to-clear-a-link-mode.patch42
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2965-Fix-for-build-error.patch57
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2966-amd-xgbe-Add-support-for-VXLAN-offload.patch857
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2967-amd-xgbe-Add-additional-ethtool-statistics.patch108
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2968-amd-xgbe-Interrupt-summary-bits-are-h-w-version.patch90
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2969-Enabled-DMA-flags-in-eMMC-driver.patch30
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2970-drm-amd-display-amdgpu_dm-glmark2-hard-hang-fix.patch33
-rw-r--r--meta-v1000/recipes-kernel/linux/files/2971-ASoC-AMD-Enable-pci-bus-mastering-for-ACP3.x-device.patch33
-rw-r--r--meta-v1000/recipes-kernel/linux/files/v1000-extra-config.cfg1
-rw-r--r--meta-v1000/recipes-kernel/linux/files/v1000-user-config.cfg4
-rwxr-xr-xmeta-v1000/recipes-kernel/linux/files/v1000-user-patches.scc60
98 files changed, 20608 insertions, 842 deletions
diff --git a/meta-v1000/conf/machine/v1000.conf b/meta-v1000/conf/machine/v1000.conf
index 3ef09576..f5bac326 100644
--- a/meta-v1000/conf/machine/v1000.conf
+++ b/meta-v1000/conf/machine/v1000.conf
@@ -5,7 +5,7 @@
PREFERRED_PROVIDER_virtual/kernel ?= "linux-yocto"
PREFERRED_VERSION_linux-yocto ?= "4.9%"
-PREFERRED_VERSION_mesa ?= "17.3.0"
+PREFERRED_VERSION_mesa ?= "17.3.%"
require conf/machine/include/tune-v1000.inc
@@ -25,7 +25,8 @@ XSERVER ?= "${XSERVER_X86_BASE} \
"
KERNEL_MODULE_AUTOLOAD += "snd-soc-acp-pcm snd-soc-acp-rt286-mach"
-MACHINE_EXTRA_RRECOMMENDS_remove = "rtc-test smbus-test"
+MACHINE_EXTRA_RRECOMMENDS += "amdgpu-firmware grub-efi"
+MACHINE_EXTRA_RRECOMMENDS_remove = "rtc-test smbus-test grub"
# Setup a getty on all serial ports
SERIAL_CONSOLES ?= "115200;ttyS4 115200;ttyS5 115200;ttyUSB0"
diff --git a/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools/0001-obey-CMAKE_INSTALL_LIBDIR.patch b/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools/0001-obey-CMAKE_INSTALL_LIBDIR.patch
deleted file mode 100644
index 170f4116..00000000
--- a/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools/0001-obey-CMAKE_INSTALL_LIBDIR.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From eddbb9ad2edd22a5307e9ba000fc9a0979030064 Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Wed, 31 May 2017 12:38:07 +0500
-Subject: [PATCH] obey CMAKE_INSTALL_LIBDIR
-
-If the path to CMAKE_INSTALL_LIBDIR is not followed appropriately
-the installation will not work correctly on a multilib platofrm.
-
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- source/CMakeLists.txt | 4 ++--
- source/opt/CMakeLists.txt | 4 ++--
- 2 files changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/source/CMakeLists.txt b/source/CMakeLists.txt
-index 5586216..43d5be5 100644
---- a/source/CMakeLists.txt
-+++ b/source/CMakeLists.txt
-@@ -279,5 +279,5 @@ target_include_directories(${SPIRV_TOOLS}
-
- install(TARGETS ${SPIRV_TOOLS}
- RUNTIME DESTINATION bin
-- LIBRARY DESTINATION lib
-- ARCHIVE DESTINATION lib)
-+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
-+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
-diff --git a/source/opt/CMakeLists.txt b/source/opt/CMakeLists.txt
-index f1420e8..abc191f 100644
---- a/source/opt/CMakeLists.txt
-+++ b/source/opt/CMakeLists.txt
-@@ -65,5 +65,5 @@ target_link_libraries(SPIRV-Tools-opt
-
- install(TARGETS SPIRV-Tools-opt
- RUNTIME DESTINATION bin
-- LIBRARY DESTINATION lib
-- ARCHIVE DESTINATION lib)
-+ LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
-+ ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools_git.bb b/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools_git.bb
index d3071265..f2d80b44 100644
--- a/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools_git.bb
+++ b/meta-v1000/recipes-graphics/lunarg-sdk/spirv-tools_git.bb
@@ -18,7 +18,6 @@ SRCREV_spirv-tools = "5834719fc17d4735fce0102738b87b70255cfd5f"
SRCREV_spirv-headers = "061097878467b8e040fbf153a837d844ef9f9f96"
SRC_URI = "git://github.com/KhronosGroup/SPIRV-Tools;protocol=http;name=spirv-tools \
git://github.com/KhronosGroup/SPIRV-Headers;name=spirv-headers;destsuffix=${SPIRV_HEADERS_LOCATION} \
- file://0001-obey-CMAKE_INSTALL_LIBDIR.patch \
file://0002-spirv-lesspipe.sh-allow-using-generic-shells.patch"
do_install_append() {
diff --git a/meta-v1000/recipes-graphics/mesa/files/0001-Use-wayland-scanner-in-the-path.patch b/meta-v1000/recipes-graphics/mesa/files/0001-Use-wayland-scanner-in-the-path.patch
deleted file mode 100644
index eb6ff4ff..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/0001-Use-wayland-scanner-in-the-path.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From c908f0c13ac81a3a52140f129a13b2bc997ff4ee Mon Sep 17 00:00:00 2001
-From: Jussi Kukkonen <jussi.kukkonen@intel.com>
-Date: Tue, 15 Nov 2016 15:20:49 +0200
-Subject: [PATCH] Simplify wayland-scanner lookup
-
-Don't use pkg-config to lookup the path of a binary that's in the path.
-
-Alternatively we'd have to prefix the path returned by pkg-config with
-PKG_CONFIG_SYSROOT_DIR.
-
-Upstream-Status: Pending
-Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
----
- configure.ac | 7 +------
- 1 file changed, 1 insertion(+), 6 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 2c7e636fac..d2b2350739 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -2174,12 +2174,7 @@ if test "x$with_platforms" != xauto; then
- with_egl_platforms=$with_platforms
- fi
-
--PKG_CHECK_MODULES([WAYLAND_SCANNER], [wayland-scanner],
-- WAYLAND_SCANNER=`$PKG_CONFIG --variable=wayland_scanner wayland-scanner`,
-- WAYLAND_SCANNER='')
--if test "x$WAYLAND_SCANNER" = x; then
-- AC_PATH_PROG([WAYLAND_SCANNER], [wayland-scanner], [:])
--fi
-+AC_PATH_PROG([WAYLAND_SCANNER], [wayland-scanner])
-
- # Do per-EGL platform setups and checks
- egl_platforms=`IFS=', '; echo $with_egl_platforms`
---
-2.13.0
-
diff --git a/meta-v1000/recipes-graphics/mesa/files/0001-winsys-svga-drm-Include-sys-types.h.patch b/meta-v1000/recipes-graphics/mesa/files/0001-winsys-svga-drm-Include-sys-types.h.patch
deleted file mode 100644
index 549b8671..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/0001-winsys-svga-drm-Include-sys-types.h.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From d8750776404b1031d762966d0f551d13d2fe05a7 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Wed, 16 Aug 2017 18:58:20 -0700
-Subject: [PATCH] winsys/svga/drm: Include sys/types.h
-
-vmw_screen.h uses dev_t which is defines in sys/types.h
-this header is required to be included for getting dev_t
-definition. This issue happens on musl C library, it is hidden
-on glibc since sys/types.h is included through another
-system headers
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
-
-Upstream-Status: Submitted
-
- src/gallium/winsys/svga/drm/vmw_screen.h | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/src/gallium/winsys/svga/drm/vmw_screen.h b/src/gallium/winsys/svga/drm/vmw_screen.h
-index 0ef8e84..2eda97e 100644
---- a/src/gallium/winsys/svga/drm/vmw_screen.h
-+++ b/src/gallium/winsys/svga/drm/vmw_screen.h
-@@ -41,6 +41,7 @@
- #include "svga_winsys.h"
- #include "pipebuffer/pb_buffer_fenced.h"
- #include <os/os_thread.h>
-+#include <sys/types.h>
-
- #define VMW_GMR_POOL_SIZE (16*1024*1024)
- #define VMW_QUERY_POOL_SIZE (8192)
---
-2.14.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/files/0002-hardware-gloat.patch b/meta-v1000/recipes-graphics/mesa/files/0002-hardware-gloat.patch
deleted file mode 100644
index 0e014dcc..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/0002-hardware-gloat.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 00bcd599310dc7fce4fe336ffd85902429051a0c Mon Sep 17 00:00:00 2001
-From: Igor Gnatenko <i.gnatenko.brain@gmail.com>
-Date: Sun, 20 Mar 2016 13:27:04 +0100
-Subject: [PATCH 2/4] hardware gloat
-
-Upstream-Status: Inappropriate [not author]
-Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
----
- src/gallium/drivers/llvmpipe/lp_screen.c | 7 +++++++
- src/gallium/drivers/softpipe/sp_screen.c | 7 +++++++
- 2 files changed, 14 insertions(+)
-
-diff --git a/src/gallium/drivers/llvmpipe/lp_screen.c b/src/gallium/drivers/llvmpipe/lp_screen.c
-index 4f61de8..3b0ec77 100644
---- a/src/gallium/drivers/llvmpipe/lp_screen.c
-+++ b/src/gallium/drivers/llvmpipe/lp_screen.c
-@@ -411,6 +411,13 @@ llvmpipe_is_format_supported( struct pipe_screen *_screen,
- if (!format_desc)
- return FALSE;
-
-+ if ((bind & PIPE_BIND_RENDER_TARGET) &&
-+ format != PIPE_FORMAT_R9G9B9E5_FLOAT &&
-+ format != PIPE_FORMAT_R11G11B10_FLOAT &&
-+ util_format_is_float(format)) {
-+ return FALSE;
-+ }
-+
- assert(target == PIPE_BUFFER ||
- target == PIPE_TEXTURE_1D ||
- target == PIPE_TEXTURE_1D_ARRAY ||
-diff --git a/src/gallium/drivers/softpipe/sp_screen.c b/src/gallium/drivers/softpipe/sp_screen.c
-index 031602b..c279120 100644
---- a/src/gallium/drivers/softpipe/sp_screen.c
-+++ b/src/gallium/drivers/softpipe/sp_screen.c
-@@ -358,6 +358,13 @@ softpipe_is_format_supported( struct pipe_screen *screen,
- if (!format_desc)
- return FALSE;
-
-+ if ((bind & PIPE_BIND_RENDER_TARGET) &&
-+ format != PIPE_FORMAT_R9G9B9E5_FLOAT &&
-+ format != PIPE_FORMAT_R11G11B10_FLOAT &&
-+ util_format_is_float(format)) {
-+ return FALSE;
-+ }
-+
- if (sample_count > 1)
- return FALSE;
-
---
-2.7.4
-
diff --git a/meta-v1000/recipes-graphics/mesa/files/disable-asm-on-non-gcc.patch b/meta-v1000/recipes-graphics/mesa/files/disable-asm-on-non-gcc.patch
deleted file mode 100644
index d2d67558..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/disable-asm-on-non-gcc.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Configure checks for compiler to be gcc and then it enables asm_offsets
-generation. see
-
-https://cgit.freedesktop.org/mesa/mesa/commit/?id=73c9b4b0e05fc66629ba250846948dc55c0e7a0d
-
-However, we missed the check when enabling this on cross compilation
-when architecture for both host and target is x86
-
-Fixes errors like
-./gen_matypes > matypes.h
-/bin/bash: ./gen_matypes: No such file or directory
-
--Khem
-
-Upstream-Status: Submitted
-
-Index: mesa-12.0.1/configure.ac
-===================================================================
---- mesa-12.0.1.orig/configure.ac
-+++ mesa-12.0.1/configure.ac
-@@ -732,7 +732,7 @@ test "x$enable_asm" = xno && AC_MSG_RESU
- if test "x$enable_asm" = xyes -a "x$cross_compiling" = xyes; then
- case "$host_cpu" in
- i?86 | x86_64 | amd64)
-- if test "x$host_cpu" != "x$target_cpu"; then
-+ if test "x$host_cpu" != "x$target_cpu" -o "x$acv_mesa_CLANG" = xyes; then
- enable_asm=no
- AC_MSG_RESULT([no, cross compiling])
- fi
diff --git a/meta-v1000/recipes-graphics/mesa/files/llvm-config-version.patch b/meta-v1000/recipes-graphics/mesa/files/llvm-config-version.patch
deleted file mode 100644
index a65468cc..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/llvm-config-version.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 53142302c0794006261d97af3ffcab043f502d10 Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Fri, 15 Dec 2017 12:48:10 +0500
-Subject: [PATCH 1/2] When building llvm from git or svn it embeds the svn/git
- revision into internal version string
-
-$ /mnt/a/oe/build/tmp/work/corei7-64-bec-linux/mesa/2_17.1.5-r0/recipe-sysroot/usr/lib/llvm5.0/llvm-config-host --version
-5.0.0git-9a5c333388c
-
-We need to ignore everything after 5.0.0 which is what the cut cmd is doing
-
-Upstream-Status: Pending
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- configure.ac | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 8c95007..9c92b28 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -995,7 +995,7 @@ strip_unwanted_llvm_flags() {
-
- llvm_set_environment_variables() {
- if test "x$LLVM_CONFIG" != xno; then
-- LLVM_VERSION=`$LLVM_CONFIG --version | egrep -o '^[[0-9.]]+'`
-+ LLVM_VERSION=`$LLVM_CONFIG --version | cut -c1-5`
- LLVM_CPPFLAGS=`strip_unwanted_llvm_flags "$LLVM_CONFIG --cppflags"`
- LLVM_INCLUDEDIR=`$LLVM_CONFIG --includedir`
- LLVM_LIBDIR=`$LLVM_CONFIG --libdir`
-@@ -2639,7 +2639,7 @@ detect_old_buggy_llvm() {
- dnl ourselves.
- dnl (See https://llvm.org/bugs/show_bug.cgi?id=6823)
- dnl We can't use $LLVM_VERSION because it has 'svn' stripped out,
-- LLVM_SO_NAME=LLVM-`$LLVM_CONFIG --version`
-+ LLVM_SO_NAME=LLVM-`$LLVM_CONFIG --version|cut -c1-5`
- AS_IF([test -f "$LLVM_LIBDIR/lib$LLVM_SO_NAME.$IMP_LIB_EXT"], [llvm_have_one_so=yes])
-
- if test "x$llvm_have_one_so" = xyes; then
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/files/replace_glibc_check_with_linux.patch b/meta-v1000/recipes-graphics/mesa/files/replace_glibc_check_with_linux.patch
deleted file mode 100644
index 0280ee85..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/replace_glibc_check_with_linux.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-endianness check is OS wide and not specific to libc
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Upstream-Status: Pending
-
-Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
----
- src/util/u_endian.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/util/u_endian.h b/src/util/u_endian.h
-index b9d563d..2d5eab9 100644
---- a/src/util/u_endian.h
-+++ b/src/util/u_endian.h
-@@ -27,7 +27,7 @@
- #ifndef U_ENDIAN_H
- #define U_ENDIAN_H
-
--#if defined(__GLIBC__) || defined(ANDROID)
-+#if defined(__linux__)
- #include <endian.h>
-
- #if __BYTE_ORDER == __LITTLE_ENDIAN
---
-2.1.4
-
diff --git a/meta-v1000/recipes-graphics/mesa/files/vulkan-mkdir.patch b/meta-v1000/recipes-graphics/mesa/files/vulkan-mkdir.patch
deleted file mode 100644
index 61bf6c65..00000000
--- a/meta-v1000/recipes-graphics/mesa/files/vulkan-mkdir.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 722695f595c97e02d184937ddf06da27974d2eca Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Fri, 15 Dec 2017 12:32:05 +0500
-Subject: [PATCH] src/intel/Makefile.vulkan.am: create target directories when
- required
-
-In out-of-tree builds src/intel/vulkan won't exist, so always create
-it before writing into it.
-
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- src/intel/Makefile.vulkan.am | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/src/intel/Makefile.vulkan.am b/src/intel/Makefile.vulkan.am
-index 811faab..aad93cc 100644
---- a/src/intel/Makefile.vulkan.am
-+++ b/src/intel/Makefile.vulkan.am
-@@ -59,10 +59,12 @@ EXTRA_DIST += \
- vulkan/TODO
-
- vulkan/dev_icd.json : vulkan/anv_extensions.py vulkan/anv_icd.py
-+ $(MKDIR_GEN)
- $(AM_V_GEN)$(PYTHON2) $(srcdir)/vulkan/anv_icd.py \
- --lib-path="${abs_top_builddir}/${LIB_DIR}" --out $@
-
- vulkan/intel_icd.@host_cpu@.json : vulkan/anv_extensions.py vulkan/anv_icd.py
-+ $(MKDIR_GEN)
- $(AM_V_GEN)$(PYTHON2) $(srcdir)/vulkan/anv_icd.py \
- --lib-path="${libdir}" --out $@
-
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0001-Use-wayland-scanner-in-the-path.patch b/meta-v1000/recipes-graphics/mesa/mesa/0001-Use-wayland-scanner-in-the-path.patch
deleted file mode 100644
index eb6ff4ff..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0001-Use-wayland-scanner-in-the-path.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From c908f0c13ac81a3a52140f129a13b2bc997ff4ee Mon Sep 17 00:00:00 2001
-From: Jussi Kukkonen <jussi.kukkonen@intel.com>
-Date: Tue, 15 Nov 2016 15:20:49 +0200
-Subject: [PATCH] Simplify wayland-scanner lookup
-
-Don't use pkg-config to lookup the path of a binary that's in the path.
-
-Alternatively we'd have to prefix the path returned by pkg-config with
-PKG_CONFIG_SYSROOT_DIR.
-
-Upstream-Status: Pending
-Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
----
- configure.ac | 7 +------
- 1 file changed, 1 insertion(+), 6 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 2c7e636fac..d2b2350739 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -2174,12 +2174,7 @@ if test "x$with_platforms" != xauto; then
- with_egl_platforms=$with_platforms
- fi
-
--PKG_CHECK_MODULES([WAYLAND_SCANNER], [wayland-scanner],
-- WAYLAND_SCANNER=`$PKG_CONFIG --variable=wayland_scanner wayland-scanner`,
-- WAYLAND_SCANNER='')
--if test "x$WAYLAND_SCANNER" = x; then
-- AC_PATH_PROG([WAYLAND_SCANNER], [wayland-scanner], [:])
--fi
-+AC_PATH_PROG([WAYLAND_SCANNER], [wayland-scanner])
-
- # Do per-EGL platform setups and checks
- egl_platforms=`IFS=', '; echo $with_egl_platforms`
---
-2.13.0
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0001-configure.ac-for-llvm-config-to-report-correct-libdi.patch b/meta-v1000/recipes-graphics/mesa/mesa/0001-configure.ac-for-llvm-config-to-report-correct-libdi.patch
deleted file mode 100644
index 258df39c..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0001-configure.ac-for-llvm-config-to-report-correct-libdi.patch
+++ /dev/null
@@ -1,56 +0,0 @@
-From c12882ceedb17c9ee252abdf91a07685153fd73c Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Fri, 15 Dec 2017 13:19:02 +0500
-Subject: [PATCH 1/2] configure.ac: for llvm-config to report correct libdir
-
-In cross compiling environments llvm-config messes up
-as it reports native environment because it is built
-for the host system. Hence if the target system and
-host system have different baselibs the llvm-config
-fails to find libraries for the target system
-appropriately.
-This now forces llvm-config to use the target specific
-libdir.
-
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- configure.ac | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 9c92b28..23d8fbf 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -998,7 +998,7 @@ llvm_set_environment_variables() {
- LLVM_VERSION=`$LLVM_CONFIG --version | cut -c1-5`
- LLVM_CPPFLAGS=`strip_unwanted_llvm_flags "$LLVM_CONFIG --cppflags"`
- LLVM_INCLUDEDIR=`$LLVM_CONFIG --includedir`
-- LLVM_LIBDIR=`$LLVM_CONFIG --libdir`
-+ LLVM_LIBDIR=${llvm_prefix}
-
- # We need to respect LLVM_CPPFLAGS when compiling LLVM headers.
- save_CFLAGS="$CFLAGS"
-@@ -2684,17 +2684,17 @@ if test "x$enable_llvm" = xyes; then
-
- if test $LLVM_VERSION_MAJOR -ge 4 -o $LLVM_VERSION_MAJOR -eq 3 -a $LLVM_VERSION_MINOR -ge 9; then
- if test "x$enable_llvm_shared_libs" = xyes; then
-- LLVM_LIBS="`$LLVM_CONFIG --link-shared --libs ${LLVM_COMPONENTS}`"
-+ LLVM_LIBS="`$LLVM_CONFIG --tgtlibdir ${llvm_prefix} --link-shared --libs ${LLVM_COMPONENTS}`"
- else
- dnl Invoking llvm-config with both -libs and --system-libs produces the
- dnl two separate lines - each for the set of libraries.
- dnl Call the program twice, effectively folding them into a single line.
-- LLVM_LIBS="`$LLVM_CONFIG --link-static --libs ${LLVM_COMPONENTS}`"
-+ LLVM_LIBS="`$LLVM_CONFIG --tgtlibdir ${llvm_prefix} --link-static --libs ${LLVM_COMPONENTS}`"
- dnl We need to link to llvm system libs when using static libs
- LLVM_LIBS="$LLVM_LIBS `$LLVM_CONFIG --link-static --system-libs`"
- fi
- else
-- LLVM_LIBS="`$LLVM_CONFIG --libs ${LLVM_COMPONENTS}`"
-+ LLVM_LIBS="`$LLVM_CONFIG --tgtlibdir ${llvm_prefix} --libs ${LLVM_COMPONENTS}`"
- if test "x$enable_llvm_shared_libs" = xyes; then
- detect_old_buggy_llvm
- else
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0001-fix-building-with-flex-2.6.2.patch b/meta-v1000/recipes-graphics/mesa/mesa/0001-fix-building-with-flex-2.6.2.patch
deleted file mode 100644
index 87e7a1d6..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0001-fix-building-with-flex-2.6.2.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From: Emil Velikov <emil.velikov@collabora.com>
-
-Flex version 2.6.2 does not expand (define) the yy version of some
-function, thus we fail to compile.
-
-Strictly speaking this might be a flex bug, although expanding the few
-instances is perfectly trivial and works with 2.6.2 and earlier versions
-of flex.
-
-Cc: "12.0 13.0" <mesa-stable@lists.freedesktop.org>
-Cc: Mike Lothian <mike@fireburn.co.uk>
-Repored-by: Mike Lothian <mike@fireburn.co.uk>
-Signed-off-by: Emil Velikov <emil.l.velikov@gmail.com>
- src/compiler/glsl/glsl_lexer.ll | 6 +++---
- src/mesa/program/program_lexer.l | 6 +++---
- src/compiler/glsl/glcpp/glcpp-lex.l | 2 +-
- 3 files changed, 7 insertions(+), 7 deletions(-)
-
-diff --git a/src/compiler/glsl/glsl_lexer.ll b/src/compiler/glsl/glsl_lexer.ll
-index d5e5d4c..e5492bf 100644
---- a/src/compiler/glsl/glsl_lexer.ll
-+++ b/src/compiler/glsl/glsl_lexer.ll
-@@ -627,12 +627,12 @@ classify_identifier(struct _mesa_glsl_parse_state *state, const char *name)
- void
- _mesa_glsl_lexer_ctor(struct _mesa_glsl_parse_state *state, const char *string)
- {
-- yylex_init_extra(state, & state->scanner);
-- yy_scan_string(string, state->scanner);
-+ _mesa_glsl_lexer_lex_init_extra(state, & state->scanner);
-+ _mesa_glsl_lexer__scan_string(string, state->scanner);
- }
-
- void
- _mesa_glsl_lexer_dtor(struct _mesa_glsl_parse_state *state)
- {
-- yylex_destroy(state->scanner);
-+ _mesa_glsl_lexer_lex_destroy(state->scanner);
- }
-
-diff --git a/src/mesa/program/program_lexer.l b/src/mesa/program/program_lexer.l
-index dee66cb..0196635 100644
---- a/src/mesa/program/program_lexer.l
-+++ b/src/mesa/program/program_lexer.l
-@@ -474,12 +474,12 @@ void
- _mesa_program_lexer_ctor(void **scanner, struct asm_parser_state *state,
- const char *string, size_t len)
- {
-- yylex_init_extra(state, scanner);
-- yy_scan_bytes(string, len, *scanner);
-+ _mesa_program_lexer_lex_init_extra(state, scanner);
-+ _mesa_program_lexer__scan_bytes(string, len, *scanner);
- }
-
- void
- _mesa_program_lexer_dtor(void *scanner)
- {
-- yylex_destroy(scanner);
-+ _mesa_program_lexer_lex_destroy(scanner);
- }
-
-diff --git a/src/compiler/glsl/glcpp/glcpp-lex.l b/src/compiler/glsl/glcpp/glcpp-lex.l
-index d09441a..41459cd 100644
---- a/src/compiler/glsl/glcpp/glcpp-lex.l
-+++ b/src/compiler/glsl/glcpp/glcpp-lex.l
-@@ -584,5 +584,5 @@ HEXADECIMAL_INTEGER 0[xX][0-9a-fA-F]+[uU]?
- void
- glcpp_lex_set_source_string(glcpp_parser_t *parser, const char *shader)
- {
-- yy_scan_string(shader, parser->scanner);
-+ glcpp__scan_string(shader, parser->scanner);
- }
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0001-winsys-svga-drm-Include-sys-types.h.patch b/meta-v1000/recipes-graphics/mesa/mesa/0001-winsys-svga-drm-Include-sys-types.h.patch
deleted file mode 100644
index 549b8671..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0001-winsys-svga-drm-Include-sys-types.h.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From d8750776404b1031d762966d0f551d13d2fe05a7 Mon Sep 17 00:00:00 2001
-From: Khem Raj <raj.khem@gmail.com>
-Date: Wed, 16 Aug 2017 18:58:20 -0700
-Subject: [PATCH] winsys/svga/drm: Include sys/types.h
-
-vmw_screen.h uses dev_t which is defines in sys/types.h
-this header is required to be included for getting dev_t
-definition. This issue happens on musl C library, it is hidden
-on glibc since sys/types.h is included through another
-system headers
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
----
-
-Upstream-Status: Submitted
-
- src/gallium/winsys/svga/drm/vmw_screen.h | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/src/gallium/winsys/svga/drm/vmw_screen.h b/src/gallium/winsys/svga/drm/vmw_screen.h
-index 0ef8e84..2eda97e 100644
---- a/src/gallium/winsys/svga/drm/vmw_screen.h
-+++ b/src/gallium/winsys/svga/drm/vmw_screen.h
-@@ -41,6 +41,7 @@
- #include "svga_winsys.h"
- #include "pipebuffer/pb_buffer_fenced.h"
- #include <os/os_thread.h>
-+#include <sys/types.h>
-
- #define VMW_GMR_POOL_SIZE (16*1024*1024)
- #define VMW_QUERY_POOL_SIZE (8192)
---
-2.14.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0002-configure.ac-fix-the-llvm-version-correctly.patch b/meta-v1000/recipes-graphics/mesa/mesa/0002-configure.ac-fix-the-llvm-version-correctly.patch
deleted file mode 100644
index 8f7d300a..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0002-configure.ac-fix-the-llvm-version-correctly.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From 94ecac67155c1c0d5114e339b0e9361e3af48d0a Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Fri, 15 Dec 2017 13:22:58 +0500
-Subject: [PATCH 2/2] configure.ac: fix the llvm version correctly
-
-We do not use the LLVM patch version in OE so
-drop that from the version string as well.
-
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- configure.ac | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 23d8fbf..a4444d4 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -995,7 +995,7 @@ strip_unwanted_llvm_flags() {
-
- llvm_set_environment_variables() {
- if test "x$LLVM_CONFIG" != xno; then
-- LLVM_VERSION=`$LLVM_CONFIG --version | cut -c1-5`
-+ LLVM_VERSION=`$LLVM_CONFIG --version | cut -c1-3`
- LLVM_CPPFLAGS=`strip_unwanted_llvm_flags "$LLVM_CONFIG --cppflags"`
- LLVM_INCLUDEDIR=`$LLVM_CONFIG --includedir`
- LLVM_LIBDIR=${llvm_prefix}
-@@ -2639,7 +2639,7 @@ detect_old_buggy_llvm() {
- dnl ourselves.
- dnl (See https://llvm.org/bugs/show_bug.cgi?id=6823)
- dnl We can't use $LLVM_VERSION because it has 'svn' stripped out,
-- LLVM_SO_NAME=LLVM-`$LLVM_CONFIG --version|cut -c1-5`
-+ LLVM_SO_NAME=LLVM-`$LLVM_CONFIG --version|cut -c1-3`
- AS_IF([test -f "$LLVM_LIBDIR/lib$LLVM_SO_NAME.$IMP_LIB_EXT"], [llvm_have_one_so=yes])
-
- if test "x$llvm_have_one_so" = xyes; then
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0002-hardware-gloat.patch b/meta-v1000/recipes-graphics/mesa/mesa/0002-hardware-gloat.patch
deleted file mode 100644
index 0e014dcc..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0002-hardware-gloat.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From 00bcd599310dc7fce4fe336ffd85902429051a0c Mon Sep 17 00:00:00 2001
-From: Igor Gnatenko <i.gnatenko.brain@gmail.com>
-Date: Sun, 20 Mar 2016 13:27:04 +0100
-Subject: [PATCH 2/4] hardware gloat
-
-Upstream-Status: Inappropriate [not author]
-Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
----
- src/gallium/drivers/llvmpipe/lp_screen.c | 7 +++++++
- src/gallium/drivers/softpipe/sp_screen.c | 7 +++++++
- 2 files changed, 14 insertions(+)
-
-diff --git a/src/gallium/drivers/llvmpipe/lp_screen.c b/src/gallium/drivers/llvmpipe/lp_screen.c
-index 4f61de8..3b0ec77 100644
---- a/src/gallium/drivers/llvmpipe/lp_screen.c
-+++ b/src/gallium/drivers/llvmpipe/lp_screen.c
-@@ -411,6 +411,13 @@ llvmpipe_is_format_supported( struct pipe_screen *_screen,
- if (!format_desc)
- return FALSE;
-
-+ if ((bind & PIPE_BIND_RENDER_TARGET) &&
-+ format != PIPE_FORMAT_R9G9B9E5_FLOAT &&
-+ format != PIPE_FORMAT_R11G11B10_FLOAT &&
-+ util_format_is_float(format)) {
-+ return FALSE;
-+ }
-+
- assert(target == PIPE_BUFFER ||
- target == PIPE_TEXTURE_1D ||
- target == PIPE_TEXTURE_1D_ARRAY ||
-diff --git a/src/gallium/drivers/softpipe/sp_screen.c b/src/gallium/drivers/softpipe/sp_screen.c
-index 031602b..c279120 100644
---- a/src/gallium/drivers/softpipe/sp_screen.c
-+++ b/src/gallium/drivers/softpipe/sp_screen.c
-@@ -358,6 +358,13 @@ softpipe_is_format_supported( struct pipe_screen *screen,
- if (!format_desc)
- return FALSE;
-
-+ if ((bind & PIPE_BIND_RENDER_TARGET) &&
-+ format != PIPE_FORMAT_R9G9B9E5_FLOAT &&
-+ format != PIPE_FORMAT_R11G11B10_FLOAT &&
-+ util_format_is_float(format)) {
-+ return FALSE;
-+ }
-+
- if (sample_count > 1)
- return FALSE;
-
---
-2.7.4
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/0003-strip-llvm-ldflags.patch b/meta-v1000/recipes-graphics/mesa/mesa/0003-strip-llvm-ldflags.patch
deleted file mode 100644
index d840cb7c..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/0003-strip-llvm-ldflags.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-diff --git a/configure.ac b/configure.ac
-index 12dedd6..0776938 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -1012,6 +1012,19 @@ llvm_add_target() {
- fi
- }
-
-+# Strip all the native bits from ldflags
-+strip_unwanted_llvm_ldflags() {
-+ flags=""
-+ for flag in `$1`; do
-+ case "$flag" in
-+ *native*) ;;
-+ *) flags="$flags $flag"
-+ ;;
-+ esac
-+ done
-+ echo $flags
-+}
-+
- # Call this inside ` ` to get the return value.
- # $1 is the llvm-config command with arguments.
- strip_unwanted_llvm_flags() {
-@@ -2661,7 +2674,7 @@ dnl
- if test "x$enable_llvm" = xyes; then
- DEFINES="${DEFINES} -DHAVE_LLVM=0x0$LLVM_VERSION_INT -DMESA_LLVM_VERSION_PATCH=$LLVM_VERSION_PATCH"
-
-- LLVM_LDFLAGS=`$LLVM_CONFIG --ldflags`
-+ LLVM_LDFLAGS=`strip_unwanted_llvm_ldflags "$LLVM_CONFIG --ldflags"`
- LLVM_CFLAGS=$LLVM_CPPFLAGS # CPPFLAGS seem to be sufficient
- LLVM_CXXFLAGS=`strip_unwanted_llvm_flags "$LLVM_CONFIG --cxxflags"`
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/disable-asm-on-non-gcc.patch b/meta-v1000/recipes-graphics/mesa/mesa/disable-asm-on-non-gcc.patch
deleted file mode 100644
index d2d67558..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/disable-asm-on-non-gcc.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Configure checks for compiler to be gcc and then it enables asm_offsets
-generation. see
-
-https://cgit.freedesktop.org/mesa/mesa/commit/?id=73c9b4b0e05fc66629ba250846948dc55c0e7a0d
-
-However, we missed the check when enabling this on cross compilation
-when architecture for both host and target is x86
-
-Fixes errors like
-./gen_matypes > matypes.h
-/bin/bash: ./gen_matypes: No such file or directory
-
--Khem
-
-Upstream-Status: Submitted
-
-Index: mesa-12.0.1/configure.ac
-===================================================================
---- mesa-12.0.1.orig/configure.ac
-+++ mesa-12.0.1/configure.ac
-@@ -732,7 +732,7 @@ test "x$enable_asm" = xno && AC_MSG_RESU
- if test "x$enable_asm" = xyes -a "x$cross_compiling" = xyes; then
- case "$host_cpu" in
- i?86 | x86_64 | amd64)
-- if test "x$host_cpu" != "x$target_cpu"; then
-+ if test "x$host_cpu" != "x$target_cpu" -o "x$acv_mesa_CLANG" = xyes; then
- enable_asm=no
- AC_MSG_RESULT([no, cross compiling])
- fi
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/llvm-config-version.patch b/meta-v1000/recipes-graphics/mesa/mesa/llvm-config-version.patch
deleted file mode 100644
index a65468cc..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/llvm-config-version.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From 53142302c0794006261d97af3ffcab043f502d10 Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Fri, 15 Dec 2017 12:48:10 +0500
-Subject: [PATCH 1/2] When building llvm from git or svn it embeds the svn/git
- revision into internal version string
-
-$ /mnt/a/oe/build/tmp/work/corei7-64-bec-linux/mesa/2_17.1.5-r0/recipe-sysroot/usr/lib/llvm5.0/llvm-config-host --version
-5.0.0git-9a5c333388c
-
-We need to ignore everything after 5.0.0 which is what the cut cmd is doing
-
-Upstream-Status: Pending
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- configure.ac | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/configure.ac b/configure.ac
-index 8c95007..9c92b28 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -995,7 +995,7 @@ strip_unwanted_llvm_flags() {
-
- llvm_set_environment_variables() {
- if test "x$LLVM_CONFIG" != xno; then
-- LLVM_VERSION=`$LLVM_CONFIG --version | egrep -o '^[[0-9.]]+'`
-+ LLVM_VERSION=`$LLVM_CONFIG --version | cut -c1-5`
- LLVM_CPPFLAGS=`strip_unwanted_llvm_flags "$LLVM_CONFIG --cppflags"`
- LLVM_INCLUDEDIR=`$LLVM_CONFIG --includedir`
- LLVM_LIBDIR=`$LLVM_CONFIG --libdir`
-@@ -2639,7 +2639,7 @@ detect_old_buggy_llvm() {
- dnl ourselves.
- dnl (See https://llvm.org/bugs/show_bug.cgi?id=6823)
- dnl We can't use $LLVM_VERSION because it has 'svn' stripped out,
-- LLVM_SO_NAME=LLVM-`$LLVM_CONFIG --version`
-+ LLVM_SO_NAME=LLVM-`$LLVM_CONFIG --version|cut -c1-5`
- AS_IF([test -f "$LLVM_LIBDIR/lib$LLVM_SO_NAME.$IMP_LIB_EXT"], [llvm_have_one_so=yes])
-
- if test "x$llvm_have_one_so" = xyes; then
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/replace_glibc_check_with_linux.patch b/meta-v1000/recipes-graphics/mesa/mesa/replace_glibc_check_with_linux.patch
deleted file mode 100644
index 0280ee85..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/replace_glibc_check_with_linux.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-endianness check is OS wide and not specific to libc
-
-Signed-off-by: Khem Raj <raj.khem@gmail.com>
-Upstream-Status: Pending
-
-Signed-off-by: Jussi Kukkonen <jussi.kukkonen@intel.com>
----
- src/util/u_endian.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/src/util/u_endian.h b/src/util/u_endian.h
-index b9d563d..2d5eab9 100644
---- a/src/util/u_endian.h
-+++ b/src/util/u_endian.h
-@@ -27,7 +27,7 @@
- #ifndef U_ENDIAN_H
- #define U_ENDIAN_H
-
--#if defined(__GLIBC__) || defined(ANDROID)
-+#if defined(__linux__)
- #include <endian.h>
-
- #if __BYTE_ORDER == __LITTLE_ENDIAN
---
-2.1.4
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa/vulkan-mkdir.patch b/meta-v1000/recipes-graphics/mesa/mesa/vulkan-mkdir.patch
deleted file mode 100644
index 61bf6c65..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa/vulkan-mkdir.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-From 722695f595c97e02d184937ddf06da27974d2eca Mon Sep 17 00:00:00 2001
-From: Awais Belal <awais_belal@mentor.com>
-Date: Fri, 15 Dec 2017 12:32:05 +0500
-Subject: [PATCH] src/intel/Makefile.vulkan.am: create target directories when
- required
-
-In out-of-tree builds src/intel/vulkan won't exist, so always create
-it before writing into it.
-
-Signed-off-by: Ross Burton <ross.burton@intel.com>
-Signed-off-by: Awais Belal <awais_belal@mentor.com>
----
- src/intel/Makefile.vulkan.am | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/src/intel/Makefile.vulkan.am b/src/intel/Makefile.vulkan.am
-index 811faab..aad93cc 100644
---- a/src/intel/Makefile.vulkan.am
-+++ b/src/intel/Makefile.vulkan.am
-@@ -59,10 +59,12 @@ EXTRA_DIST += \
- vulkan/TODO
-
- vulkan/dev_icd.json : vulkan/anv_extensions.py vulkan/anv_icd.py
-+ $(MKDIR_GEN)
- $(AM_V_GEN)$(PYTHON2) $(srcdir)/vulkan/anv_icd.py \
- --lib-path="${abs_top_builddir}/${LIB_DIR}" --out $@
-
- vulkan/intel_icd.@host_cpu@.json : vulkan/anv_extensions.py vulkan/anv_icd.py
-+ $(MKDIR_GEN)
- $(AM_V_GEN)$(PYTHON2) $(srcdir)/vulkan/anv_icd.py \
- --lib-path="${libdir}" --out $@
-
---
-2.11.1
-
diff --git a/meta-v1000/recipes-graphics/mesa/mesa_17.3.%.bbappend b/meta-v1000/recipes-graphics/mesa/mesa_17.3.%.bbappend
new file mode 100644
index 00000000..e819463b
--- /dev/null
+++ b/meta-v1000/recipes-graphics/mesa/mesa_17.3.%.bbappend
@@ -0,0 +1,11 @@
+FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
+
+MESA_LLVM_RELEASE_v1000 = "6.0"
+
+SRC_URI_append_v1000 = " file://0001-st-omx-enc-Correct-the-timestamping.patch \
+ file://0002-st-omx-enc-Modularize-the-Encoding-task.patch \
+ file://0003-st-omx-enc-Support-framerate-conversion.patch \
+ file://0004-st-mesa-Reverting-patches-that-solved-perf-issues-wi.patch \
+ file://0005-Added-the-workaround-fix-for-the-opengl-CTS-failure..patch \
+ file://0006-st-omx-handle-invalid-timestamps-better-for-frc.patch \
+ file://0007-Revert-st-mesa-Reverting-patches-that-solved-perf-is.patch" \ No newline at end of file
diff --git a/meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bb b/meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bb
deleted file mode 100644
index fc52fbb9..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bb
+++ /dev/null
@@ -1,22 +0,0 @@
-require recipes-graphics/${BPN}/${BPN}.inc
-
-SRC_URI = "https://mesa.freedesktop.org/archive/mesa-${PV}.tar.xz \
- file://replace_glibc_check_with_linux.patch \
- file://disable-asm-on-non-gcc.patch \
- file://0001-Use-wayland-scanner-in-the-path.patch \
- file://0002-hardware-gloat.patch \
- file://vulkan-mkdir.patch \
- file://llvm-config-version.patch \
- file://0001-winsys-svga-drm-Include-sys-types.h.patch \
- "
-
-SRC_URI[md5sum] = "7f69ae6aa9c54a990c4d65e29b17f07d"
-SRC_URI[sha256sum] = "29a0a3a6c39990d491a1a58ed5c692e596b3bfc6c01d0b45e0b787116c50c6d9"
-
-#because we cannot rely on the fact that all apps will use pkgconfig,
-#make eglplatform.h independent of MESA_EGL_NO_X11_HEADER
-do_install_append() {
- if ${@bb.utils.contains('PACKAGECONFIG', 'egl', 'true', 'false', d)}; then
- sed -i -e 's/^#if defined(MESA_EGL_NO_X11_HEADERS)$/#if defined(MESA_EGL_NO_X11_HEADERS) || ${@bb.utils.contains('PACKAGECONFIG', 'x11', '0', '1', d)}/' ${D}${includedir}/EGL/eglplatform.h
- fi
-}
diff --git a/meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bbappend b/meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bbappend
deleted file mode 100644
index 7fc212cd..00000000
--- a/meta-v1000/recipes-graphics/mesa/mesa_17.3.0.bbappend
+++ /dev/null
@@ -1,67 +0,0 @@
-FILESEXTRAPATHS_prepend := "${THISDIR}/${PN}:"
-
-MESA_LLVM_RELEASE_v1000 = "6.0"
-
-DEPENDS_append_v1000 = " libvdpau libomxil"
-
-PACKAGECONFIG[va] = "--enable-va,--disable-va,libva"
-PACKAGECONFIG_append_v1000 = " xvmc gallium r600 gallium-llvm xa osmesa va"
-PACKAGECONFIG_remove_v1000 = "vulkan"
-
-LIBVA_PLATFORMS = "libva"
-LIBVA_PLATFORMS .= "${@bb.utils.contains('DISTRO_FEATURES', 'x11', ' libva-x11', '', d)}"
-LIBVA_PLATFORMS .= "${@bb.utils.contains('DISTRO_FEATURES', 'wayland', ' libva-wayland', '', d)}"
-LIBVA_PLATFORMS .= "${@bb.utils.contains('DISTRO_FEATURES', 'opengl', ' libva-gl', '', d)}"
-RDEPENDS_mesa-megadriver += "${@bb.utils.contains('PACKAGECONFIG', 'va', '${LIBVA_PLATFORMS}', '', d)}"
-
-SRC_URI_append_v1000 = " file://0001-fix-building-with-flex-2.6.2.patch \
- file://0001-configure.ac-for-llvm-config-to-report-correct-libdi.patch \
- file://0002-configure.ac-fix-the-llvm-version-correctly.patch \
- file://0003-strip-llvm-ldflags.patch \
- file://0001-st-omx-enc-Correct-the-timestamping.patch \
- file://0002-st-omx-enc-Modularize-the-Encoding-task.patch \
- file://0003-st-omx-enc-Support-framerate-conversion.patch \
- file://0004-st-mesa-Reverting-patches-that-solved-perf-issues-wi.patch \
- file://0005-Added-the-workaround-fix-for-the-opengl-CTS-failure..patch \
- file://0006-st-omx-handle-invalid-timestamps-better-for-frc.patch \
- file://0007-Revert-st-mesa-Reverting-patches-that-solved-perf-is.patch"
-
-EXTRA_OECONF_append_v1000 = " \
- --enable-vdpau \
- --enable-glx \
- --enable-texture-float \
- --enable-omx-bellagio \
- --with-omx-bellagio-libdir=${libdir}/bellagio \
- "
-
-# Package all the libXvMC gallium extensions together
-# they provide the shared lib libXvMCGallium and splitting
-# them up creates trouble in rpm packaging
-PACKAGES =+ "libxvmcgallium-${PN} libxvmcgallium-${PN}-dev"
-FILES_libxvmcgallium-${PN} = "${libdir}/libXvMC*${SOLIBS}"
-FILES_libxvmcgallium-${PN}-dev = "${libdir}/libXvMC*${SOLIBSDEV} \
- ${libdir}/libXvMC*.la"
-
-PACKAGES =+ "libvdpau-${PN} libvdpau-${PN}-dev"
-FILES_libvdpau-${PN} = "${libdir}/vdpau/libvdpau*${SOLIBS}"
-FILES_libvdpau-${PN}-dev = "${libdir}/vdpau/libvdpau*${SOLIBSDEV} \
- ${libdir}/vdpau/libvdpau*.la"
-FILES_${PN}-dbg += "${libdir}/vdpau/.debug"
-
-#
-# libomx files are non-versioned so we put *.so directly in the
-# main package as opposed to the -dev package.
-#
-PACKAGES =+ "libomx-${PN} libomx-${PN}-dev"
-FILES_libomx-${PN} = "${libdir}/bellagio/libomx_*.so"
-FILES_libomx-${PN}-dev = "${libdir}/bellagio/libomx_*.la"
-FILES_${PN}-dbg += "${libdir}/bellagio/.debug"
-
-# Set DRIDRIVERS with anonymous python so we can effectively
-# override the _append_x86-64 assignement from mesa.inc.
-python () {
- d.setVar("DRIDRIVERS", "swrast,radeon")
- d.setVar("GALLIUMDRIVERS", "swrast,r300,r600,radeonsi")
-}
-
-MESA_CRYPTO_v1000 = ""
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware.bb b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware.bb
new file mode 100644
index 00000000..16751161
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware.bb
@@ -0,0 +1,28 @@
+DESCRIPTION = "These binaries provide kernel support for newer AMD GPUs"
+SECTION = "kernel"
+LICENSE = "Firmware-amd"
+
+SRC_URI = "file://raven_me.bin file://raven_pfp.bin file://raven_vcn.bin \
+ file://raven_ce.bin file://raven_mec2.bin file://raven_rlc.bin \
+ file://raven_gpu_info.bin file://raven_mec.bin file://raven_sdma.bin \
+ file://LICENSE \
+ "
+
+LIC_FILES_CHKSUM = "file://LICENSE;md5=07b0c31777bd686d8e1609c6940b5e74"
+
+S = "${WORKDIR}"
+
+# Since, no binaries are generated for a specific target,
+# inherit allarch to simply populate prebuilt binaries
+inherit allarch
+
+do_compile() {
+ :
+}
+
+do_install() {
+ install -v -m 444 -D ${S}/LICENSE ${D}/lib/firmware/amdgpu/LICENSE
+ install -v -m 0644 ${S}/*.bin ${D}/lib/firmware/amdgpu
+}
+
+FILES_${PN} = "/lib/firmware/*"
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/LICENSE b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/LICENSE
new file mode 100644
index 00000000..fe3780b3
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/LICENSE
@@ -0,0 +1,51 @@
+Copyright (C) 2009-2014 Advanced Micro Devices, Inc. All rights reserved.
+
+REDISTRIBUTION: Permission is hereby granted, free of any license fees,
+to any person obtaining a copy of this microcode (the "Software"), to
+install, reproduce, copy and distribute copies, in binary form only, of
+the Software and to permit persons to whom the Software is provided to
+do the same, provided that the following conditions are met:
+
+No reverse engineering, decompilation, or disassembly of this Software
+is permitted.
+
+Redistributions must reproduce the above copyright notice, this
+permission notice, and the following disclaimers and notices in the
+Software documentation and/or other materials provided with the
+Software.
+
+DISCLAIMER: THE USE OF THE SOFTWARE IS AT YOUR SOLE RISK. THE SOFTWARE
+IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND AND COPYRIGHT
+HOLDER AND ITS LICENSORS EXPRESSLY DISCLAIM ALL WARRANTIES, EXPRESS AND
+IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+COPYRIGHT HOLDER AND ITS LICENSORS DO NOT WARRANT THAT THE SOFTWARE WILL
+MEET YOUR REQUIREMENTS, OR THAT THE OPERATION OF THE SOFTWARE WILL BE
+UNINTERRUPTED OR ERROR-FREE. THE ENTIRE RISK ASSOCIATED WITH THE USE OF
+THE SOFTWARE IS ASSUMED BY YOU. FURTHERMORE, COPYRIGHT HOLDER AND ITS
+LICENSORS DO NOT WARRANT OR MAKE ANY REPRESENTATIONS REGARDING THE USE
+OR THE RESULTS OF THE USE OF THE SOFTWARE IN TERMS OF ITS CORRECTNESS,
+ACCURACY, RELIABILITY, CURRENTNESS, OR OTHERWISE.
+
+DISCLAIMER: UNDER NO CIRCUMSTANCES INCLUDING NEGLIGENCE, SHALL COPYRIGHT
+HOLDER AND ITS LICENSORS OR ITS DIRECTORS, OFFICERS, EMPLOYEES OR AGENTS
+("AUTHORIZED REPRESENTATIVES") BE LIABLE FOR ANY INCIDENTAL, INDIRECT,
+SPECIAL OR CONSEQUENTIAL DAMAGES (INCLUDING DAMAGES FOR LOSS OF BUSINESS
+PROFITS, BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, AND THE
+LIKE) ARISING OUT OF THE USE, MISUSE OR INABILITY TO USE THE SOFTWARE,
+BREACH OR DEFAULT, INCLUDING THOSE ARISING FROM INFRINGEMENT OR ALLEGED
+INFRINGEMENT OF ANY PATENT, TRADEMARK, COPYRIGHT OR OTHER INTELLECTUAL
+PROPERTY RIGHT EVEN IF COPYRIGHT HOLDER AND ITS AUTHORIZED
+REPRESENTATIVES HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. IN
+NO EVENT SHALL COPYRIGHT HOLDER OR ITS AUTHORIZED REPRESENTATIVES TOTAL
+LIABILITY FOR ALL DAMAGES, LOSSES, AND CAUSES OF ACTION (WHETHER IN
+CONTRACT, TORT (INCLUDING NEGLIGENCE) OR OTHERWISE) EXCEED THE AMOUNT OF
+US$10.
+
+Notice: The Software is subject to United States export laws and
+regulations. You agree to comply with all domestic and international
+export laws and regulations that apply to the Software, including but
+not limited to the Export Administration Regulations administered by the
+U.S. Department of Commerce and International Traffic in Arm Regulations
+administered by the U.S. Department of State. These laws include
+restrictions on destinations, end users and end use.
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_ce.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_ce.bin
new file mode 100644
index 00000000..5f161738
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_ce.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_gpu_info.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_gpu_info.bin
new file mode 100644
index 00000000..03513749
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_gpu_info.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_me.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_me.bin
new file mode 100644
index 00000000..651291a1
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_me.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec.bin
new file mode 100644
index 00000000..34688665
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec2.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec2.bin
new file mode 100644
index 00000000..34688665
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_mec2.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_pfp.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_pfp.bin
new file mode 100644
index 00000000..c8cabd94
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_pfp.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_rlc.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_rlc.bin
new file mode 100644
index 00000000..b9813578
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_rlc.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_sdma.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_sdma.bin
new file mode 100644
index 00000000..52da6dc4
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_sdma.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_vcn.bin b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_vcn.bin
new file mode 100644
index 00000000..1569bb69
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux-firmware/amdgpu-firmware/raven_vcn.bin
Binary files differ
diff --git a/meta-v1000/recipes-kernel/linux/files/2912-drm-amdkfd-Reset-process-queues-if-it-VM_FAULTs.patch b/meta-v1000/recipes-kernel/linux/files/2912-drm-amdkfd-Reset-process-queues-if-it-VM_FAULTs.patch
new file mode 100644
index 00000000..0f60bb97
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2912-drm-amdkfd-Reset-process-queues-if-it-VM_FAULTs.patch
@@ -0,0 +1,31 @@
+From 0f800b17a82059b3acd8b8525e5fcf2d5f93dece Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 14:41:35 +0530
+Subject: [PATCH] drm/amdkfd: Reset process queues if it VM_FAULTs
+
+Currently, queues are preempt during process termination or if it
+VM_FAULTs. Instead reset the queues.
+
+BUG: SWDEV-110763
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 7407e67..e303bae 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1664,7 +1664,7 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ return -EINVAL;
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd)
+- ret = process_evict_queues(dqm, &pdd->qpd, reset);
++ ret = process_evict_queues(dqm, &pdd->qpd, true);
+ kfd_unref_process(p);
+
+ return ret;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2913-drm-amdkfd-Support-registering-third-pary-device-mem.patch b/meta-v1000/recipes-kernel/linux/files/2913-drm-amdkfd-Support-registering-third-pary-device-mem.patch
new file mode 100644
index 00000000..770d1972
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2913-drm-amdkfd-Support-registering-third-pary-device-mem.patch
@@ -0,0 +1,71 @@
+From 790324ed6f009faeaace9075e7251f8275673474 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 16:45:58 +0530
+Subject: [PATCH] drm/amdkfd: Support registering third pary device memory
+
+Register userptr that corresponds to third party device memory for GPU
+access. Instead of userptr BO a doorbell BO will be created using the
+physical address of the third party device memory. User space will treat
+the doorbell BO as a regular userptr BO
+
+BUG: KFD-351
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index bcd1c0e..b9752a1 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1176,7 +1176,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ struct kfd_dev *dev;
+ int idr_handle;
+ long err;
+- uint64_t offset;
++ uint64_t offset = args->mmap_offset;
++ uint32_t flags = args->flags;
++ struct vm_area_struct *vma;
+
+ if (args->size == 0)
+ return -EINVAL;
+@@ -1191,17 +1193,31 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
+ if (IS_ERR(pdd))
+ return PTR_ERR(pdd);
+
+- if (args->flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
++ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
++ /* Check if the userptr corresponds to another (or third-party)
++ * device local memory. If so treat is as a doorbell. User
++ * space will be oblivious of this and will use this doorbell
++ * BO as a regular userptr BO
++ */
++ vma = find_vma(current->mm, args->mmap_offset);
++ if (vma && (vma->vm_flags & VM_IO)) {
++ unsigned long pfn;
++
++ follow_pfn(vma, args->mmap_offset, &pfn);
++ flags |= KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL;
++ flags &= ~KFD_IOC_ALLOC_MEM_FLAGS_USERPTR;
++ offset = (pfn << PAGE_SHIFT);
++ }
++ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+ if (args->size != kfd_doorbell_process_slice(dev))
+ return -EINVAL;
+ offset = kfd_get_process_doorbells(dev, p);
+- } else
+- offset = args->mmap_offset;
++ }
+
+ err = dev->kfd2kgd->alloc_memory_of_gpu(
+ dev->kgd, args->va_addr, args->size,
+ pdd->vm, (struct kgd_mem **) &mem, &offset,
+- NULL, args->flags);
++ NULL, flags);
+
+ if (err != 0)
+ return err;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2914-drm-amdkfd-Address-kernel-warning.patch b/meta-v1000/recipes-kernel/linux/files/2914-drm-amdkfd-Address-kernel-warning.patch
new file mode 100644
index 00000000..ac0be020
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2914-drm-amdkfd-Address-kernel-warning.patch
@@ -0,0 +1,26 @@
+From 1bc7cd0a90d6d884f201b9038483eb76b016e526 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 16:50:02 +0530
+Subject: [PATCH] drm/amdkfd: Address kernel warning
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index b9752a1..437a133 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1307,7 +1307,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ void *mem;
+ struct kfd_dev *dev, *peer;
+ long err = 0;
+- int i, num_dev;
++ int i, num_dev = 0;
+ uint32_t *devices_arr = NULL;
+
+ dev = kfd_device_by_id(GET_GPU_ID(args->handle));
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2915-drm-amdkfd-Handle-MEM_VIOL-in-trap-handler.patch b/meta-v1000/recipes-kernel/linux/files/2915-drm-amdkfd-Handle-MEM_VIOL-in-trap-handler.patch
new file mode 100644
index 00000000..5579e4a8
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2915-drm-amdkfd-Handle-MEM_VIOL-in-trap-handler.patch
@@ -0,0 +1,66 @@
+From 4d673f793114de06e1847085ea68b7ce21dabadc Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 16:52:24 +0530
+Subject: [PATCH] drm/amdkfd: Handle MEM_VIOL in trap handler
+
+Generalize XNACK_ERROR handling to MEM_VIOL, which encompasses both
+XNACK error and DUA aperture violation. This is also an illegal state
+in which to issue further memory accesses in the trap which can lead
+to the TMA fetch placing the wavefront into FATAL_HALT state.
+
+Change-Id: I8af7f09b74c71ace878c4b82122492d677f7a456
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+index bd5053e..c7f8749 100644
+--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
++++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+@@ -117,7 +117,7 @@ var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
+ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
+ var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
+ var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
+-var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000
++var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
+
+ var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
+ var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
+@@ -239,14 +239,14 @@ L_SKIP_RESTORE:
+
+ // ********* Handle non-CWSR traps *******************
+ if (!EMU_RUN_HACK)
+- // If STATUS.XNACK_ERROR is asserted then we cannot fetch from the TMA.
++ // If STATUS.MEM_VIOL is asserted then we cannot fetch from the TMA.
+ // Instead, halt the wavefront and return from the trap.
+- s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK
+- s_cbranch_scc0 L_NO_XNACK_ERROR
++ s_and_b32 ttmp8, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
++ s_cbranch_scc0 L_NO_MEM_VIOL
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+ s_branch L_EXCP_CASE
+
+-L_NO_XNACK_ERROR:
++L_NO_MEM_VIOL:
+ /* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
+ s_getreg_b32 ttmp14,hwreg(HW_REG_SQ_SHADER_TMA_LO)
+ s_getreg_b32 ttmp15,hwreg(HW_REG_SQ_SHADER_TMA_HI)
+@@ -1136,7 +1136,7 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xb8f0f802, 0x89708670,
+ 0xb8f1f803, 0x8674ff71,
+ 0x00000400, 0xbf85001a,
+- 0x8674ff71, 0x10000000,
++ 0x8674ff71, 0x00000100,
+ 0xbf840003, 0x8770ff70,
+ 0x00002000, 0xbf820010,
+ 0xb8faf812, 0xb8fbf813,
+@@ -1378,4 +1378,3 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
+ 0xbf8a0000, 0x95806f6c,
+ 0xbf810000, 0x00000000,
+ };
+-
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2916-drm-amd-Add-mqd-as-parameter-in-kfd2kgd.hqd_destroy-.patch b/meta-v1000/recipes-kernel/linux/files/2916-drm-amd-Add-mqd-as-parameter-in-kfd2kgd.hqd_destroy-.patch
new file mode 100644
index 00000000..1ed7ff62
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2916-drm-amd-Add-mqd-as-parameter-in-kfd2kgd.hqd_destroy-.patch
@@ -0,0 +1,177 @@
+From 7231058e08b7f04ebbd276a4b2596eef456674c7 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 16:56:59 +0530
+Subject: [PATCH] drm/amd: Add mqd as parameter in kfd2kgd.hqd_destroy
+ interface
+
+Change-Id: I11522965287622bf577fca2aa5dee2aaf791a77f
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 4 ++--
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4 ++--
+ drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2 +-
+ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 +-
+ drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2 +-
+ drivers/gpu/drm/radeon/radeon_kfd.c | 4 ++--
+ 9 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+index 639344e..ad327a8 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+@@ -124,7 +124,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+-static int kgd_hqd_destroy(struct kgd_dev *kgd,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id);
+@@ -587,7 +587,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+ return false;
+ }
+
+-static int kgd_hqd_destroy(struct kgd_dev *kgd,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+index 9efebdc..8ac7201 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+@@ -96,7 +96,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+-static int kgd_hqd_destroy(struct kgd_dev *kgd,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id);
+@@ -610,7 +610,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+ return false;
+ }
+
+-static int kgd_hqd_destroy(struct kgd_dev *kgd,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+index 50ca56f..4bb5160 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+@@ -132,7 +132,7 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
+ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+-static int kgd_hqd_destroy(struct kgd_dev *kgd,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id);
+@@ -746,7 +746,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+ return false;
+ }
+
+-static int kgd_hqd_destroy(struct kgd_dev *kgd,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
+ enum kfd_preempt_type reset_type,
+ unsigned int utimeout, uint32_t pipe_id,
+ uint32_t queue_id)
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+index 34099e2..fb8c859 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+@@ -181,7 +181,7 @@ static void uninitialize(struct kernel_queue *kq)
+ {
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ kq->mqd->destroy_mqd(kq->mqd,
+- NULL,
++ kq->queue->mqd,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
+ kq->queue->pipe,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+index 25ddb73..5eeb2b3 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+@@ -325,7 +325,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+ {
+- return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
++ return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout,
+ pipe_id, queue_id);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+index 144e6cf..6091a50 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+@@ -267,7 +267,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ uint32_t queue_id)
+ {
+ return mm->dev->kfd2kgd->hqd_destroy
+- (mm->dev->kgd, type, timeout,
++ (mm->dev->kgd, mqd, type, timeout,
+ pipe_id, queue_id);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+index 894a5f2..8af7f21 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+@@ -288,7 +288,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
+ uint32_t queue_id)
+ {
+ return mm->dev->kfd2kgd->hqd_destroy
+- (mm->dev->kgd, type, timeout,
++ (mm->dev->kgd, mqd, type, timeout,
+ pipe_id, queue_id);
+ }
+
+diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+index 3e7f5d1..2e95783 100644
+--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
++++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+@@ -309,7 +309,7 @@ struct kfd2kgd_calls {
+ bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+- int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
++ int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+
+diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
+index dc88266..9480b04 100644
+--- a/drivers/gpu/drm/radeon/radeon_kfd.c
++++ b/drivers/gpu/drm/radeon/radeon_kfd.c
+@@ -121,7 +121,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
+ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
+ uint32_t pipe_id, uint32_t queue_id);
+
+-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id);
+ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
+@@ -852,7 +852,7 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
+ return false;
+ }
+
+-static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
++static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, uint32_t reset_type,
+ unsigned int timeout, uint32_t pipe_id,
+ uint32_t queue_id)
+ {
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2917-drm-amdkfd-Fix-a-bug-that-process-cleanup-is-not-don.patch b/meta-v1000/recipes-kernel/linux/files/2917-drm-amdkfd-Fix-a-bug-that-process-cleanup-is-not-don.patch
new file mode 100644
index 00000000..b1012222
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2917-drm-amdkfd-Fix-a-bug-that-process-cleanup-is-not-don.patch
@@ -0,0 +1,46 @@
+From 9f4d7a52a0f44541c299b08e0f805ff533f69918 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 17:16:50 +0530
+Subject: [PATCH] drm/amdkfd: Fix a bug that process cleanup is not done
+ properly
+
+When destroying a queue fails in nocpsch, it may leave other queues
+not destroyed and process not unregistered. This commit fixes that.
+
+Change-Id: I9dcf7cb71242aa02e5b72c0381fec19a85f3dfe4
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index e303bae..7b25969a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1431,9 +1431,11 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
+
+ /* Clear all user mode queues */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+- retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
+- if (retval)
+- goto out;
++ int ret;
++
++ ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
++ if (ret)
++ retval = ret;
+ }
+
+ /* Unregister process */
+@@ -1446,7 +1448,6 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
+ }
+ }
+
+-out:
+ mutex_unlock(&dqm->lock);
+ return retval;
+ }
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2918-drm-amdkfd-Fix-a-bug-that-vmid-is-released-before.patch b/meta-v1000/recipes-kernel/linux/files/2918-drm-amdkfd-Fix-a-bug-that-vmid-is-released-before.patch
new file mode 100644
index 00000000..250dd9d3
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2918-drm-amdkfd-Fix-a-bug-that-vmid-is-released-before.patch
@@ -0,0 +1,164 @@
+From e13027012535c00dca0b2605342d9fcfb7400518 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 17:49:06 +0530
+Subject: [PATCH] drm/amdkfd: Fix a bug that vmid is released before resetting
+ wavefronts
+
+When no HWS is used, vmid is always released after the last queue is
+destroyed rather than when the process terminates. With the current code,
+when a process terminates with all queues destroyed and somehow we need
+to reset wavefronts, dbgdev_wave_reset_wavefronts() will fail because
+no vmid is bound to this process any more.
+
+With this commit, we will reset the wavefronts, if needed, just before
+releasing the vmid. As part of the change, the wavefronts reset handling
+is moved to DQM from PQM, resulting in clearer logic.
+
+Change-Id: Ib72b7dc1d910045130928a8e20729b884a55b335
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 24 +++++++++++++++++++++-
+ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 11 +++++-----
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 1 -
+ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 15 --------------
+ 4 files changed, 29 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 7b25969a..42b85e6 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -426,12 +426,26 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
+ KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
+ KFD_HIQ_TIMEOUT,
+ q->pipe, q->queue);
++ if (retval == -ETIME)
++ qpd->reset_wavefronts = true;
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+ list_del(&q->list);
+- if (list_empty(&qpd->queues_list))
++ if (list_empty(&qpd->queues_list)) {
++ if (qpd->reset_wavefronts) {
++ pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
++ dqm->dev);
++ /* dbgdev_wave_reset_wavefronts has to be called before
++ * deallocate_vmid(), i.e. when vmid is still in use.
++ */
++ dbgdev_wave_reset_wavefronts(dqm->dev,
++ qpd->pqm->process);
++ qpd->reset_wavefronts = false;
++ }
++
+ deallocate_vmid(dqm, qpd, q);
++ }
+ if (q->properties.is_active)
+ dqm->queue_count--;
+
+@@ -1308,6 +1322,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
+ dqm->queue_count--;
+
+ retval = execute_queues_cpsch(dqm, false, false);
++ if (retval == -ETIME)
++ qpd->reset_wavefronts = true;
+
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+
+@@ -1534,6 +1550,12 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
+
+ retval = execute_queues_cpsch(dqm, true, true);
+
++ if (retval || qpd->reset_wavefronts) {
++ pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
++ dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
++ qpd->reset_wavefronts = false;
++ }
++
+ /* lastly, free mqd resources */
+ list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
+ mqd = dqm->ops.get_mqd_manager(dqm,
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+index 47a166a..2d05f90 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+@@ -554,6 +554,12 @@ struct qcm_process_device {
+ unsigned int vmid;
+ bool is_debug;
+ unsigned int evicted; /* eviction counter, 0=active */
++
++ /* This flag tells if we should reset all wavefronts on
++ * process termination
++ */
++ bool reset_wavefronts;
++
+ /*
+ * All the memory management data should be here too
+ */
+@@ -655,11 +661,6 @@ struct kfd_process_device {
+ /* GPUVM allocations storage */
+ struct idr alloc_idr;
+
+- /* This flag tells if we should reset all
+- * wavefronts on process termination
+- */
+- bool reset_wavefronts;
+-
+ /* Flag used to tell the pdd has dequeued from the dqm.
+ * This is used to prevent dev->dqm->ops.process_termination() from
+ * being called twice when it is already called in IOMMU callback
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index fb1e9e2..c85122f 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -672,7 +672,6 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
+ pdd->qpd.dqm = dev->dqm;
+ pdd->qpd.pqm = &p->pqm;
+ pdd->qpd.evicted = 0;
+- pdd->reset_wavefronts = false;
+ pdd->process = p;
+ pdd->bound = PDD_UNBOUND;
+ pdd->already_dequeued = false;
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+index 76284cd..4d47d48 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+@@ -66,7 +66,6 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
+ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+ {
+ struct kfd_dev *dev = pdd->dev;
+- struct kfd_process *p = pdd->process;
+ int retval;
+
+ if (pdd->already_dequeued)
+@@ -74,16 +73,6 @@ void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
+
+ retval = dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
+ pdd->already_dequeued = true;
+- /* Checking pdd->reset_wavefronts may not be needed, because
+- * if reset_wavefronts was set to true before, which means unmapping
+- * failed, process_termination should fail too until we reset
+- * wavefronts. Now we put the check there to be safe.
+- */
+- if (retval || pdd->reset_wavefronts) {
+- pr_warn("Resetting wave fronts on dev %p\n", dev);
+- dbgdev_wave_reset_wavefronts(dev, p);
+- pdd->reset_wavefronts = false;
+- }
+ }
+
+ void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
+@@ -337,10 +326,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
+ kfree(pqn->q->properties.cu_mask);
+ pqn->q->properties.cu_mask = NULL;
+ retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
+- if (retval != 0) {
+- if (retval == -ETIME)
+- pdd->reset_wavefronts = true;
+- }
+ uninit_queue(pqn->q);
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2919-drm-amd-Implement-parallel-memory-mapping-on-mGPUs.patch b/meta-v1000/recipes-kernel/linux/files/2919-drm-amd-Implement-parallel-memory-mapping-on-mGPUs.patch
new file mode 100644
index 00000000..a667d515
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2919-drm-amd-Implement-parallel-memory-mapping-on-mGPUs.patch
@@ -0,0 +1,101 @@
+From e5a754c6570bc2e626af6df1ac0f11a91ca43757 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 18:40:45 +0530
+Subject: [PATCH] drm/amd: Implement parallel memory mapping on mGPUs
+
+Alter the KFD-KGD interface to optimize multi-GPU memory mappings to
+work concurrently instead of sequentially. Return the fences
+during the process, wait for all fences after the mappings are done.
+The fences are stored in the associated kgd_mem object.
+
+This change also enables interruptible waiting with proper signal
+handling
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 9 ++++++---
+ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 +++++++
+ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 7 +++++++
+ 3 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index 0d98b43..2beb61e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -871,8 +871,11 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
+ {
+ int ret = 0;
+
+- if (wait)
++ if (wait) {
+ ret = amdgpu_sync_wait(ctx->sync, intr);
++ if (ret)
++ return ret;
++ }
+
+ if (ctx->reserved)
+ ttm_eu_backoff_reservation(&ctx->ticket, &ctx->list);
+@@ -1220,7 +1223,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
+ entry, bo_size);
+ }
+
+- ret = unreserve_bo_and_vms(&ctx, false, false);
++ ret = unreserve_bo_and_vms(&ctx, false, true);
+
+ /* Free the sync object */
+ amdgpu_sync_free(&mem->sync);
+@@ -1349,7 +1352,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
+ amdgpu_bo_fence(bo,
+ &kfd_vm->process_info->eviction_fence->base,
+ true);
+- ret = unreserve_bo_and_vms(&ctx, false, false);
++ ret = unreserve_bo_and_vms(&ctx, false, true);
+
+ mutex_unlock(&mem->process_info->lock);
+ mutex_unlock(&mem->lock);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+index 437a133..3dec240 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+@@ -1402,6 +1402,12 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
+ kfd_flush_tlb(dev, p->pasid);
+ }
+
++ err = dev->kfd2kgd->sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
++ if (err) {
++ pr_debug("Sync memory failed, wait interrupted by user signal\n");
++ goto sync_memory_failed;
++ }
++
+ if (args->device_ids_array_size > 0 && devices_arr)
+ kfree(devices_arr);
+
+@@ -1512,6 +1518,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
+ up_write(&p->lock);
+ get_mem_obj_from_handle_failed:
+ copy_from_user_failed:
++sync_memory_failed:
+ kfree(devices_arr);
+ return err;
+ }
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+index c85122f..78a5f7a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+@@ -121,6 +121,13 @@ static int kfd_process_alloc_gpuvm(struct kfd_process *p,
+
+ kfd_flush_tlb(kdev, p->pasid);
+
++ err = kdev->kfd2kgd->sync_memory(kdev->kgd, (struct kgd_mem *) mem,
++ true);
++ if (err) {
++ pr_debug("Sync memory failed, wait interrupted by user signal\n");
++ goto sync_memory_failed;
++ }
++
+ /* Create an obj handle so kfd_process_device_remove_obj_handle
+ * will take care of the bo removal when the process finishes.
+ * We do not need to take p->lock, because the process is just
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2920-drm-amdkfd-gfx9-preempt-queues-after-VM_FAULT.patch b/meta-v1000/recipes-kernel/linux/files/2920-drm-amdkfd-gfx9-preempt-queues-after-VM_FAULT.patch
new file mode 100644
index 00000000..9ec3d07e
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2920-drm-amdkfd-gfx9-preempt-queues-after-VM_FAULT.patch
@@ -0,0 +1,31 @@
+From eb7218fc1b993fa527bb9bbde5aeffeefb371d34 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 18:56:59 +0530
+Subject: [PATCH] drm/amdkfd: gfx9 preempt queues after VM_FAULT
+
+Context-save after a VM_FAULT is required for the debugger.
+
+Change-Id: I4a07448143c161f8ab48c41635cceaa4e13efb94
+Signed-off-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 42b85e6..06328d7 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1687,7 +1687,7 @@ int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ return -EINVAL;
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd)
+- ret = process_evict_queues(dqm, &pdd->qpd, true);
++ ret = process_evict_queues(dqm, &pdd->qpd, reset);
+ kfd_unref_process(p);
+
+ return ret;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2921-drm-amd-pp-Read-the-maximum-clock-frequency-from.patch b/meta-v1000/recipes-kernel/linux/files/2921-drm-amd-pp-Read-the-maximum-clock-frequency-from.patch
new file mode 100644
index 00000000..6eead75b
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2921-drm-amd-pp-Read-the-maximum-clock-frequency-from.patch
@@ -0,0 +1,36 @@
+From fb6470ce380b8d6efc8a457b65a6dffd10691bfa Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:21:43 +0530
+Subject: [PATCH] drm/amd/pp: Read the maximum clock frequency from pplib
+ using pp_dpm_get_sclk() and pp_dpm_get_mclk() in case of failure when using
+ phm_get_max_high_clocks()
+
+Change-Id: I40368191a98165b6f93ed35f3ff46a41f065b174
+Signed-off-by: Raveendra Talabattula <raveendra.talabattula@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index f73e80c..ae9248e 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1474,6 +1474,11 @@ int amd_powerplay_get_display_mode_validation_clocks(void *handle,
+ ret = phm_get_max_high_clocks(hwmgr, clocks);
+
+ mutex_unlock(&pp_handle->pp_lock);
+- return ret;
+-}
++
++ if (ret) {
++ clocks->memory_max_clock = pp_dpm_get_mclk(handle, false);
++ clocks->engine_max_clock = pp_dpm_get_sclk(handle, false);
++ }
+
++ return 0;
++}
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2922-AMD-XGBE-support.patch b/meta-v1000/recipes-kernel/linux/files/2922-AMD-XGBE-support.patch
new file mode 100644
index 00000000..c93fb379
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2922-AMD-XGBE-support.patch
@@ -0,0 +1,11852 @@
+From 86589cfe679a248210ed5026ab6aaf80d6758e5b Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:24:17 +0530
+Subject: [PATCH] AMD-XGBE support
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/Kconfig | 10 +-
+ drivers/net/ethernet/amd/xgbe/Makefile | 5 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 387 +++-
+ drivers/net/ethernet/amd/xgbe/xgbe-dcb.c | 0
+ drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 154 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 0
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 1146 ++++++---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 351 ++-
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 87 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 492 ++++
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 705 ++----
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 1130 +++++----
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 542 +++++
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c | 845 +++++++
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 3084 +++++++++++++++++++++++++
+ drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 642 +++++
+ drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 2 +-
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 388 +++-
+ drivers/net/phy/phy.c | 3 +-
+ include/linux/netdevice.h | 2 +
+ include/linux/phy.h | 1 +
+ 21 files changed, 8482 insertions(+), 1494 deletions(-)
+ mode change 100644 => 100755 drivers/net/ethernet/amd/Kconfig
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/Makefile
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-common.h
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+ create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-main.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+ create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+ create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+ create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+ create mode 100644 drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe.h
+ mode change 100644 => 100755 drivers/net/phy/phy.c
+ mode change 100644 => 100755 include/linux/netdevice.h
+ mode change 100644 => 100755 include/linux/phy.h
+
+diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
+old mode 100644
+new mode 100755
+index 0038709..0f1db5c
+--- a/drivers/net/ethernet/amd/Kconfig
++++ b/drivers/net/ethernet/amd/Kconfig
+@@ -173,10 +173,12 @@ config SUNLANCE
+
+ config AMD_XGBE
+ tristate "AMD 10GbE Ethernet driver"
+- depends on ((OF_NET && OF_ADDRESS) || ACPI) && HAS_IOMEM && HAS_DMA
+- depends on ARM64 || COMPILE_TEST
++ depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
++ depends on X86 || ARM64 || COMPILE_TEST
+ select BITREVERSE
+ select CRC32
++ select PHYLIB
++ select AMD_XGBE_HAVE_ECC if X86
+ select PTP_1588_CLOCK
+ ---help---
+ This driver supports the AMD 10GbE Ethernet device found on an
+@@ -195,4 +197,8 @@ config AMD_XGBE_DCB
+
+ If unsure, say N.
+
++config AMD_XGBE_HAVE_ECC
++ bool
++ default n
++
+ endif # NET_VENDOR_AMD
+diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
+old mode 100644
+new mode 100755
+index 171a7e6..0dea8f5
+--- a/drivers/net/ethernet/amd/xgbe/Makefile
++++ b/drivers/net/ethernet/amd/xgbe/Makefile
+@@ -2,7 +2,10 @@ obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+ amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+- xgbe-ptp.o
++ xgbe-ptp.o \
++ xgbe-i2c.o xgbe-phy-v1.o xgbe-phy-v2.o \
++ xgbe-platform.o
+
++amd-xgbe-$(CONFIG_PCI) += xgbe-pci.o
+ amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
+ amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+old mode 100644
+new mode 100755
+index 1592e1c..86f1626
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -159,6 +159,8 @@
+ #define DMA_ISR_MACIS_WIDTH 1
+ #define DMA_ISR_MTLIS_INDEX 16
+ #define DMA_ISR_MTLIS_WIDTH 1
++#define DMA_MR_INTM_INDEX 12
++#define DMA_MR_INTM_WIDTH 2
+ #define DMA_MR_SWR_INDEX 0
+ #define DMA_MR_SWR_WIDTH 1
+ #define DMA_SBMR_EAME_INDEX 11
+@@ -309,6 +311,11 @@
+ #define MAC_HWF0R 0x011c
+ #define MAC_HWF1R 0x0120
+ #define MAC_HWF2R 0x0124
++#define MAC_MDIOSCAR 0x0200
++#define MAC_MDIOSCCDR 0x0204
++#define MAC_MDIOISR 0x0214
++#define MAC_MDIOIER 0x0218
++#define MAC_MDIOCL22R 0x0220
+ #define MAC_GPIOCR 0x0278
+ #define MAC_GPIOSR 0x027c
+ #define MAC_MACA0HR 0x0300
+@@ -409,10 +416,34 @@
+ #define MAC_ISR_MMCTXIS_WIDTH 1
+ #define MAC_ISR_PMTIS_INDEX 4
+ #define MAC_ISR_PMTIS_WIDTH 1
++#define MAC_ISR_SMI_INDEX 1
++#define MAC_ISR_SMI_WIDTH 1
+ #define MAC_ISR_TSIS_INDEX 12
+ #define MAC_ISR_TSIS_WIDTH 1
+ #define MAC_MACA1HR_AE_INDEX 31
+ #define MAC_MACA1HR_AE_WIDTH 1
++#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
++#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
++#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
++#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
++#define MAC_MDIOSCAR_DA_INDEX 21
++#define MAC_MDIOSCAR_DA_WIDTH 5
++#define MAC_MDIOSCAR_PA_INDEX 16
++#define MAC_MDIOSCAR_PA_WIDTH 5
++#define MAC_MDIOSCAR_RA_INDEX 0
++#define MAC_MDIOSCAR_RA_WIDTH 16
++#define MAC_MDIOSCAR_REG_INDEX 0
++#define MAC_MDIOSCAR_REG_WIDTH 21
++#define MAC_MDIOSCCDR_BUSY_INDEX 22
++#define MAC_MDIOSCCDR_BUSY_WIDTH 1
++#define MAC_MDIOSCCDR_CMD_INDEX 16
++#define MAC_MDIOSCCDR_CMD_WIDTH 2
++#define MAC_MDIOSCCDR_CR_INDEX 19
++#define MAC_MDIOSCCDR_CR_WIDTH 3
++#define MAC_MDIOSCCDR_DATA_INDEX 0
++#define MAC_MDIOSCCDR_DATA_WIDTH 16
++#define MAC_MDIOSCCDR_SADDR_INDEX 18
++#define MAC_MDIOSCCDR_SADDR_WIDTH 1
+ #define MAC_PFR_HMC_INDEX 2
+ #define MAC_PFR_HMC_WIDTH 1
+ #define MAC_PFR_HPF_INDEX 10
+@@ -790,6 +821,10 @@
+ #define MTL_Q_RQOMR_RSF_WIDTH 1
+ #define MTL_Q_RQOMR_RTC_INDEX 0
+ #define MTL_Q_RQOMR_RTC_WIDTH 2
++#define MTL_Q_TQDR_TRCSTS_INDEX 1
++#define MTL_Q_TQDR_TRCSTS_WIDTH 2
++#define MTL_Q_TQDR_TXQSTS_INDEX 4
++#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+ #define MTL_Q_TQOMR_FTQ_INDEX 0
+ #define MTL_Q_TQOMR_FTQ_WIDTH 1
+ #define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+@@ -852,14 +887,18 @@
+ #define MTL_TSA_SP 0x00
+ #define MTL_TSA_ETS 0x02
+
+-/* PCS MMD select register offset
+- * The MMD select register is used for accessing PCS registers
+- * when the underlying APB3 interface is using indirect addressing.
+- * Indirect addressing requires accessing registers in two phases,
+- * an address phase and a data phase. The address phases requires
+- * writing an address selection value to the MMD select regiesters.
+- */
+-#define PCS_MMD_SELECT 0xff
++/* PCS register offsets */
++#define PCS_V1_WINDOW_SELECT 0x03fc
++#define PCS_V2_WINDOW_DEF 0x9060
++#define PCS_V2_WINDOW_SELECT 0x9064
++#define PCS_V2_RV_WINDOW_DEF 0x1060
++#define PCS_V2_RV_WINDOW_SELECT 0x1064
++
++/* PCS register entry bit positions and sizes */
++#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
++#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
++#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
++#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
+
+ /* SerDes integration register offsets */
+ #define SIR0_KR_RT_1 0x002c
+@@ -903,6 +942,198 @@
+ #define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+ #define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
++/* MAC Control register offsets */
++#define XP_PROP_0 0x0000
++#define XP_PROP_1 0x0004
++#define XP_PROP_2 0x0008
++#define XP_PROP_3 0x000c
++#define XP_PROP_4 0x0010
++#define XP_PROP_5 0x0014
++#define XP_MAC_ADDR_LO 0x0020
++#define XP_MAC_ADDR_HI 0x0024
++#define XP_ECC_ISR 0x0030
++#define XP_ECC_IER 0x0034
++#define XP_ECC_CNT0 0x003c
++#define XP_ECC_CNT1 0x0040
++#define XP_DRIVER_INT_REQ 0x0060
++#define XP_DRIVER_INT_RO 0x0064
++#define XP_DRIVER_SCRATCH_0 0x0068
++#define XP_DRIVER_SCRATCH_1 0x006c
++#define XP_INT_EN 0x0078
++#define XP_I2C_MUTEX 0x0080
++#define XP_MDIO_MUTEX 0x0084
++
++/* MAC Control register entry bit positions and sizes */
++#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
++#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
++#define XP_DRIVER_INT_RO_STATUS_INDEX 0
++#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
++#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
++#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
++#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
++#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
++#define XP_ECC_CNT0_RX_DED_INDEX 24
++#define XP_ECC_CNT0_RX_DED_WIDTH 8
++#define XP_ECC_CNT0_RX_SEC_INDEX 16
++#define XP_ECC_CNT0_RX_SEC_WIDTH 8
++#define XP_ECC_CNT0_TX_DED_INDEX 8
++#define XP_ECC_CNT0_TX_DED_WIDTH 8
++#define XP_ECC_CNT0_TX_SEC_INDEX 0
++#define XP_ECC_CNT0_TX_SEC_WIDTH 8
++#define XP_ECC_CNT1_DESC_DED_INDEX 8
++#define XP_ECC_CNT1_DESC_DED_WIDTH 8
++#define XP_ECC_CNT1_DESC_SEC_INDEX 0
++#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
++#define XP_ECC_IER_DESC_DED_INDEX 0
++#define XP_ECC_IER_DESC_DED_WIDTH 1
++#define XP_ECC_IER_DESC_SEC_INDEX 1
++#define XP_ECC_IER_DESC_SEC_WIDTH 1
++#define XP_ECC_IER_RX_DED_INDEX 2
++#define XP_ECC_IER_RX_DED_WIDTH 1
++#define XP_ECC_IER_RX_SEC_INDEX 3
++#define XP_ECC_IER_RX_SEC_WIDTH 1
++#define XP_ECC_IER_TX_DED_INDEX 4
++#define XP_ECC_IER_TX_DED_WIDTH 1
++#define XP_ECC_IER_TX_SEC_INDEX 5
++#define XP_ECC_IER_TX_SEC_WIDTH 1
++#define XP_ECC_ISR_DESC_DED_INDEX 0
++#define XP_ECC_ISR_DESC_DED_WIDTH 1
++#define XP_ECC_ISR_DESC_SEC_INDEX 1
++#define XP_ECC_ISR_DESC_SEC_WIDTH 1
++#define XP_ECC_ISR_RX_DED_INDEX 2
++#define XP_ECC_ISR_RX_DED_WIDTH 1
++#define XP_ECC_ISR_RX_SEC_INDEX 3
++#define XP_ECC_ISR_RX_SEC_WIDTH 1
++#define XP_ECC_ISR_TX_DED_INDEX 4
++#define XP_ECC_ISR_TX_DED_WIDTH 1
++#define XP_ECC_ISR_TX_SEC_INDEX 5
++#define XP_ECC_ISR_TX_SEC_WIDTH 1
++#define XP_I2C_MUTEX_BUSY_INDEX 31
++#define XP_I2C_MUTEX_BUSY_WIDTH 1
++#define XP_I2C_MUTEX_ID_INDEX 29
++#define XP_I2C_MUTEX_ID_WIDTH 2
++#define XP_I2C_MUTEX_ACTIVE_INDEX 0
++#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
++#define XP_MAC_ADDR_HI_VALID_INDEX 31
++#define XP_MAC_ADDR_HI_VALID_WIDTH 1
++#define XP_PROP_0_CONN_TYPE_INDEX 28
++#define XP_PROP_0_CONN_TYPE_WIDTH 3
++#define XP_PROP_0_MDIO_ADDR_INDEX 16
++#define XP_PROP_0_MDIO_ADDR_WIDTH 5
++#define XP_PROP_0_PORT_ID_INDEX 0
++#define XP_PROP_0_PORT_ID_WIDTH 8
++#define XP_PROP_0_PORT_MODE_INDEX 8
++#define XP_PROP_0_PORT_MODE_WIDTH 4
++#define XP_PROP_0_PORT_SPEEDS_INDEX 23
++#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
++#define XP_PROP_1_MAX_RX_DMA_INDEX 24
++#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
++#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
++#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
++#define XP_PROP_1_MAX_TX_DMA_INDEX 16
++#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
++#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
++#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
++#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
++#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
++#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
++#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
++#define XP_PROP_3_GPIO_MASK_INDEX 28
++#define XP_PROP_3_GPIO_MASK_WIDTH 4
++#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
++#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
++#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
++#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
++#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
++#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
++#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
++#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
++#define XP_PROP_3_GPIO_ADDR_INDEX 8
++#define XP_PROP_3_GPIO_ADDR_WIDTH 3
++#define XP_PROP_3_MDIO_RESET_INDEX 0
++#define XP_PROP_3_MDIO_RESET_WIDTH 2
++#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
++#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
++#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
++#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
++#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
++#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
++#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
++#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
++#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
++#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
++#define XP_PROP_4_MUX_CHAN_INDEX 4
++#define XP_PROP_4_MUX_CHAN_WIDTH 3
++#define XP_PROP_4_REDRV_ADDR_INDEX 16
++#define XP_PROP_4_REDRV_ADDR_WIDTH 7
++#define XP_PROP_4_REDRV_IF_INDEX 23
++#define XP_PROP_4_REDRV_IF_WIDTH 1
++#define XP_PROP_4_REDRV_LANE_INDEX 24
++#define XP_PROP_4_REDRV_LANE_WIDTH 3
++#define XP_PROP_4_REDRV_MODEL_INDEX 28
++#define XP_PROP_4_REDRV_MODEL_WIDTH 3
++#define XP_PROP_4_REDRV_PRESENT_INDEX 31
++#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
++
++/* I2C Control register offsets */
++#define IC_CON 0x0000
++#define IC_TAR 0x0004
++#define IC_DATA_CMD 0x0010
++#define IC_INTR_STAT 0x002c
++#define IC_INTR_MASK 0x0030
++#define IC_RAW_INTR_STAT 0x0034
++#define IC_CLR_INTR 0x0040
++#define IC_CLR_TX_ABRT 0x0054
++#define IC_CLR_STOP_DET 0x0060
++#define IC_ENABLE 0x006c
++#define IC_TXFLR 0x0074
++#define IC_RXFLR 0x0078
++#define IC_TX_ABRT_SOURCE 0x0080
++#define IC_ENABLE_STATUS 0x009c
++#define IC_COMP_PARAM_1 0x00f4
++
++/* I2C Control register entry bit positions and sizes */
++#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
++#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
++#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
++#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
++#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
++#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
++#define IC_CON_MASTER_MODE_INDEX 0
++#define IC_CON_MASTER_MODE_WIDTH 1
++#define IC_CON_RESTART_EN_INDEX 5
++#define IC_CON_RESTART_EN_WIDTH 1
++#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
++#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
++#define IC_CON_SLAVE_DISABLE_INDEX 6
++#define IC_CON_SLAVE_DISABLE_WIDTH 1
++#define IC_CON_SPEED_INDEX 1
++#define IC_CON_SPEED_WIDTH 2
++#define IC_DATA_CMD_CMD_INDEX 8
++#define IC_DATA_CMD_CMD_WIDTH 1
++#define IC_DATA_CMD_STOP_INDEX 9
++#define IC_DATA_CMD_STOP_WIDTH 1
++#define IC_ENABLE_ABORT_INDEX 1
++#define IC_ENABLE_ABORT_WIDTH 1
++#define IC_ENABLE_EN_INDEX 0
++#define IC_ENABLE_EN_WIDTH 1
++#define IC_ENABLE_STATUS_EN_INDEX 0
++#define IC_ENABLE_STATUS_EN_WIDTH 1
++#define IC_INTR_MASK_TX_EMPTY_INDEX 4
++#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
++#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
++#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
++#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
++#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
++#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
++#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
++#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
++#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
++
++/* I2C Control register value */
++#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
++#define IC_TX_ABRT_ARB_LOST 0x1000
++
+ /* Descriptor/Packet entry bit positions and sizes */
+ #define RX_PACKET_ERRORS_CRC_INDEX 2
+ #define RX_PACKET_ERRORS_CRC_WIDTH 1
+@@ -1029,6 +1260,10 @@
+ #define MDIO_PMA_10GBR_FECCTRL 0x00ab
+ #endif
+
++#ifndef MDIO_PCS_DIG_CTRL
++#define MDIO_PCS_DIG_CTRL 0x8000
++#endif
++
+ #ifndef MDIO_AN_XNP
+ #define MDIO_AN_XNP 0x0016
+ #endif
+@@ -1049,11 +1284,48 @@
+ #define MDIO_AN_INT 0x8002
+ #endif
+
++#ifndef MDIO_VEND2_AN_ADVERTISE
++#define MDIO_VEND2_AN_ADVERTISE 0x0004
++#endif
++
++#ifndef MDIO_VEND2_AN_LP_ABILITY
++#define MDIO_VEND2_AN_LP_ABILITY 0x0005
++#endif
++
++#ifndef MDIO_VEND2_AN_CTRL
++#define MDIO_VEND2_AN_CTRL 0x8001
++#endif
++
++#ifndef MDIO_VEND2_AN_STAT
++#define MDIO_VEND2_AN_STAT 0x8002
++#endif
++
+ #ifndef MDIO_CTRL1_SPEED1G
+ #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+ #endif
+
++#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
++#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_AN_RESTART
++#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_SS6
++#define MDIO_VEND2_CTRL1_SS6 BIT(6)
++#endif
++
++#ifndef MDIO_VEND2_CTRL1_SS13
++#define MDIO_VEND2_CTRL1_SS13 BIT(13)
++#endif
++
+ /* MDIO mask values */
++#define XGBE_AN_CL73_INT_CMPLT BIT(0)
++#define XGBE_AN_CL73_INC_LINK BIT(1)
++#define XGBE_AN_CL73_PG_RCV BIT(2)
++#define XGBE_AN_CL73_INT_MASK 0x07
++
+ #define XGBE_XNP_MCF_NULL_MESSAGE 0x001
+ #define XGBE_XNP_ACK_PROCESSED BIT(12)
+ #define XGBE_XNP_MP_FORMATTED BIT(13)
+@@ -1062,6 +1334,19 @@
+ #define XGBE_KR_TRAINING_START BIT(0)
+ #define XGBE_KR_TRAINING_ENABLE BIT(1)
+
++#define XGBE_PCS_CL37_BP BIT(12)
++
++#define XGBE_AN_CL37_INT_CMPLT BIT(0)
++#define XGBE_AN_CL37_INT_MASK 0x01
++
++#define XGBE_AN_CL37_HD_MASK 0x40
++#define XGBE_AN_CL37_FD_MASK 0x20
++
++#define XGBE_AN_CL37_PCS_MODE_MASK 0x06
++#define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
++#define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
++#define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
++
+ /* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+@@ -1197,12 +1482,28 @@ do { \
+ /* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+-#define XPCS_IOWRITE(_pdata, _off, _val) \
++#define XPCS_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XPCS32_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+-#define XPCS_IOREAD(_pdata, _off) \
++#define XPCS32_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
++#define XPCS16_IOWRITE(_pdata, _off, _val) \
++ iowrite16(_val, (_pdata)->xpcs_regs + (_off))
++
++#define XPCS16_IOREAD(_pdata, _off) \
++ ioread16((_pdata)->xpcs_regs + (_off))
++
+ /* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+@@ -1280,6 +1581,72 @@ do { \
+ } while (0)
+
+ /* Macros for building, reading or writing register values or bits
++ * within the register values of MAC Control registers.
++ */
++#define XP_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XP_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XP_IOREAD(_pdata, _reg) \
++ ioread32((_pdata)->xprop_regs + (_reg))
++
++#define XP_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XP_IOWRITE(_pdata, _reg, _val) \
++ iowrite32((_val), (_pdata)->xprop_regs + (_reg))
++
++#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XP_IOWRITE((_pdata), (_reg), reg_val); \
++} while (0)
++
++/* Macros for building, reading or writing register values or bits
++ * within the register values of I2C Control registers.
++ */
++#define XI2C_GET_BITS(_var, _prefix, _field) \
++ GET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH)
++
++#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
++ SET_BITS((_var), \
++ _prefix##_##_field##_INDEX, \
++ _prefix##_##_field##_WIDTH, (_val))
++
++#define XI2C_IOREAD(_pdata, _reg) \
++ ioread32((_pdata)->xi2c_regs + (_reg))
++
++#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
++ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH)
++
++#define XI2C_IOWRITE(_pdata, _reg, _val) \
++ iowrite32((_val), (_pdata)->xi2c_regs + (_reg))
++
++#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
++do { \
++ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
++ SET_BITS(reg_val, \
++ _reg##_##_field##_INDEX, \
++ _reg##_##_field##_WIDTH, (_val)); \
++ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
++} while (0)
++
++/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
+old mode 100644
+new mode 100755
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+old mode 100644
+new mode 100755
+index 96f485a..7546b66
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+@@ -153,7 +153,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ int ret;
+
+ if (*ppos != 0)
+- return 0;
++ return -EINVAL;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+@@ -316,6 +316,126 @@ static const struct file_operations xpcs_reg_value_fops = {
+ .write = xpcs_reg_value_write,
+ };
+
++static ssize_t xprop_reg_addr_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++
++ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xprop_reg);
++}
++
++static ssize_t xprop_reg_addr_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++
++ return xgbe_common_write(buffer, count, ppos,
++ &pdata->debugfs_xprop_reg);
++}
++
++static ssize_t xprop_reg_value_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++ unsigned int value;
++
++ value = XP_IOREAD(pdata, pdata->debugfs_xprop_reg);
++
++ return xgbe_common_read(buffer, count, ppos, value);
++}
++
++static ssize_t xprop_reg_value_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++ unsigned int value;
++ ssize_t len;
++
++ len = xgbe_common_write(buffer, count, ppos, &value);
++ if (len < 0)
++ return len;
++
++ XP_IOWRITE(pdata, pdata->debugfs_xprop_reg, value);
++
++ return len;
++}
++
++static const struct file_operations xprop_reg_addr_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = xprop_reg_addr_read,
++ .write = xprop_reg_addr_write,
++};
++
++static const struct file_operations xprop_reg_value_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = xprop_reg_value_read,
++ .write = xprop_reg_value_write,
++};
++
++static ssize_t xi2c_reg_addr_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++
++ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xi2c_reg);
++}
++
++static ssize_t xi2c_reg_addr_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++
++ return xgbe_common_write(buffer, count, ppos,
++ &pdata->debugfs_xi2c_reg);
++}
++
++static ssize_t xi2c_reg_value_read(struct file *filp, char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++ unsigned int value;
++
++ value = XI2C_IOREAD(pdata, pdata->debugfs_xi2c_reg);
++
++ return xgbe_common_read(buffer, count, ppos, value);
++}
++
++static ssize_t xi2c_reg_value_write(struct file *filp,
++ const char __user *buffer,
++ size_t count, loff_t *ppos)
++{
++ struct xgbe_prv_data *pdata = filp->private_data;
++ unsigned int value;
++ ssize_t len;
++
++ len = xgbe_common_write(buffer, count, ppos, &value);
++ if (len < 0)
++ return len;
++
++ XI2C_IOWRITE(pdata, pdata->debugfs_xi2c_reg, value);
++
++ return len;
++}
++
++static const struct file_operations xi2c_reg_addr_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = xi2c_reg_addr_read,
++ .write = xi2c_reg_addr_write,
++};
++
++static const struct file_operations xi2c_reg_value_fops = {
++ .owner = THIS_MODULE,
++ .open = simple_open,
++ .read = xi2c_reg_value_read,
++ .write = xi2c_reg_value_write,
++};
++
+ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+ {
+ struct dentry *pfile;
+@@ -367,6 +487,38 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
++ if (pdata->xprop_regs) {
++ pfile = debugfs_create_file("xprop_register", 0600,
++ pdata->xgbe_debugfs, pdata,
++ &xprop_reg_addr_fops);
++ if (!pfile)
++ netdev_err(pdata->netdev,
++ "debugfs_create_file failed\n");
++
++ pfile = debugfs_create_file("xprop_register_value", 0600,
++ pdata->xgbe_debugfs, pdata,
++ &xprop_reg_value_fops);
++ if (!pfile)
++ netdev_err(pdata->netdev,
++ "debugfs_create_file failed\n");
++ }
++
++ if (pdata->xi2c_regs) {
++ pfile = debugfs_create_file("xi2c_register", 0600,
++ pdata->xgbe_debugfs, pdata,
++ &xi2c_reg_addr_fops);
++ if (!pfile)
++ netdev_err(pdata->netdev,
++ "debugfs_create_file failed\n");
++
++ pfile = debugfs_create_file("xi2c_register_value", 0600,
++ pdata->xgbe_debugfs, pdata,
++ &xi2c_reg_value_fops);
++ if (!pfile)
++ netdev_err(pdata->netdev,
++ "debugfs_create_file failed\n");
++ }
++
+ kfree(buf);
+ }
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+old mode 100644
+new mode 100755
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+old mode 100644
+new mode 100755
+index ca106d4..efe3cf9
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -123,6 +123,11 @@
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+
++static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
++{
++ return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
++}
++
+ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+ {
+@@ -491,6 +496,27 @@ static void xgbe_config_rss(struct xgbe_prv_data *pdata)
+ "error configuring RSS, RSS disabled\n");
+ }
+
++static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
++ unsigned int queue)
++{
++ unsigned int prio, tc;
++
++ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
++ /* Does this queue handle the priority? */
++ if (pdata->prio2q_map[prio] != queue)
++ continue;
++
++ /* Get the Traffic Class for this priority */
++ tc = pdata->ets->prio_tc[prio];
++
++ /* Check if PFC is enabled for this traffic class */
++ if (pdata->pfc->pfc_en & (1 << tc))
++ return true;
++ }
++
++ return false;
++}
++
+ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+ unsigned int max_q_count, q_count;
+@@ -528,27 +554,14 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+- if (pfc && ets) {
+- unsigned int prio;
+-
+- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+- unsigned int tc;
+-
+- /* Does this queue handle the priority? */
+- if (pdata->prio2q_map[prio] != i)
+- continue;
+-
+- /* Get the Traffic Class for this priority */
+- tc = ets->prio_tc[prio];
+-
+- /* Check if flow control should be enabled */
+- if (pfc->pfc_en & (1 << tc)) {
++ if (pdata->rx_rfd[i]) {
++ /* Flow control thresholds are established */
++ if (pfc && ets) {
++ if (xgbe_is_pfc_queue(pdata, i))
+ ehfc = 1;
+- break;
+- }
++ } else {
++ ehfc = 1;
+ }
+- } else {
+- ehfc = 1;
+ }
+
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+@@ -633,6 +646,11 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
++ /* Set the interrupt mode if supported */
++ if (pdata->channel_irq_mode)
++ XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
++ pdata->channel_irq_mode);
++
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+@@ -654,19 +672,21 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless using
+- * per channel interrupts)
++ * per channel interrupts in edge triggered
++ * mode)
+ */
+- if (!pdata->per_channel_irq)
++ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+ /* Enable following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable (unless using
+- * per channel interrupts)
++ * per channel interrupts in edge triggered
++ * mode)
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+- if (!pdata->per_channel_irq)
++ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ }
+
+@@ -702,34 +722,90 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);
++
++ /* Enable MDIO single command completion interrupt */
++ XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
+ }
+
+-static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
++static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
+ {
+- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x3)
+- return 0;
++ unsigned int ecc_isr, ecc_ier = 0;
+
+- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
++ if (!pdata->vdata->ecc_support)
++ return;
+
+- return 0;
++ /* Clear all the interrupts which are set */
++ ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
++ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
++
++ /* Enable ECC interrupts */
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);
++
++ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
+ }
+
+-static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
++static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
+ {
+- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0x2)
+- return 0;
++ unsigned int ecc_ier;
+
+- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
++ ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
+
+- return 0;
++ /* Disable ECC DED interrupts */
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);
++
++ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
+ }
+
+-static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
++static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
++ enum xgbe_ecc_sec sec)
+ {
+- if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) == 0)
+- return 0;
++ unsigned int ecc_ier;
++
++ ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);
++
++ /* Disable ECC SEC interrupt */
++ switch (sec) {
++ case XGBE_ECC_SEC_TX:
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
++ break;
++ case XGBE_ECC_SEC_RX:
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
++ break;
++ case XGBE_ECC_SEC_DESC:
++ XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
++ break;
++ }
++
++ XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
++}
++
++static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
++{
++ unsigned int ss;
++
++ switch (speed) {
++ case SPEED_1000:
++ ss = 0x03;
++ break;
++ case SPEED_2500:
++ ss = 0x02;
++ break;
++ case SPEED_10000:
++ ss = 0x00;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+- XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
++ if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
++ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
+
+ return 0;
+ }
+@@ -1019,8 +1095,101 @@ static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+ return 0;
+ }
+
+-static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+- int mmd_reg)
++static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
++{
++ unsigned int reg;
++
++ if (gpio > 15)
++ return -EINVAL;
++
++ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
++
++ reg &= ~(1 << (gpio + 16));
++ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
++
++ return 0;
++}
++
++static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
++{
++ unsigned int reg;
++
++ if (gpio > 15)
++ return -EINVAL;
++
++ reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);
++
++ reg |= (1 << (gpio + 16));
++ XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);
++
++ return 0;
++}
++
++static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
++ int mmd_reg)
++{
++ unsigned long flags;
++ unsigned int mmd_address, index, offset;
++ int mmd_data;
++
++ if (mmd_reg & MII_ADDR_C45)
++ mmd_address = mmd_reg & ~MII_ADDR_C45;
++ else
++ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
++
++ /* The PCS registers are accessed using mmio. The underlying
++ * management interface uses indirect addressing to access the MMD
++ * register sets. This requires accessing of the PCS register in two
++ * phases, an address phase and a data phase.
++ *
++ * The mmio interface is based on 16-bit offsets and values. All
++ * register offsets must therefore be adjusted by left shifting the
++ * offset 1 bit and reading 16 bits of data.
++ */
++ mmd_address <<= 1;
++ index = mmd_address & ~pdata->xpcs_window_mask;
++ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
++
++ spin_lock_irqsave(&pdata->xpcs_lock, flags);
++ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
++ mmd_data = XPCS16_IOREAD(pdata, offset);
++ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
++
++ return mmd_data;
++}
++
++static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
++ int mmd_reg, int mmd_data)
++{
++ unsigned long flags;
++ unsigned int mmd_address, index, offset;
++
++ if (mmd_reg & MII_ADDR_C45)
++ mmd_address = mmd_reg & ~MII_ADDR_C45;
++ else
++ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
++
++ /* The PCS registers are accessed using mmio. The underlying
++ * management interface uses indirect addressing to access the MMD
++ * register sets. This requires accessing of the PCS register in two
++ * phases, an address phase and a data phase.
++ *
++ * The mmio interface is based on 16-bit offsets and values. All
++ * register offsets must therefore be adjusted by left shifting the
++ * offset 1 bit and writing 16 bits of data.
++ */
++ mmd_address <<= 1;
++ index = mmd_address & ~pdata->xpcs_window_mask;
++ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
++
++ spin_lock_irqsave(&pdata->xpcs_lock, flags);
++ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
++ XPCS16_IOWRITE(pdata, offset, mmd_data);
++ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
++}
++
++static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
++ int mmd_reg)
+ {
+ unsigned long flags;
+ unsigned int mmd_address;
+@@ -1041,15 +1210,15 @@ static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ * offset 2 bits and reading 32 bits of data.
+ */
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+- XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+- mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
++ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
++ mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+ return mmd_data;
+ }
+
+-static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+- int mmd_reg, int mmd_data)
++static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
++ int mmd_reg, int mmd_data)
+ {
+ unsigned int mmd_address;
+ unsigned long flags;
+@@ -1066,14 +1235,113 @@ static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+- * offset 2 bits and reading 32 bits of data.
++ * offset 2 bits and writing 32 bits of data.
+ */
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+- XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+- XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
++ XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
++ XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+ }
+
++static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
++ int mmd_reg)
++{
++ switch (pdata->vdata->xpcs_access) {
++ case XGBE_XPCS_ACCESS_V1:
++ return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);
++
++ case XGBE_XPCS_ACCESS_V2:
++ default:
++ return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
++ }
++}
++
++static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
++ int mmd_reg, int mmd_data)
++{
++ switch (pdata->vdata->xpcs_access) {
++ case XGBE_XPCS_ACCESS_V1:
++ return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);
++
++ case XGBE_XPCS_ACCESS_V2:
++ default:
++ return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
++ }
++}
++
++static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
++ int reg, u16 val)
++{
++ unsigned int mdio_sca, mdio_sccd;
++
++ reinit_completion(&pdata->mdio_complete);
++
++ mdio_sca = 0;
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
++
++ mdio_sccd = 0;
++ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
++ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
++ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
++ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
++
++ if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
++ netdev_err(pdata->netdev, "mdio write operation timed out\n");
++ return -ETIMEDOUT;
++ }
++
++ return 0;
++}
++
++static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
++ int reg)
++{
++ unsigned int mdio_sca, mdio_sccd;
++
++ reinit_completion(&pdata->mdio_complete);
++
++ mdio_sca = 0;
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
++ XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
++ XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
++
++ mdio_sccd = 0;
++ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
++ XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
++ XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
++
++ if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
++ netdev_err(pdata->netdev, "mdio read operation timed out\n");
++ return -ETIMEDOUT;
++ }
++
++ return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
++}
++
++static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
++ enum xgbe_mdio_mode mode)
++{
++ unsigned int reg_val = 0;
++
++ switch (mode) {
++ case XGBE_MDIO_MODE_CL22:
++ if (port > XGMAC_MAX_C22_PORT)
++ return -EINVAL;
++ reg_val |= (1 << port);
++ break;
++ case XGBE_MDIO_MODE_CL45:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
++
++ return 0;
++}
++
+ static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+ {
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+@@ -1264,14 +1532,21 @@ static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+
+ static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+ {
+- unsigned int tx_snr;
++ unsigned int tx_snr, tx_ssr;
+ u64 nsec;
+
+- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
++ if (pdata->vdata->tx_tstamp_workaround) {
++ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
++ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
++ } else {
++ tx_ssr = XGMAC_IOREAD(pdata, MAC_TXSSR);
++ tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
++ }
++
+ if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+ return 0;
+
+- nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
++ nsec = tx_ssr;
+ nsec *= NSEC_PER_SEC;
+ nsec += tx_snr;
+
+@@ -1327,163 +1602,63 @@ static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+ return 0;
+ }
+
+-static void xgbe_config_tc(struct xgbe_prv_data *pdata)
++static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
++ struct xgbe_ring *ring)
+ {
+- unsigned int offset, queue, prio;
+- u8 i;
+-
+- netdev_reset_tc(pdata->netdev);
+- if (!pdata->num_tcs)
+- return;
++ struct xgbe_prv_data *pdata = channel->pdata;
++ struct xgbe_ring_data *rdata;
+
+- netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
++ /* Make sure everything is written before the register write */
++ wmb();
+
+- for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
+- while ((queue < pdata->tx_q_count) &&
+- (pdata->q2tc_map[queue] == i))
+- queue++;
++ /* Issue a poll command to Tx DMA by writing address
++ * of next immediate free descriptor */
++ rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
++ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
++ lower_32_bits(rdata->rdesc_dma));
+
+- netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
+- i, offset, queue - 1);
+- netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
+- offset = queue;
++ /* Start the Tx timer */
++ if (pdata->tx_usecs && !channel->tx_timer_active) {
++ channel->tx_timer_active = 1;
++ mod_timer(&channel->tx_timer,
++ jiffies + usecs_to_jiffies(pdata->tx_usecs));
+ }
+
+- if (!pdata->ets)
+- return;
+-
+- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+- netdev_set_prio_tc_map(pdata->netdev, prio,
+- pdata->ets->prio_tc[prio]);
++ ring->tx.xmit_more = 0;
+ }
+
+-static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
++static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ {
+- struct ieee_ets *ets = pdata->ets;
+- unsigned int total_weight, min_weight, weight;
+- unsigned int mask, reg, reg_val;
+- unsigned int i, prio;
+-
+- if (!ets)
+- return;
+-
+- /* Set Tx to deficit weighted round robin scheduling algorithm (when
+- * traffic class is using ETS algorithm)
+- */
+- XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
++ struct xgbe_prv_data *pdata = channel->pdata;
++ struct xgbe_ring *ring = channel->tx_ring;
++ struct xgbe_ring_data *rdata;
++ struct xgbe_ring_desc *rdesc;
++ struct xgbe_packet_data *packet = &ring->packet_data;
++ unsigned int csum, tso, vlan;
++ unsigned int tso_context, vlan_context;
++ unsigned int tx_set_ic;
++ int start_index = ring->cur;
++ int cur_index = ring->cur;
++ int i;
+
+- /* Set Traffic Class algorithms */
+- total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+- min_weight = total_weight / 100;
+- if (!min_weight)
+- min_weight = 1;
++ DBGPR("-->xgbe_dev_xmit\n");
+
+- for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+- /* Map the priorities to the traffic class */
+- mask = 0;
+- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+- if (ets->prio_tc[prio] == i)
+- mask |= (1 << prio);
+- }
+- mask &= 0xff;
++ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
++ CSUM_ENABLE);
++ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
++ TSO_ENABLE);
++ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
++ VLAN_CTAG);
+
+- netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
+- i, mask);
+- reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
+- reg_val = XGMAC_IOREAD(pdata, reg);
++ if (tso && (packet->mss != ring->tx.cur_mss))
++ tso_context = 1;
++ else
++ tso_context = 0;
+
+- reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
+- reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
+-
+- XGMAC_IOWRITE(pdata, reg, reg_val);
+-
+- /* Set the traffic class algorithm */
+- switch (ets->tc_tsa[i]) {
+- case IEEE_8021QAZ_TSA_STRICT:
+- netif_dbg(pdata, drv, pdata->netdev,
+- "TC%u using SP\n", i);
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+- MTL_TSA_SP);
+- break;
+- case IEEE_8021QAZ_TSA_ETS:
+- weight = total_weight * ets->tc_tx_bw[i] / 100;
+- weight = clamp(weight, min_weight, total_weight);
+-
+- netif_dbg(pdata, drv, pdata->netdev,
+- "TC%u using DWRR (weight %u)\n", i, weight);
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+- MTL_TSA_ETS);
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+- weight);
+- break;
+- }
+- }
+-
+- xgbe_config_tc(pdata);
+-}
+-
+-static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+-{
+- xgbe_config_flow_control(pdata);
+-}
+-
+-static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
+- struct xgbe_ring *ring)
+-{
+- struct xgbe_prv_data *pdata = channel->pdata;
+- struct xgbe_ring_data *rdata;
+-
+- /* Make sure everything is written before the register write */
+- wmb();
+-
+- /* Issue a poll command to Tx DMA by writing address
+- * of next immediate free descriptor */
+- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+- XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+- lower_32_bits(rdata->rdesc_dma));
+-
+- /* Start the Tx timer */
+- if (pdata->tx_usecs && !channel->tx_timer_active) {
+- channel->tx_timer_active = 1;
+- mod_timer(&channel->tx_timer,
+- jiffies + usecs_to_jiffies(pdata->tx_usecs));
+- }
+-
+- ring->tx.xmit_more = 0;
+-}
+-
+-static void xgbe_dev_xmit(struct xgbe_channel *channel)
+-{
+- struct xgbe_prv_data *pdata = channel->pdata;
+- struct xgbe_ring *ring = channel->tx_ring;
+- struct xgbe_ring_data *rdata;
+- struct xgbe_ring_desc *rdesc;
+- struct xgbe_packet_data *packet = &ring->packet_data;
+- unsigned int csum, tso, vlan;
+- unsigned int tso_context, vlan_context;
+- unsigned int tx_set_ic;
+- int start_index = ring->cur;
+- int cur_index = ring->cur;
+- int i;
+-
+- DBGPR("-->xgbe_dev_xmit\n");
+-
+- csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+- CSUM_ENABLE);
+- tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+- TSO_ENABLE);
+- vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+- VLAN_CTAG);
+-
+- if (tso && (packet->mss != ring->tx.cur_mss))
+- tso_context = 1;
+- else
+- tso_context = 0;
+-
+- if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+- vlan_context = 1;
+- else
+- vlan_context = 0;
++ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
++ vlan_context = 1;
++ else
++ vlan_context = 0;
+
+ /* Determine if an interrupt should be generated for this Tx:
+ * Interrupt:
+@@ -1903,7 +2078,7 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
+ return 0;
+ }
+
+-static int xgbe_exit(struct xgbe_prv_data *pdata)
++static int __xgbe_exit(struct xgbe_prv_data *pdata)
+ {
+ unsigned int count = 2000;
+
+@@ -1925,6 +2100,20 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
+ return 0;
+ }
+
++static int xgbe_exit(struct xgbe_prv_data *pdata)
++{
++ int ret;
++
++ /* To guard against possible incorrectly generated interrupts,
++ * issue the software reset twice.
++ */
++ ret = __xgbe_exit(pdata);
++ if (ret)
++ return ret;
++
++ return __xgbe_exit(pdata);
++}
++
+ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+ {
+ unsigned int i, count;
+@@ -2002,61 +2191,331 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+ }
+
+-static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size,
+- unsigned int queue_count)
++static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
++ unsigned int queue,
++ unsigned int q_fifo_size)
++{
++ unsigned int frame_fifo_size;
++ unsigned int rfa, rfd;
++
++ frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
++
++ if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
++ /* PFC is active for this queue */
++ rfa = pdata->pfc_rfa;
++ rfd = rfa + frame_fifo_size;
++ if (rfd > XGMAC_FLOW_CONTROL_MAX)
++ rfd = XGMAC_FLOW_CONTROL_MAX;
++ if (rfa >= XGMAC_FLOW_CONTROL_MAX)
++ rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
++ } else {
++ /* This path deals with just maximum frame sizes which are
++ * limited to a jumbo frame of 9,000 (plus headers, etc.)
++ * so we can never exceed the maximum allowable RFA/RFD
++ * values.
++ */
++ if (q_fifo_size <= 2048) {
++ /* rx_rfd to zero to signal no flow control */
++ pdata->rx_rfa[queue] = 0;
++ pdata->rx_rfd[queue] = 0;
++ return;
++ }
++
++ if (q_fifo_size <= 4096) {
++ /* Between 2048 and 4096 */
++ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
++ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
++ return;
++ }
++
++ if (q_fifo_size <= frame_fifo_size) {
++ /* Between 4096 and max-frame */
++ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
++ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
++ return;
++ }
++
++ if (q_fifo_size <= (frame_fifo_size * 3)) {
++ /* Between max-frame and 3 max-frames,
++ * trigger if we get just over a frame of data and
++ * resume when we have just under half a frame left.
++ */
++ rfa = q_fifo_size - frame_fifo_size;
++ rfd = rfa + (frame_fifo_size / 2);
++ } else {
++ /* Above 3 max-frames - trigger when just over
++ * 2 frames of space available
++ */
++ rfa = frame_fifo_size * 2;
++ rfa += XGMAC_FLOW_CONTROL_UNIT;
++ rfd = rfa + frame_fifo_size;
++ }
++ }
++
++ pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
++ pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
++}
++
++static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
++ unsigned int *fifo)
+ {
+ unsigned int q_fifo_size;
+- unsigned int p_fifo;
++ unsigned int i;
++
++ for (i = 0; i < pdata->rx_q_count; i++) {
++ q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
++
++ xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
++ }
++}
++
++static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
++{
++ unsigned int i;
++
++ for (i = 0; i < pdata->rx_q_count; i++) {
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
++ pdata->rx_rfa[i]);
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
++ pdata->rx_rfd[i]);
++ }
++}
+
+- /* Calculate the configured fifo size */
+- q_fifo_size = 1 << (fifo_size + 7);
++static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
++{
++ /* The configured value may not be the actual amount of fifo RAM */
++ return min_t(unsigned int, pdata->tx_max_fifo_size,
++ pdata->hw_feat.tx_fifo_size);
++}
+
++static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
++{
+ /* The configured value may not be the actual amount of fifo RAM */
+- q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size);
++ return min_t(unsigned int, pdata->rx_max_fifo_size,
++ pdata->hw_feat.rx_fifo_size);
++}
+
+- q_fifo_size = q_fifo_size / queue_count;
++static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
++ unsigned int queue_count,
++ unsigned int *fifo)
++{
++ unsigned int q_fifo_size;
++ unsigned int p_fifo;
++ unsigned int i;
+
+- /* Each increment in the queue fifo size represents 256 bytes of
+- * fifo, with 0 representing 256 bytes. Distribute the fifo equally
+- * between the queues.
++ q_fifo_size = fifo_size / queue_count;
++
++ /* Calculate the fifo setting by dividing the queue's fifo size
++ * by the fifo allocation increment (with 0 representing the
++ * base allocation increment so decrement the result by 1).
+ */
+- p_fifo = q_fifo_size / 256;
++ p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+- return p_fifo;
++ /* Distribute the fifo equally amongst the queues */
++ for (i = 0; i < queue_count; i++)
++ fifo[i] = p_fifo;
++}
++
++static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
++ unsigned int queue_count,
++ unsigned int *fifo)
++{
++ unsigned int i;
++
++ BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
++
++ if (queue_count <= IEEE_8021QAZ_MAX_TCS)
++ return fifo_size;
++
++ /* Rx queues 9 and up are for specialized packets,
++ * such as PTP or DCB control packets, etc. and
++ * don't require a large fifo
++ */
++ for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
++ fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
++ fifo_size -= XGMAC_FIFO_MIN_ALLOC;
++ }
++
++ return fifo_size;
++}
++
++static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
++{
++ unsigned int delay;
++
++ /* If a delay has been provided, use that */
++ if (pdata->pfc->delay)
++ return pdata->pfc->delay / 8;
++
++ /* Allow for two maximum size frames */
++ delay = xgbe_get_max_frame(pdata);
++ delay += XGMAC_ETH_PREAMBLE;
++ delay *= 2;
++
++ /* Allow for PFC frame */
++ delay += XGMAC_PFC_DATA_LEN;
++ delay += ETH_HLEN + ETH_FCS_LEN;
++ delay += XGMAC_ETH_PREAMBLE;
++
++ /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
++ delay += XGMAC_PFC_DELAYS;
++
++ return delay;
++}
++
++static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
++{
++ unsigned int count, prio_queues;
++ unsigned int i;
++
++ if (!pdata->pfc->pfc_en)
++ return 0;
++
++ count = 0;
++ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
++ for (i = 0; i < prio_queues; i++) {
++ if (!xgbe_is_pfc_queue(pdata, i))
++ continue;
++
++ pdata->pfcq[i] = 1;
++ count++;
++ }
++
++ return count;
++}
++
++static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
++ unsigned int fifo_size,
++ unsigned int *fifo)
++{
++ unsigned int q_fifo_size, rem_fifo, addn_fifo;
++ unsigned int prio_queues;
++ unsigned int pfc_count;
++ unsigned int i;
++
++ q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
++ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
++ pfc_count = xgbe_get_pfc_queues(pdata);
++
++ if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
++ /* No traffic classes with PFC enabled or can't do lossless */
++ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
++ return;
++ }
++
++ /* Calculate how much fifo we have to play with */
++ rem_fifo = fifo_size - (q_fifo_size * prio_queues);
++
++ /* Calculate how much more than base fifo PFC needs, which also
++ * becomes the threshold activation point (RFA)
++ */
++ pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
++ pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
++
++ if (pdata->pfc_rfa > q_fifo_size) {
++ addn_fifo = pdata->pfc_rfa - q_fifo_size;
++ addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
++ } else {
++ addn_fifo = 0;
++ }
++
++ /* Calculate DCB fifo settings:
++ * - distribute remaining fifo between the VLAN priority
++ * queues based on traffic class PFC enablement and overall
++ * priority (0 is lowest priority, so start at highest)
++ */
++ i = prio_queues;
++ while (i > 0) {
++ i--;
++
++ fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
++
++ if (!pdata->pfcq[i] || !addn_fifo)
++ continue;
++
++ if (addn_fifo > rem_fifo) {
++ netdev_warn(pdata->netdev,
++ "RXq%u cannot set needed fifo size\n", i);
++ if (!rem_fifo)
++ continue;
++
++ addn_fifo = rem_fifo;
++ }
++
++ fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
++ rem_fifo -= addn_fifo;
++ }
++
++ if (rem_fifo) {
++ unsigned int inc_fifo = rem_fifo / prio_queues;
++
++ /* Distribute remaining fifo across queues */
++ for (i = 0; i < prio_queues; i++)
++ fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
++ }
+ }
+
+ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+ {
+ unsigned int fifo_size;
++ unsigned int fifo[XGBE_MAX_QUEUES];
+ unsigned int i;
+
+- fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+- pdata->tx_q_count);
++ fifo_size = xgbe_get_tx_fifo_size(pdata);
++
++ xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
+
+ netif_info(pdata, drv, pdata->netdev,
+ "%d Tx hardware queues, %d byte fifo per queue\n",
+- pdata->tx_q_count, ((fifo_size + 1) * 256));
++ pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
+ }
+
+ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+ {
+ unsigned int fifo_size;
++ unsigned int fifo[XGBE_MAX_QUEUES];
++ unsigned int prio_queues;
+ unsigned int i;
+
+- fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+- pdata->rx_q_count);
++ /* Clear any DCB related fifo/queue information */
++ memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
++ pdata->pfc_rfa = 0;
++
++ fifo_size = xgbe_get_rx_fifo_size(pdata);
++ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
++
++ /* Assign a minimum fifo to the non-VLAN priority queues */
++ fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
++
++ if (pdata->pfc && pdata->ets)
++ xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
++ else
++ xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
+
+- netif_info(pdata, drv, pdata->netdev,
+- "%d Rx hardware queues, %d byte fifo per queue\n",
+- pdata->rx_q_count, ((fifo_size + 1) * 256));
++ xgbe_calculate_flow_control_threshold(pdata, fifo);
++ xgbe_config_flow_control_threshold(pdata);
++
++ if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
++ netif_info(pdata, drv, pdata->netdev,
++ "%u Rx hardware queues\n", pdata->rx_q_count);
++ for (i = 0; i < pdata->rx_q_count; i++)
++ netif_info(pdata, drv, pdata->netdev,
++ "RxQ%u, %u byte fifo queue\n", i,
++ ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
++ } else {
++ netif_info(pdata, drv, pdata->netdev,
++ "%u Rx hardware queues, %u byte fifo per queue\n",
++ pdata->rx_q_count,
++ ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
++ }
+ }
+
+ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+@@ -2092,8 +2551,7 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+ }
+
+ /* Map the 8 VLAN priority values to available MTL Rx queues */
+- prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+- pdata->rx_q_count);
++ prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
+ ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+ ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+@@ -2141,16 +2599,120 @@ static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
+ }
+ }
+
+-static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
++static void xgbe_config_tc(struct xgbe_prv_data *pdata)
+ {
+- unsigned int i;
++ unsigned int offset, queue, prio;
++ u8 i;
+
+- for (i = 0; i < pdata->rx_q_count; i++) {
+- /* Activate flow control when less than 4k left in fifo */
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
++ netdev_reset_tc(pdata->netdev);
++ if (!pdata->num_tcs)
++ return;
++
++ netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
++
++ for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
++ while ((queue < pdata->tx_q_count) &&
++ (pdata->q2tc_map[queue] == i))
++ queue++;
++
++ netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
++ i, offset, queue - 1);
++ netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
++ offset = queue;
++ }
++
++ if (!pdata->ets)
++ return;
++
++ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
++ netdev_set_prio_tc_map(pdata->netdev, prio,
++ pdata->ets->prio_tc[prio]);
++}
++
++static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
++{
++ struct ieee_ets *ets = pdata->ets;
++ unsigned int total_weight, min_weight, weight;
++ unsigned int mask, reg, reg_val;
++ unsigned int i, prio;
++
++ if (!ets)
++ return;
++
++ /* Set Tx to deficit weighted round robin scheduling algorithm (when
++ * traffic class is using ETS algorithm)
++ */
++ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
++
++ /* Set Traffic Class algorithms */
++ total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
++ min_weight = total_weight / 100;
++ if (!min_weight)
++ min_weight = 1;
++
++ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
++ /* Map the priorities to the traffic class */
++ mask = 0;
++ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
++ if (ets->prio_tc[prio] == i)
++ mask |= (1 << prio);
++ }
++ mask &= 0xff;
++
++ netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
++ i, mask);
++ reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
++ reg_val = XGMAC_IOREAD(pdata, reg);
++
++ reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
++ reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
++
++ XGMAC_IOWRITE(pdata, reg, reg_val);
++
++ /* Set the traffic class algorithm */
++ switch (ets->tc_tsa[i]) {
++ case IEEE_8021QAZ_TSA_STRICT:
++ netif_dbg(pdata, drv, pdata->netdev,
++ "TC%u using SP\n", i);
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
++ MTL_TSA_SP);
++ break;
++ case IEEE_8021QAZ_TSA_ETS:
++ weight = total_weight * ets->tc_tx_bw[i] / 100;
++ weight = clamp(weight, min_weight, total_weight);
++
++ netif_dbg(pdata, drv, pdata->netdev,
++ "TC%u using DWRR (weight %u)\n", i, weight);
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
++ MTL_TSA_ETS);
++ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
++ weight);
++ break;
++ }
++ }
++
++ xgbe_config_tc(pdata);
++}
++
++static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
++{
++ if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
++ /* Just stop the Tx queues while Rx fifo is changed */
++ netif_tx_stop_all_queues(pdata->netdev);
++
++ /* Suspend Rx so that fifo's can be adjusted */
++ pdata->hw_if.disable_rx(pdata);
++ }
++
++ xgbe_config_rx_fifo_size(pdata);
++ xgbe_config_flow_control(pdata);
+
+- /* De-activate flow control when more than 6k left in fifo */
+- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
++ if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
++ /* Resume Rx */
++ pdata->hw_if.enable_rx(pdata);
++
++ /* Resume Tx queues */
++ netif_tx_start_all_queues(pdata->netdev);
+ }
+ }
+
+@@ -2177,19 +2739,7 @@ static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+
+ static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
+ {
+- switch (pdata->phy_speed) {
+- case SPEED_10000:
+- xgbe_set_xgmii_speed(pdata);
+- break;
+-
+- case SPEED_2500:
+- xgbe_set_gmii_2500_speed(pdata);
+- break;
+-
+- case SPEED_1000:
+- xgbe_set_gmii_speed(pdata);
+- break;
+- }
++ xgbe_set_speed(pdata, pdata->phy_speed);
+ }
+
+ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+@@ -2225,17 +2775,33 @@ static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
+ bool read_hi;
+ u64 val;
+
+- switch (reg_lo) {
+- /* These registers are always 64 bit */
+- case MMC_TXOCTETCOUNT_GB_LO:
+- case MMC_TXOCTETCOUNT_G_LO:
+- case MMC_RXOCTETCOUNT_GB_LO:
+- case MMC_RXOCTETCOUNT_G_LO:
+- read_hi = true;
+- break;
++ if (pdata->vdata->mmc_64bit) {
++ switch (reg_lo) {
++ /* These registers are always 32 bit */
++ case MMC_RXRUNTERROR:
++ case MMC_RXJABBERERROR:
++ case MMC_RXUNDERSIZE_G:
++ case MMC_RXOVERSIZE_G:
++ case MMC_RXWATCHDOGERROR:
++ read_hi = false;
++ break;
+
+- default:
+- read_hi = false;
++ default:
++ read_hi = true;
++ }
++ } else {
++ switch (reg_lo) {
++ /* These registers are always 64 bit */
++ case MMC_TXOCTETCOUNT_GB_LO:
++ case MMC_TXOCTETCOUNT_G_LO:
++ case MMC_RXOCTETCOUNT_GB_LO:
++ case MMC_RXOCTETCOUNT_G_LO:
++ read_hi = true;
++ break;
++
++ default:
++ read_hi = false;
++ }
+ }
+
+ val = XGMAC_IOREAD(pdata, reg_lo);
+@@ -2565,20 +3131,48 @@ static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+ }
+
++static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
++ unsigned int queue)
++{
++ unsigned int tx_status;
++ unsigned long tx_timeout;
++
++ /* The Tx engine cannot be stopped if it is actively processing
++ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
++ * wait forever though...
++ */
++ tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
++ while (time_before(jiffies, tx_timeout)) {
++ tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
++ if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
++ (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
++ break;
++
++ usleep_range(500, 1000);
++ }
++
++ if (!time_before(jiffies, tx_timeout))
++ netdev_info(pdata->netdev,
++ "timed out waiting for Tx queue %u to empty\n",
++ queue);
++}
++
+ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+- struct xgbe_channel *channel)
++ unsigned int queue)
+ {
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
++ if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
++ return xgbe_txq_prepare_tx_stop(pdata, queue);
++
+ /* Calculate the status register to read and the position within */
+- if (channel->queue_index < DMA_DSRX_FIRST_QUEUE) {
++ if (queue < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+- tx_pos = (channel->queue_index * DMA_DSR_Q_WIDTH) +
+- DMA_DSR0_TPS_START;
++ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
+ } else {
+- tx_qidx = channel->queue_index - DMA_DSRX_FIRST_QUEUE;
++ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+@@ -2603,7 +3197,7 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+ if (!time_before(jiffies, tx_timeout))
+ netdev_info(pdata->netdev,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+- channel->queue_index);
++ queue);
+ }
+
+ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+@@ -2635,13 +3229,8 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
+- break;
+-
+- xgbe_prepare_tx_stop(pdata, channel);
+- }
++ for (i = 0; i < pdata->tx_q_count; i++)
++ xgbe_prepare_tx_stop(pdata, i);
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+@@ -2765,13 +3354,8 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
+- break;
+-
+- xgbe_prepare_tx_stop(pdata, channel);
+- }
++ for (i = 0; i < pdata->tx_q_count; i++)
++ xgbe_prepare_tx_stop(pdata, i);
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+@@ -2825,8 +3409,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+- if (ret)
++ if (ret) {
++ netdev_err(pdata->netdev, "error flushing TX queues\n");
+ return ret;
++ }
+
+ /*
+ * Initialize DMA related features
+@@ -2858,12 +3444,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+- xgbe_config_flow_control_threshold(pdata);
+ /*TODO: Error Packet and undersized good Packet forwarding enable
+ (FEP and FUP)
+ */
+ xgbe_config_dcb_tc(pdata);
+- xgbe_config_dcb_pfc(pdata);
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /*
+@@ -2879,6 +3463,11 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
++ /*
++ * Initialize ECC related features
++ */
++ xgbe_enable_ecc_interrupts(pdata);
++
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+@@ -2905,9 +3494,14 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+- hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+- hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+- hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
++ hw_if->set_speed = xgbe_set_speed;
++
++ hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
++ hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
++ hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;
++
++ hw_if->set_gpio = xgbe_set_gpio;
++ hw_if->clr_gpio = xgbe_clr_gpio;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+@@ -2986,5 +3580,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+ hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
+ hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
+
++ /* For ECC */
++ hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
++ hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
++
+ DBGPR("<--xgbe_init_function_ptrs\n");
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+old mode 100644
+new mode 100755
+index 0f0f3014..fe36ded
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -114,7 +114,7 @@
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+-#include <linux/platform_device.h>
++#include <linux/module.h>
+ #include <linux/spinlock.h>
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+@@ -127,8 +127,35 @@
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+
++static unsigned int ecc_sec_info_threshold = 10;
++static unsigned int ecc_sec_warn_threshold = 10000;
++static unsigned int ecc_sec_period = 600;
++static unsigned int ecc_ded_threshold = 2;
++static unsigned int ecc_ded_period = 600;
++
++#ifdef CONFIG_AMD_XGBE_HAVE_ECC
++/* Only expose the ECC parameters if supported */
++module_param(ecc_sec_info_threshold, uint, S_IWUSR | S_IRUGO);
++MODULE_PARM_DESC(ecc_sec_info_threshold,
++ " ECC corrected error informational threshold setting");
++
++module_param(ecc_sec_warn_threshold, uint, S_IWUSR | S_IRUGO);
++MODULE_PARM_DESC(ecc_sec_warn_threshold,
++ " ECC corrected error warning threshold setting");
++
++module_param(ecc_sec_period, uint, S_IWUSR | S_IRUGO);
++MODULE_PARM_DESC(ecc_sec_period, " ECC corrected error period (in seconds)");
++
++module_param(ecc_ded_threshold, uint, S_IWUSR | S_IRUGO);
++MODULE_PARM_DESC(ecc_ded_threshold, " ECC detected error threshold setting");
++
++module_param(ecc_ded_period, uint, S_IWUSR | S_IRUGO);
++MODULE_PARM_DESC(ecc_ded_period, " ECC detected error period (in seconds)");
++#endif
++
+ static int xgbe_one_poll(struct napi_struct *, int);
+ static int xgbe_all_poll(struct napi_struct *, int);
++static void xgbe_stop(struct xgbe_prv_data *);
+
+ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+ {
+@@ -160,18 +187,8 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+- if (pdata->per_channel_irq) {
+- /* Get the DMA interrupt (offset 1) */
+- ret = platform_get_irq(pdata->pdev, i + 1);
+- if (ret < 0) {
+- netdev_err(pdata->netdev,
+- "platform_get_irq %u failed\n",
+- i + 1);
+- goto err_irq;
+- }
+-
+- channel->dma_irq = ret;
+- }
++ if (pdata->per_channel_irq)
++ channel->dma_irq = pdata->channel_irq[i];
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+@@ -194,9 +211,6 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+
+ return 0;
+
+-err_irq:
+- kfree(rx_ring);
+-
+ err_rx_ring:
+ kfree(tx_ring);
+
+@@ -257,11 +271,6 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+ {
+ unsigned int rx_buf_size;
+
+- if (mtu > XGMAC_JUMBO_PACKET_MTU) {
+- netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+- return -EINVAL;
+- }
+-
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+
+@@ -271,48 +280,161 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+ return rx_buf_size;
+ }
+
+-static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
++static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
++ struct xgbe_channel *channel)
+ {
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- struct xgbe_channel *channel;
+ enum xgbe_int int_id;
++
++ if (channel->tx_ring && channel->rx_ring)
++ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
++ else if (channel->tx_ring)
++ int_id = XGMAC_INT_DMA_CH_SR_TI;
++ else if (channel->rx_ring)
++ int_id = XGMAC_INT_DMA_CH_SR_RI;
++ else
++ return;
++
++ hw_if->enable_int(channel, int_id);
++}
++
++static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (channel->tx_ring && channel->rx_ring)
+- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+- else if (channel->tx_ring)
+- int_id = XGMAC_INT_DMA_CH_SR_TI;
+- else if (channel->rx_ring)
+- int_id = XGMAC_INT_DMA_CH_SR_RI;
+- else
+- continue;
++ for (i = 0; i < pdata->channel_count; i++, channel++)
++ xgbe_enable_rx_tx_int(pdata, channel);
++}
+
+- hw_if->enable_int(channel, int_id);
+- }
++static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
++ struct xgbe_channel *channel)
++{
++ struct xgbe_hw_if *hw_if = &pdata->hw_if;
++ enum xgbe_int int_id;
++
++ if (channel->tx_ring && channel->rx_ring)
++ int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
++ else if (channel->tx_ring)
++ int_id = XGMAC_INT_DMA_CH_SR_TI;
++ else if (channel->rx_ring)
++ int_id = XGMAC_INT_DMA_CH_SR_RI;
++ else
++ return;
++
++ hw_if->disable_int(channel, int_id);
+ }
+
+ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+- enum xgbe_int int_id;
+ unsigned int i;
+
+ channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (channel->tx_ring && channel->rx_ring)
+- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+- else if (channel->tx_ring)
+- int_id = XGMAC_INT_DMA_CH_SR_TI;
+- else if (channel->rx_ring)
+- int_id = XGMAC_INT_DMA_CH_SR_RI;
+- else
+- continue;
++ for (i = 0; i < pdata->channel_count; i++, channel++)
++ xgbe_disable_rx_tx_int(pdata, channel);
++}
++
++static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
++ unsigned int *count, const char *area)
++{
++ if (time_before(jiffies, *period)) {
++ (*count)++;
++ } else {
++ *period = jiffies + (ecc_sec_period * HZ);
++ *count = 1;
++ }
+
+- hw_if->disable_int(channel, int_id);
++ if (*count > ecc_sec_info_threshold)
++ dev_warn_once(pdata->dev,
++ "%s ECC corrected errors exceed informational threshold\n",
++ area);
++
++ if (*count > ecc_sec_warn_threshold) {
++ dev_warn_once(pdata->dev,
++ "%s ECC corrected errors exceed warning threshold\n",
++ area);
++ return true;
+ }
++
++ return false;
++}
++
++static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
++ unsigned int *count, const char *area)
++{
++ if (time_before(jiffies, *period)) {
++ (*count)++;
++ } else {
++ *period = jiffies + (ecc_ded_period * HZ);
++ *count = 1;
++ }
++
++ if (*count > ecc_ded_threshold) {
++ netdev_alert(pdata->netdev,
++ "%s ECC detected errors exceed threshold\n",
++ area);
++ return true;
++ }
++
++ return false;
++}
++
++static irqreturn_t xgbe_ecc_isr(int irq, void *data)
++{
++ struct xgbe_prv_data *pdata = data;
++ unsigned int ecc_isr;
++ bool stop = false;
++
++ /* Mask status with only the interrupts we care about */
++ ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
++ ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
++ netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);
++
++ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_DED)) {
++ stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
++ &pdata->tx_ded_count, "TX fifo");
++ }
++
++ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_DED)) {
++ stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
++ &pdata->rx_ded_count, "RX fifo");
++ }
++
++ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_DED)) {
++ stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
++ &pdata->desc_ded_count,
++ "descriptor cache");
++ }
++
++ if (stop) {
++ pdata->hw_if.disable_ecc_ded(pdata);
++ schedule_work(&pdata->stopdev_work);
++ goto out;
++ }
++
++ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, TX_SEC)) {
++ if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
++ &pdata->tx_sec_count, "TX fifo"))
++ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
++ }
++
++ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, RX_SEC))
++ if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
++ &pdata->rx_sec_count, "RX fifo"))
++ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);
++
++ if (XP_GET_BITS(ecc_isr, XP_ECC_ISR, DESC_SEC))
++ if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
++ &pdata->desc_sec_count, "descriptor cache"))
++ pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);
++
++out:
++ /* Clear all ECC interrupts */
++ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
++
++ return IRQ_HANDLED;
+ }
+
+ static irqreturn_t xgbe_isr(int irq, void *data)
+@@ -321,7 +443,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+- unsigned int mac_isr, mac_tssr;
++ unsigned int mac_isr, mac_tssr, mac_mdioisr;
+ unsigned int i;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+@@ -358,6 +480,13 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ /* Turn on polling */
+ __napi_schedule_irqoff(&pdata->napi);
+ }
++ } else {
++ /* Don't clear Rx/Tx status if doing per channel DMA
++ * interrupts, these will be cleared by the ISR for
++ * per channel DMA interrupts.
++ */
++ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0);
++ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0);
+ }
+
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
+@@ -367,13 +496,16 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+- /* Clear all interrupt signals */
++ /* Clear interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
++ netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
++ mac_isr);
++
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+@@ -383,6 +515,9 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+ mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
++ netif_dbg(pdata, intr, pdata->netdev,
++ "MAC_TSSR=%#010x\n", mac_tssr);
++
+ if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+ /* Read Tx Timestamp to clear interrupt */
+ pdata->tx_tstamp =
+@@ -391,27 +526,61 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ &pdata->tx_tstamp_work);
+ }
+ }
++
++ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) {
++ mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
++
++ netif_dbg(pdata, intr, pdata->netdev,
++ "MAC_MDIOISR=%#010x\n", mac_mdioisr);
++
++ if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR,
++ SNGLCOMPINT))
++ complete(&pdata->mdio_complete);
++ }
+ }
+
+ isr_done:
++ /* If there is not a separate AN irq, handle it here */
++ if (pdata->dev_irq == pdata->an_irq)
++ pdata->phy_if.an_isr(irq, pdata);
++
++ /* If there is not a separate ECC irq, handle it here */
++ if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
++ xgbe_ecc_isr(irq, pdata);
++
++ /* If there is not a separate I2C irq, handle it here */
++ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
++ pdata->i2c_if.i2c_isr(irq, pdata);
++
+ return IRQ_HANDLED;
+ }
+
+ static irqreturn_t xgbe_dma_isr(int irq, void *data)
+ {
+ struct xgbe_channel *channel = data;
++ struct xgbe_prv_data *pdata = channel->pdata;
++ unsigned int dma_status;
+
+ /* Per channel DMA interrupts are enabled, so we use the per
+ * channel napi structure and not the private data napi structure
+ */
+ if (napi_schedule_prep(&channel->napi)) {
+ /* Disable Tx and Rx interrupts */
+- disable_irq_nosync(channel->dma_irq);
++ if (pdata->channel_irq_mode)
++ xgbe_disable_rx_tx_int(pdata, channel);
++ else
++ disable_irq_nosync(channel->dma_irq);
+
+ /* Turn on polling */
+ __napi_schedule_irqoff(&channel->napi);
+ }
+
++ /* Clear Tx/Rx signals */
++ dma_status = 0;
++ XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1);
++ XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1);
++ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status);
++
+ return IRQ_HANDLED;
+ }
+
+@@ -428,7 +597,10 @@ static void xgbe_tx_timer(unsigned long data)
+ if (napi_schedule_prep(napi)) {
+ /* Disable Tx and Rx interrupts */
+ if (pdata->per_channel_irq)
+- disable_irq_nosync(channel->dma_irq);
++ if (pdata->channel_irq_mode)
++ xgbe_disable_rx_tx_int(pdata, channel);
++ else
++ disable_irq_nosync(channel->dma_irq);
+ else
+ xgbe_disable_rx_tx_ints(pdata);
+
+@@ -595,6 +767,10 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
++ /* Translate the fifo sizes into actual numbers */
++ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
++ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
++
+ DBGPR("<--xgbe_get_all_hw_features\n");
+ }
+
+@@ -657,6 +833,16 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ return ret;
+ }
+
++ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
++ ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
++ 0, pdata->ecc_name, pdata);
++ if (ret) {
++ netdev_alert(netdev, "error requesting ecc irq %d\n",
++ pdata->ecc_irq);
++ goto err_dev_irq;
++ }
++ }
++
+ if (!pdata->per_channel_irq)
+ return 0;
+
+@@ -673,17 +859,21 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ channel->dma_irq);
+- goto err_irq;
++ goto err_dma_irq;
+ }
+ }
+
+ return 0;
+
+-err_irq:
++err_dma_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+ for (i--, channel--; i < pdata->channel_count; i--, channel--)
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
++ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
++ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
++
++err_dev_irq:
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+ return ret;
+@@ -696,6 +886,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+
+ devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
++ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
++ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
++
+ if (!pdata->per_channel_irq)
+ return;
+
+@@ -783,7 +976,7 @@ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+ DBGPR("<--xgbe_free_rx_data\n");
+ }
+
+-static int xgbe_phy_init(struct xgbe_prv_data *pdata)
++static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+ {
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+@@ -877,11 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_start\n");
+
+- hw_if->init(pdata);
+-
+- ret = phy_if->phy_start(pdata);
++ ret = hw_if->init(pdata);
+ if (ret)
+- goto err_phy;
++ return ret;
+
+ xgbe_napi_enable(pdata, 1);
+
+@@ -889,6 +1080,10 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+ if (ret)
+ goto err_napi;
+
++ ret = phy_if->phy_start(pdata);
++ if (ret)
++ goto err_irqs;
++
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+@@ -897,16 +1092,18 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+ xgbe_start_timers(pdata);
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
++ clear_bit(XGBE_STOPPED, &pdata->dev_state);
++
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
+
++err_irqs:
++ xgbe_free_irqs(pdata);
++
+ err_napi:
+ xgbe_napi_disable(pdata, 1);
+
+- phy_if->phy_stop(pdata);
+-
+-err_phy:
+ hw_if->exit(pdata);
+
+ return ret;
+@@ -923,6 +1120,9 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_stop\n");
+
++ if (test_bit(XGBE_STOPPED, &pdata->dev_state))
++ return;
++
+ netif_tx_stop_all_queues(netdev);
+
+ xgbe_stop_timers(pdata);
+@@ -948,9 +1148,29 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ netdev_tx_reset_queue(txq);
+ }
+
++ set_bit(XGBE_STOPPED, &pdata->dev_state);
++
+ DBGPR("<--xgbe_stop\n");
+ }
+
++static void xgbe_stopdev(struct work_struct *work)
++{
++ struct xgbe_prv_data *pdata = container_of(work,
++ struct xgbe_prv_data,
++ stopdev_work);
++
++ rtnl_lock();
++
++ xgbe_stop(pdata);
++
++ xgbe_free_tx_data(pdata);
++ xgbe_free_rx_data(pdata);
++
++ rtnl_unlock();
++
++ netdev_alert(pdata->netdev, "device stopped\n");
++}
++
+ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+ {
+ DBGPR("-->xgbe_restart_dev\n");
+@@ -1297,8 +1517,8 @@ static int xgbe_open(struct net_device *netdev)
+
+ DBGPR("-->xgbe_open\n");
+
+- /* Initialize the phy */
+- ret = xgbe_phy_init(pdata);
++ /* Reset the phy settings */
++ ret = xgbe_phy_reset(pdata);
+ if (ret)
+ return ret;
+
+@@ -1333,6 +1553,7 @@ static int xgbe_open(struct net_device *netdev)
+
+ INIT_WORK(&pdata->service_work, xgbe_service);
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
++ INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ xgbe_init_timers(pdata);
+
+@@ -2065,6 +2286,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
+ {
+ struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
+ napi);
++ struct xgbe_prv_data *pdata = channel->pdata;
+ int processed = 0;
+
+ DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
+@@ -2081,7 +2303,10 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
+ napi_complete_done(napi, processed);
+
+ /* Enable Tx and Rx interrupts */
+- enable_irq(channel->dma_irq);
++ if (pdata->channel_irq_mode)
++ xgbe_enable_rx_tx_int(pdata, channel);
++ else
++ enable_irq(channel->dma_irq);
+ }
+
+ DBGPR("<--xgbe_one_poll: received = %d\n", processed);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+old mode 100644
+new mode 100755
+index 4007b42..920566a
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -272,97 +272,86 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
+ return ret;
+ }
+
+-static int xgbe_get_settings(struct net_device *netdev,
+- struct ethtool_cmd *cmd)
++static int xgbe_get_link_ksettings(struct net_device *netdev,
++ struct ethtool_link_ksettings *cmd)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+- cmd->phy_address = pdata->phy.address;
++ cmd->base.phy_address = pdata->phy.address;
+
+- cmd->supported = pdata->phy.supported;
+- cmd->advertising = pdata->phy.advertising;
+- cmd->lp_advertising = pdata->phy.lp_advertising;
++ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
++ pdata->phy.supported);
++ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
++ pdata->phy.advertising);
++ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
++ pdata->phy.lp_advertising);
+
+- cmd->autoneg = pdata->phy.autoneg;
+- ethtool_cmd_speed_set(cmd, pdata->phy.speed);
+- cmd->duplex = pdata->phy.duplex;
++ cmd->base.autoneg = pdata->phy.autoneg;
++ cmd->base.speed = pdata->phy.speed;
++ cmd->base.duplex = pdata->phy.duplex;
+
+- cmd->port = PORT_NONE;
+- cmd->transceiver = XCVR_INTERNAL;
++ cmd->base.port = PORT_NONE;
+
+ return 0;
+ }
+
+-static int xgbe_set_settings(struct net_device *netdev,
+- struct ethtool_cmd *cmd)
++static int xgbe_set_link_ksettings(struct net_device *netdev,
++ const struct ethtool_link_ksettings *cmd)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ u32 advertising;
+ u32 speed;
+ int ret;
+
+- speed = ethtool_cmd_speed(cmd);
++ speed = cmd->base.speed;
+
+- if (cmd->phy_address != pdata->phy.address) {
++ if (cmd->base.phy_address != pdata->phy.address) {
+ netdev_err(netdev, "invalid phy address %hhu\n",
+- cmd->phy_address);
++ cmd->base.phy_address);
+ return -EINVAL;
+ }
+
+- if ((cmd->autoneg != AUTONEG_ENABLE) &&
+- (cmd->autoneg != AUTONEG_DISABLE)) {
++ if ((cmd->base.autoneg != AUTONEG_ENABLE) &&
++ (cmd->base.autoneg != AUTONEG_DISABLE)) {
+ netdev_err(netdev, "unsupported autoneg %hhu\n",
+- cmd->autoneg);
++ cmd->base.autoneg);
+ return -EINVAL;
+ }
+
+- if (cmd->autoneg == AUTONEG_DISABLE) {
+- switch (speed) {
+- case SPEED_10000:
+- break;
+- case SPEED_2500:
+- if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) {
+- netdev_err(netdev, "unsupported speed %u\n",
+- speed);
+- return -EINVAL;
+- }
+- break;
+- case SPEED_1000:
+- if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) {
+- netdev_err(netdev, "unsupported speed %u\n",
+- speed);
+- return -EINVAL;
+- }
+- break;
+- default:
++ if (cmd->base.autoneg == AUTONEG_DISABLE) {
++ if (!pdata->phy_if.phy_valid_speed(pdata, speed)) {
+ netdev_err(netdev, "unsupported speed %u\n", speed);
+ return -EINVAL;
+ }
+
+- if (cmd->duplex != DUPLEX_FULL) {
++ if (cmd->base.duplex != DUPLEX_FULL) {
+ netdev_err(netdev, "unsupported duplex %hhu\n",
+- cmd->duplex);
++ cmd->base.duplex);
+ return -EINVAL;
+ }
+ }
+
++ ethtool_convert_link_mode_to_legacy_u32(&advertising,
++ cmd->link_modes.advertising);
++
+ netif_dbg(pdata, link, netdev,
+ "requested advertisement %#x, phy supported %#x\n",
+- cmd->advertising, pdata->phy.supported);
++ advertising, pdata->phy.supported);
+
+- cmd->advertising &= pdata->phy.supported;
+- if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
++ advertising &= pdata->phy.supported;
++ if ((cmd->base.autoneg == AUTONEG_ENABLE) && !advertising) {
+ netdev_err(netdev,
+ "unsupported requested advertisement\n");
+ return -EINVAL;
+ }
+
+ ret = 0;
+- pdata->phy.autoneg = cmd->autoneg;
++ pdata->phy.autoneg = cmd->base.autoneg;
+ pdata->phy.speed = speed;
+- pdata->phy.duplex = cmd->duplex;
+- pdata->phy.advertising = cmd->advertising;
++ pdata->phy.duplex = cmd->base.duplex;
++ pdata->phy.advertising = advertising;
+
+- if (cmd->autoneg == AUTONEG_ENABLE)
++ if (cmd->base.autoneg == AUTONEG_ENABLE)
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
+ else
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+@@ -602,8 +591,6 @@ static int xgbe_get_ts_info(struct net_device *netdev,
+ }
+
+ static const struct ethtool_ops xgbe_ethtool_ops = {
+- .get_settings = xgbe_get_settings,
+- .set_settings = xgbe_set_settings,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+ .set_msglevel = xgbe_set_msglevel,
+@@ -621,6 +608,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_rxfh = xgbe_get_rxfh,
+ .set_rxfh = xgbe_set_rxfh,
+ .get_ts_info = xgbe_get_ts_info,
++ .get_link_ksettings = xgbe_get_link_ksettings,
++ .set_link_ksettings = xgbe_set_link_ksettings,
+ };
+
+ const struct ethtool_ops *xgbe_get_ethtool_ops(void)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+new file mode 100644
+index 0000000..0c7088a
+--- /dev/null
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+@@ -0,0 +1,492 @@
++/*
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/kmod.h>
++#include <linux/delay.h>
++#include <linux/completion.h>
++#include <linux/mutex.h>
++
++#include "xgbe.h"
++#include "xgbe-common.h"
++
++#define XGBE_ABORT_COUNT 500
++#define XGBE_DISABLE_COUNT 1000
++
++#define XGBE_STD_SPEED 1
++
++#define XGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
++#define XGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
++#define XGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
++#define XGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
++#define XGBE_DEFAULT_INT_MASK (XGBE_INTR_RX_FULL | \
++ XGBE_INTR_TX_EMPTY | \
++ XGBE_INTR_TX_ABRT | \
++ XGBE_INTR_STOP_DET)
++
++#define XGBE_I2C_READ BIT(8)
++#define XGBE_I2C_STOP BIT(9)
++
++static int xgbe_i2c_abort(struct xgbe_prv_data *pdata)
++{
++ unsigned int wait = XGBE_ABORT_COUNT;
++
++ /* Must be enabled to recognize the abort request */
++ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
++
++ /* Issue the abort */
++ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
++
++ while (wait--) {
++ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
++ return 0;
++
++ usleep_range(500, 600);
++ }
++
++ return -EBUSY;
++}
++
++static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable)
++{
++ unsigned int wait = XGBE_DISABLE_COUNT;
++ unsigned int mode = enable ? 1 : 0;
++
++ while (wait--) {
++ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
++ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
++ return 0;
++
++ usleep_range(100, 110);
++ }
++
++ return -EBUSY;
++}
++
++static int xgbe_i2c_disable(struct xgbe_prv_data *pdata)
++{
++ unsigned int ret;
++
++ ret = xgbe_i2c_set_enable(pdata, false);
++ if (ret) {
++ /* Disable failed, try an abort */
++ ret = xgbe_i2c_abort(pdata);
++ if (ret)
++ return ret;
++
++ /* Abort succeeded, try to disable again */
++ ret = xgbe_i2c_set_enable(pdata, false);
++ }
++
++ return ret;
++}
++
++static int xgbe_i2c_enable(struct xgbe_prv_data *pdata)
++{
++ return xgbe_i2c_set_enable(pdata, true);
++}
++
++static void xgbe_i2c_clear_all_interrupts(struct xgbe_prv_data *pdata)
++{
++ XI2C_IOREAD(pdata, IC_CLR_INTR);
++}
++
++static void xgbe_i2c_disable_interrupts(struct xgbe_prv_data *pdata)
++{
++ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
++}
++
++static void xgbe_i2c_enable_interrupts(struct xgbe_prv_data *pdata)
++{
++ XI2C_IOWRITE(pdata, IC_INTR_MASK, XGBE_DEFAULT_INT_MASK);
++}
++
++static void xgbe_i2c_write(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ unsigned int tx_slots;
++ unsigned int cmd;
++
++ /* Configured to never receive Rx overflows, so fill up Tx fifo */
++ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
++ while (tx_slots && state->tx_len) {
++ if (state->op->cmd == XGBE_I2C_CMD_READ)
++ cmd = XGBE_I2C_READ;
++ else
++ cmd = *state->tx_buf++;
++
++ if (state->tx_len == 1)
++ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
++
++ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
++
++ tx_slots--;
++ state->tx_len--;
++ }
++
++ /* No more Tx operations, so ignore TX_EMPTY and return */
++ if (!state->tx_len)
++ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
++}
++
++static void xgbe_i2c_read(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ unsigned int rx_slots;
++
++ /* Anything to be read? */
++ if (state->op->cmd != XGBE_I2C_CMD_READ)
++ return;
++
++ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
++ while (rx_slots && state->rx_len) {
++ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
++ state->rx_len--;
++ rx_slots--;
++ }
++}
++
++static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
++ unsigned int isr)
++{
++ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
++
++ if (isr & XGBE_INTR_TX_ABRT) {
++ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
++ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
++ }
++
++ if (isr & XGBE_INTR_STOP_DET)
++ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
++}
++
++static irqreturn_t xgbe_i2c_isr(int irq, void *data)
++{
++ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
++ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ unsigned int isr;
++
++ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
++ netif_dbg(pdata, intr, pdata->netdev,
++ "I2C interrupt received: status=%#010x\n", isr);
++
++ xgbe_i2c_clear_isr_interrupts(pdata, isr);
++
++ if (isr & XGBE_INTR_TX_ABRT) {
++ netif_dbg(pdata, link, pdata->netdev,
++ "I2C TX_ABRT received (%#010x) for target %#04x\n",
++ state->tx_abort_source, state->op->target);
++
++ xgbe_i2c_disable_interrupts(pdata);
++
++ state->ret = -EIO;
++ goto out;
++ }
++
++ /* Check for data in the Rx fifo */
++ xgbe_i2c_read(pdata);
++
++ /* Fill up the Tx fifo next */
++ xgbe_i2c_write(pdata);
++
++out:
++ /* Complete on an error or STOP condition */
++ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
++ complete(&pdata->i2c_complete);
++
++ return IRQ_HANDLED;
++}
++
++static void xgbe_i2c_set_mode(struct xgbe_prv_data *pdata)
++{
++ unsigned int reg;
++
++ reg = XI2C_IOREAD(pdata, IC_CON);
++ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
++ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
++ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
++ XI2C_SET_BITS(reg, IC_CON, SPEED, XGBE_STD_SPEED);
++ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
++ XI2C_IOWRITE(pdata, IC_CON, reg);
++}
++
++static void xgbe_i2c_get_features(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_i2c *i2c = &pdata->i2c;
++ unsigned int reg;
++
++ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
++ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
++ MAX_SPEED_MODE);
++ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
++ RX_BUFFER_DEPTH);
++ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
++ TX_BUFFER_DEPTH);
++
++ if (netif_msg_probe(pdata))
++ dev_dbg(pdata->dev, "I2C features: %s=%u, %s=%u, %s=%u\n",
++ "MAX_SPEED_MODE", i2c->max_speed_mode,
++ "RX_BUFFER_DEPTH", i2c->rx_fifo_size,
++ "TX_BUFFER_DEPTH", i2c->tx_fifo_size);
++}
++
++static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
++{
++ XI2C_IOWRITE(pdata, IC_TAR, addr);
++}
++
++static irqreturn_t xgbe_i2c_combined_isr(int irq, struct xgbe_prv_data *pdata)
++{
++ if (!XI2C_IOREAD(pdata, IC_RAW_INTR_STAT))
++ return IRQ_HANDLED;
++
++ return xgbe_i2c_isr(irq, pdata);
++}
++
++static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
++{
++ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
++ int ret;
++
++ mutex_lock(&pdata->i2c_mutex);
++
++ reinit_completion(&pdata->i2c_complete);
++
++ ret = xgbe_i2c_disable(pdata);
++ if (ret) {
++ netdev_err(pdata->netdev, "failed to disable i2c master\n");
++ goto unlock;
++ }
++
++ xgbe_i2c_set_target(pdata, op->target);
++
++ memset(state, 0, sizeof(*state));
++ state->op = op;
++ state->tx_len = op->len;
++ state->tx_buf = op->buf;
++ state->rx_len = op->len;
++ state->rx_buf = op->buf;
++
++ xgbe_i2c_clear_all_interrupts(pdata);
++ ret = xgbe_i2c_enable(pdata);
++ if (ret) {
++ netdev_err(pdata->netdev, "failed to enable i2c master\n");
++ goto unlock;
++ }
++
++ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
++ * fire and begin to process the command via the ISR.
++ */
++ xgbe_i2c_enable_interrupts(pdata);
++
++ if (!wait_for_completion_timeout(&pdata->i2c_complete, HZ)) {
++ netdev_err(pdata->netdev, "i2c operation timed out\n");
++ ret = -ETIMEDOUT;
++ goto disable;
++ }
++
++ ret = state->ret;
++ if (ret) {
++ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
++ ret = -ENOTCONN;
++ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
++ ret = -EAGAIN;
++ }
++
++disable:
++ xgbe_i2c_disable_interrupts(pdata);
++ xgbe_i2c_disable(pdata);
++
++unlock:
++ mutex_unlock(&pdata->i2c_mutex);
++
++ return ret;
++}
++
++static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
++{
++ if (!pdata->i2c.started)
++ return;
++
++ netif_dbg(pdata, link, pdata->netdev, "stopping I2C\n");
++
++ pdata->i2c.started = 0;
++
++ xgbe_i2c_disable_interrupts(pdata);
++ xgbe_i2c_disable(pdata);
++ xgbe_i2c_clear_all_interrupts(pdata);
++
++ if (pdata->dev_irq != pdata->i2c_irq)
++ devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
++}
++
++static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
++{
++ int ret;
++
++ if (pdata->i2c.started)
++ return 0;
++
++ netif_dbg(pdata, link, pdata->netdev, "starting I2C\n");
++
++ /* If we have a separate I2C irq, enable it */
++ if (pdata->dev_irq != pdata->i2c_irq) {
++ ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
++ xgbe_i2c_isr, 0, pdata->i2c_name,
++ pdata);
++ if (ret) {
++ netdev_err(pdata->netdev, "i2c irq request failed\n");
++ return ret;
++ }
++ }
++
++ pdata->i2c.started = 1;
++
++ return 0;
++}
++
++static int xgbe_i2c_init(struct xgbe_prv_data *pdata)
++{
++ int ret;
++
++ xgbe_i2c_disable_interrupts(pdata);
++
++ ret = xgbe_i2c_disable(pdata);
++ if (ret) {
++ dev_err(pdata->dev, "failed to disable i2c master\n");
++ return ret;
++ }
++
++ xgbe_i2c_get_features(pdata);
++
++ xgbe_i2c_set_mode(pdata);
++
++ xgbe_i2c_clear_all_interrupts(pdata);
++
++ return 0;
++}
++
++void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *i2c_if)
++{
++ i2c_if->i2c_init = xgbe_i2c_init;
++
++ i2c_if->i2c_start = xgbe_i2c_start;
++ i2c_if->i2c_stop = xgbe_i2c_stop;
++
++ i2c_if->i2c_xfer = xgbe_i2c_xfer;
++
++ i2c_if->i2c_isr = xgbe_i2c_combined_isr;
++}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+old mode 100644
+new mode 100755
+index 4f76351..17ac8f9
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -116,19 +116,10 @@
+
+ #include <linux/module.h>
+ #include <linux/device.h>
+-#include <linux/platform_device.h>
+ #include <linux/spinlock.h>
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/io.h>
+-#include <linux/of.h>
+-#include <linux/of_net.h>
+-#include <linux/of_address.h>
+-#include <linux/of_platform.h>
+-#include <linux/clk.h>
+-#include <linux/property.h>
+-#include <linux/acpi.h>
+-#include <linux/mdio.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -145,42 +136,6 @@ MODULE_PARM_DESC(debug, " Network interface message level setting");
+ static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
+ NETIF_MSG_IFUP);
+
+-static const u32 xgbe_serdes_blwc[] = {
+- XGBE_SPEED_1000_BLWC,
+- XGBE_SPEED_2500_BLWC,
+- XGBE_SPEED_10000_BLWC,
+-};
+-
+-static const u32 xgbe_serdes_cdr_rate[] = {
+- XGBE_SPEED_1000_CDR,
+- XGBE_SPEED_2500_CDR,
+- XGBE_SPEED_10000_CDR,
+-};
+-
+-static const u32 xgbe_serdes_pq_skew[] = {
+- XGBE_SPEED_1000_PQ,
+- XGBE_SPEED_2500_PQ,
+- XGBE_SPEED_10000_PQ,
+-};
+-
+-static const u32 xgbe_serdes_tx_amp[] = {
+- XGBE_SPEED_1000_TXAMP,
+- XGBE_SPEED_2500_TXAMP,
+- XGBE_SPEED_10000_TXAMP,
+-};
+-
+-static const u32 xgbe_serdes_dfe_tap_cfg[] = {
+- XGBE_SPEED_1000_DFE_TAP_CONFIG,
+- XGBE_SPEED_2500_DFE_TAP_CONFIG,
+- XGBE_SPEED_10000_DFE_TAP_CONFIG,
+-};
+-
+-static const u32 xgbe_serdes_dfe_tap_ena[] = {
+- XGBE_SPEED_1000_DFE_TAP_ENABLE,
+- XGBE_SPEED_2500_DFE_TAP_ENABLE,
+- XGBE_SPEED_10000_DFE_TAP_ENABLE,
+-};
+-
+ static void xgbe_default_config(struct xgbe_prv_data *pdata)
+ {
+ DBGPR("-->xgbe_default_config\n");
+@@ -206,455 +161,124 @@ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+ {
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_phy(&pdata->phy_if);
++ xgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+-}
+-
+-#ifdef CONFIG_ACPI
+-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+-{
+- struct device *dev = pdata->dev;
+- u32 property;
+- int ret;
+-
+- /* Obtain the system clock setting */
+- ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
+- if (ret) {
+- dev_err(dev, "unable to obtain %s property\n",
+- XGBE_ACPI_DMA_FREQ);
+- return ret;
+- }
+- pdata->sysclk_rate = property;
+-
+- /* Obtain the PTP clock setting */
+- ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
+- if (ret) {
+- dev_err(dev, "unable to obtain %s property\n",
+- XGBE_ACPI_PTP_FREQ);
+- return ret;
+- }
+- pdata->ptpclk_rate = property;
+
+- return 0;
++ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+ }
+-#else /* CONFIG_ACPI */
+-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
+-{
+- return -EINVAL;
+-}
+-#endif /* CONFIG_ACPI */
+
+-#ifdef CONFIG_OF
+-static int xgbe_of_support(struct xgbe_prv_data *pdata)
+-{
+- struct device *dev = pdata->dev;
+-
+- /* Obtain the system clock setting */
+- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+- if (IS_ERR(pdata->sysclk)) {
+- dev_err(dev, "dma devm_clk_get failed\n");
+- return PTR_ERR(pdata->sysclk);
+- }
+- pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
+-
+- /* Obtain the PTP clock setting */
+- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+- if (IS_ERR(pdata->ptpclk)) {
+- dev_err(dev, "ptp devm_clk_get failed\n");
+- return PTR_ERR(pdata->ptpclk);
+- }
+- pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
+-
+- return 0;
+-}
+-
+-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+-{
+- struct device *dev = pdata->dev;
+- struct device_node *phy_node;
+- struct platform_device *phy_pdev;
+-
+- phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
+- if (phy_node) {
+- /* Old style device tree:
+- * The XGBE and PHY resources are separate
+- */
+- phy_pdev = of_find_device_by_node(phy_node);
+- of_node_put(phy_node);
+- } else {
+- /* New style device tree:
+- * The XGBE and PHY resources are grouped together with
+- * the PHY resources listed last
+- */
+- get_device(dev);
+- phy_pdev = pdata->pdev;
+- }
+-
+- return phy_pdev;
+-}
+-#else /* CONFIG_OF */
+-static int xgbe_of_support(struct xgbe_prv_data *pdata)
+-{
+- return -EINVAL;
+-}
+-
+-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
+-{
+- return NULL;
+-}
+-#endif /* CONFIG_OF */
+-
+-static unsigned int xgbe_resource_count(struct platform_device *pdev,
+- unsigned int type)
+-{
+- unsigned int count;
+- int i;
+-
+- for (i = 0, count = 0; i < pdev->num_resources; i++) {
+- struct resource *res = &pdev->resource[i];
+-
+- if (type == resource_type(res))
+- count++;
+- }
+-
+- return count;
+-}
+-
+-static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
+-{
+- struct platform_device *phy_pdev;
+-
+- if (pdata->use_acpi) {
+- get_device(pdata->dev);
+- phy_pdev = pdata->pdev;
+- } else {
+- phy_pdev = xgbe_of_get_phy_pdev(pdata);
+- }
+-
+- return phy_pdev;
+-}
+-
+-static int xgbe_probe(struct platform_device *pdev)
++struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ {
+ struct xgbe_prv_data *pdata;
+ struct net_device *netdev;
+- struct device *dev = &pdev->dev, *phy_dev;
+- struct platform_device *phy_pdev;
+- struct resource *res;
+- const char *phy_mode;
+- unsigned int i, phy_memnum, phy_irqnum;
+- enum dev_dma_attr attr;
+- int ret;
+-
+- DBGPR("--> xgbe_probe\n");
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+- dev_err(dev, "alloc_etherdev failed\n");
+- ret = -ENOMEM;
+- goto err_alloc;
++ dev_err(dev, "alloc_etherdev_mq failed\n");
++ return ERR_PTR(-ENOMEM);
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+- pdata->pdev = pdev;
+- pdata->adev = ACPI_COMPANION(dev);
+ pdata->dev = dev;
+- platform_set_drvdata(pdev, netdev);
+
+ spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
++ mutex_init(&pdata->i2c_mutex);
++ init_completion(&pdata->i2c_complete);
++ init_completion(&pdata->mdio_complete);
+
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
++ set_bit(XGBE_STOPPED, &pdata->dev_state);
+
+- /* Check if we should use ACPI or DT */
+- pdata->use_acpi = dev->of_node ? 0 : 1;
+-
+- phy_pdev = xgbe_get_phy_pdev(pdata);
+- if (!phy_pdev) {
+- dev_err(dev, "unable to obtain phy device\n");
+- ret = -EINVAL;
+- goto err_phydev;
+- }
+- phy_dev = &phy_pdev->dev;
+-
+- if (pdev == phy_pdev) {
+- /* New style device tree or ACPI:
+- * The XGBE and PHY resources are grouped together with
+- * the PHY resources listed last
+- */
+- phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
+- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
+- } else {
+- /* Old style device tree:
+- * The XGBE and PHY resources are separate
+- */
+- phy_memnum = 0;
+- phy_irqnum = 0;
+- }
+-
+- /* Set and validate the number of descriptors for a ring */
+- BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+- pdata->tx_desc_count = XGBE_TX_DESC_CNT;
+- if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+- dev_err(dev, "tx descriptor count (%d) is not valid\n",
+- pdata->tx_desc_count);
+- ret = -EINVAL;
+- goto err_io;
+- }
+- BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+- pdata->rx_desc_count = XGBE_RX_DESC_CNT;
+- if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+- dev_err(dev, "rx descriptor count (%d) is not valid\n",
+- pdata->rx_desc_count);
+- ret = -EINVAL;
+- goto err_io;
+- }
+-
+- /* Obtain the mmio areas for the device */
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+- if (IS_ERR(pdata->xgmac_regs)) {
+- dev_err(dev, "xgmac ioremap failed\n");
+- ret = PTR_ERR(pdata->xgmac_regs);
+- goto err_io;
+- }
+- if (netif_msg_probe(pdata))
+- dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
+-
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+- if (IS_ERR(pdata->xpcs_regs)) {
+- dev_err(dev, "xpcs ioremap failed\n");
+- ret = PTR_ERR(pdata->xpcs_regs);
+- goto err_io;
+- }
+- if (netif_msg_probe(pdata))
+- dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+-
+- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
+- if (IS_ERR(pdata->rxtx_regs)) {
+- dev_err(dev, "rxtx ioremap failed\n");
+- ret = PTR_ERR(pdata->rxtx_regs);
+- goto err_io;
+- }
+- if (netif_msg_probe(pdata))
+- dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
+-
+- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+- pdata->sir0_regs = devm_ioremap_resource(dev, res);
+- if (IS_ERR(pdata->sir0_regs)) {
+- dev_err(dev, "sir0 ioremap failed\n");
+- ret = PTR_ERR(pdata->sir0_regs);
+- goto err_io;
+- }
+- if (netif_msg_probe(pdata))
+- dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
+-
+- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
+- pdata->sir1_regs = devm_ioremap_resource(dev, res);
+- if (IS_ERR(pdata->sir1_regs)) {
+- dev_err(dev, "sir1 ioremap failed\n");
+- ret = PTR_ERR(pdata->sir1_regs);
+- goto err_io;
+- }
+- if (netif_msg_probe(pdata))
+- dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
+-
+- /* Retrieve the MAC address */
+- ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
+- pdata->mac_addr,
+- sizeof(pdata->mac_addr));
+- if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
+- dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
+- if (!ret)
+- ret = -EINVAL;
+- goto err_io;
+- }
+-
+- /* Retrieve the PHY mode - it must be "xgmii" */
+- ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
+- &phy_mode);
+- if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
+- dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
+- if (!ret)
+- ret = -EINVAL;
+- goto err_io;
+- }
+- pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
+-
+- /* Check for per channel interrupt support */
+- if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
+- pdata->per_channel_irq = 1;
++ return pdata;
++}
+
+- /* Retrieve the PHY speedset */
+- ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
+- &pdata->speed_set);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+- goto err_io;
+- }
++void xgbe_free_pdata(struct xgbe_prv_data *pdata)
++{
++ struct net_device *netdev = pdata->netdev;
+
+- switch (pdata->speed_set) {
+- case XGBE_SPEEDSET_1000_10000:
+- case XGBE_SPEEDSET_2500_10000:
+- break;
+- default:
+- dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
+- ret = -EINVAL;
+- goto err_io;
+- }
++ free_netdev(netdev);
++}
+
+- /* Retrieve the PHY configuration properties */
+- if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
+- ret = device_property_read_u32_array(phy_dev,
+- XGBE_BLWC_PROPERTY,
+- pdata->serdes_blwc,
+- XGBE_SPEEDS);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n",
+- XGBE_BLWC_PROPERTY);
+- goto err_io;
+- }
+- } else {
+- memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
+- sizeof(pdata->serdes_blwc));
+- }
++void xgbe_set_counts(struct xgbe_prv_data *pdata)
++{
++ /* Set all the function pointers */
++ xgbe_init_all_fptrs(pdata);
+
+- if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
+- ret = device_property_read_u32_array(phy_dev,
+- XGBE_CDR_RATE_PROPERTY,
+- pdata->serdes_cdr_rate,
+- XGBE_SPEEDS);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n",
+- XGBE_CDR_RATE_PROPERTY);
+- goto err_io;
+- }
+- } else {
+- memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
+- sizeof(pdata->serdes_cdr_rate));
+- }
++ /* Populate the hardware features */
++ xgbe_get_all_hw_features(pdata);
+
+- if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
+- ret = device_property_read_u32_array(phy_dev,
+- XGBE_PQ_SKEW_PROPERTY,
+- pdata->serdes_pq_skew,
+- XGBE_SPEEDS);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n",
+- XGBE_PQ_SKEW_PROPERTY);
+- goto err_io;
+- }
+- } else {
+- memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
+- sizeof(pdata->serdes_pq_skew));
+- }
++ /* Set default max values if not provided */
++ if (!pdata->tx_max_channel_count)
++ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
++ if (!pdata->rx_max_channel_count)
++ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+- if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
+- ret = device_property_read_u32_array(phy_dev,
+- XGBE_TX_AMP_PROPERTY,
+- pdata->serdes_tx_amp,
+- XGBE_SPEEDS);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n",
+- XGBE_TX_AMP_PROPERTY);
+- goto err_io;
+- }
+- } else {
+- memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
+- sizeof(pdata->serdes_tx_amp));
+- }
++ if (!pdata->tx_max_q_count)
++ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
++ if (!pdata->rx_max_q_count)
++ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+- if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
+- ret = device_property_read_u32_array(phy_dev,
+- XGBE_DFE_CFG_PROPERTY,
+- pdata->serdes_dfe_tap_cfg,
+- XGBE_SPEEDS);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n",
+- XGBE_DFE_CFG_PROPERTY);
+- goto err_io;
+- }
+- } else {
+- memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
+- sizeof(pdata->serdes_dfe_tap_cfg));
+- }
++ /* Calculate the number of Tx and Rx rings to be created
++ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
++ * the number of Tx queues to the number of Tx channels
++ * enabled
++ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
++ * number of Rx queues or maximum allowed
++ */
++ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
++ pdata->hw_feat.tx_ch_cnt);
++ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
++ pdata->tx_max_channel_count);
++ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
++ pdata->tx_max_q_count);
+
+- if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
+- ret = device_property_read_u32_array(phy_dev,
+- XGBE_DFE_ENA_PROPERTY,
+- pdata->serdes_dfe_tap_ena,
+- XGBE_SPEEDS);
+- if (ret) {
+- dev_err(dev, "invalid %s property\n",
+- XGBE_DFE_ENA_PROPERTY);
+- goto err_io;
+- }
+- } else {
+- memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
+- sizeof(pdata->serdes_dfe_tap_ena));
+- }
++ pdata->tx_q_count = pdata->tx_ring_count;
+
+- /* Obtain device settings unique to ACPI/OF */
+- if (pdata->use_acpi)
+- ret = xgbe_acpi_support(pdata);
+- else
+- ret = xgbe_of_support(pdata);
+- if (ret)
+- goto err_io;
++ pdata->rx_ring_count = min_t(unsigned int, num_online_cpus(),
++ pdata->hw_feat.rx_ch_cnt);
++ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
++ pdata->rx_max_channel_count);
+
+- /* Set the DMA coherency values */
+- attr = device_get_dma_attr(dev);
+- if (attr == DEV_DMA_NOT_SUPPORTED) {
+- dev_err(dev, "DMA is not supported");
+- goto err_io;
+- }
+- pdata->coherent = (attr == DEV_DMA_COHERENT);
+- if (pdata->coherent) {
+- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+- pdata->arcache = XGBE_DMA_OS_ARCACHE;
+- pdata->awcache = XGBE_DMA_OS_AWCACHE;
+- } else {
+- pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+- pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+- pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+- }
++ pdata->rx_q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt,
++ pdata->rx_max_q_count);
+
+- /* Get the device interrupt */
+- ret = platform_get_irq(pdev, 0);
+- if (ret < 0) {
+- dev_err(dev, "platform_get_irq 0 failed\n");
+- goto err_io;
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(pdata->dev, "TX/RX DMA channel count = %u/%u\n",
++ pdata->tx_ring_count, pdata->rx_ring_count);
++ dev_dbg(pdata->dev, "TX/RX hardware queue count = %u/%u\n",
++ pdata->tx_q_count, pdata->rx_q_count);
+ }
+- pdata->dev_irq = ret;
++}
+
+- /* Get the auto-negotiation interrupt */
+- ret = platform_get_irq(phy_pdev, phy_irqnum++);
+- if (ret < 0) {
+- dev_err(dev, "platform_get_irq phy 0 failed\n");
+- goto err_io;
+- }
+- pdata->an_irq = ret;
++int xgbe_config_netdev(struct xgbe_prv_data *pdata)
++{
++ struct net_device *netdev = pdata->netdev;
++ struct device *dev = pdata->dev;
++ unsigned int i;
++ int ret;
+
+ netdev->irq = pdata->dev_irq;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+ memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+
+- /* Set all the function pointers */
+- xgbe_init_all_fptrs(pdata);
++ /* Initialize ECC timestamps */
++ pdata->tx_sec_period = jiffies;
++ pdata->tx_ded_period = jiffies;
++ pdata->rx_sec_period = jiffies;
++ pdata->rx_ded_period = jiffies;
++ pdata->desc_sec_period = jiffies;
++ pdata->desc_ded_period = jiffies;
+
+ /* Issue software reset to device */
+ pdata->hw_if.exit(pdata);
+
+- /* Populate the hardware features */
+- xgbe_get_all_hw_features(pdata);
+-
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+@@ -663,33 +287,46 @@ static int xgbe_probe(struct platform_device *pdev)
+ DMA_BIT_MASK(pdata->hw_feat.dma_width));
+ if (ret) {
+ dev_err(dev, "dma_set_mask_and_coherent failed\n");
+- goto err_io;
++ return ret;
+ }
+
+- /* Calculate the number of Tx and Rx rings to be created
+- * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+- * the number of Tx queues to the number of Tx channels
+- * enabled
+- * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+- * number of Rx queues
+- */
+- pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+- pdata->hw_feat.tx_ch_cnt);
+- pdata->tx_q_count = pdata->tx_ring_count;
++ /* Set default max values if not provided */
++ if (!pdata->tx_max_fifo_size)
++ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
++ if (!pdata->rx_max_fifo_size)
++ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
++
++ /* Set and validate the number of descriptors for a ring */
++ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
++ pdata->tx_desc_count = XGBE_TX_DESC_CNT;
++
++ BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
++ pdata->rx_desc_count = XGBE_RX_DESC_CNT;
++
++ /* Adjust the number of queues based on interrupts assigned */
++ if (pdata->channel_irq_count) {
++ pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count,
++ pdata->channel_irq_count);
++ pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count,
++ pdata->channel_irq_count);
++
++ if (netif_msg_probe(pdata))
++ dev_dbg(pdata->dev,
++ "adjusted TX/RX DMA channel count = %u/%u\n",
++ pdata->tx_ring_count, pdata->rx_ring_count);
++ }
++
++ /* Set the number of queues */
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real tx queue count\n");
+- goto err_io;
++ return ret;
+ }
+
+- pdata->rx_ring_count = min_t(unsigned int,
+- netif_get_num_default_rss_queues(),
+- pdata->hw_feat.rx_ch_cnt);
+- pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real rx queue count\n");
+- goto err_io;
++ return ret;
+ }
+
+ /* Initialize RSS hash key and lookup table */
+@@ -704,7 +341,9 @@ static int xgbe_probe(struct platform_device *pdev)
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+
+ /* Call MDIO/PHY initialization routine */
+- pdata->phy_if.phy_init(pdata);
++ ret = pdata->phy_if.phy_init(pdata);
++ if (ret)
++ return ret;
+
+ /* Set device operations */
+ netdev->netdev_ops = xgbe_get_netdev_ops();
+@@ -738,6 +377,8 @@ static int xgbe_probe(struct platform_device *pdev)
+ pdata->netdev_features = netdev->features;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
++ netdev->min_mtu = 0;
++ netdev->max_mtu = XGMAC_JUMBO_PACKET_MTU;
+
+ /* Use default watchdog timeout */
+ netdev->watchdog_timeo = 0;
+@@ -749,13 +390,21 @@ static int xgbe_probe(struct platform_device *pdev)
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+- goto err_io;
++ return ret;
+ }
+
+ /* Create the PHY/ANEG name based on netdev name */
+ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+ netdev_name(netdev));
+
++ /* Create the ECC name based on netdev name */
++ snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
++ netdev_name(netdev));
++
++ /* Create the I2C name based on netdev name */
++ snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
++ netdev_name(netdev));
++
+ /* Create workqueues */
+ pdata->dev_workqueue =
+ create_singlethread_workqueue(netdev_name(netdev));
+@@ -773,15 +422,15 @@ static int xgbe_probe(struct platform_device *pdev)
+ goto err_wq;
+ }
+
+- xgbe_ptp_register(pdata);
++ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
++ xgbe_ptp_register(pdata);
+
+ xgbe_debugfs_init(pdata);
+
+- platform_device_put(phy_pdev);
+-
+- netdev_notice(netdev, "net device enabled\n");
+-
+- DBGPR("<-- xgbe_probe\n");
++ netif_dbg(pdata, drv, pdata->netdev, "%u Tx software queues\n",
++ pdata->tx_ring_count);
++ netif_dbg(pdata, drv, pdata->netdev, "%u Rx software queues\n",
++ pdata->rx_ring_count);
+
+ return 0;
+
+@@ -791,28 +440,19 @@ static int xgbe_probe(struct platform_device *pdev)
+ err_netdev:
+ unregister_netdev(netdev);
+
+-err_io:
+- platform_device_put(phy_pdev);
+-
+-err_phydev:
+- free_netdev(netdev);
+-
+-err_alloc:
+- dev_notice(dev, "net device not enabled\n");
+-
+ return ret;
+ }
+
+-static int xgbe_remove(struct platform_device *pdev)
++void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
+ {
+- struct net_device *netdev = platform_get_drvdata(pdev);
+- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+-
+- DBGPR("-->xgbe_remove\n");
++ struct net_device *netdev = pdata->netdev;
+
+ xgbe_debugfs_exit(pdata);
+
+- xgbe_ptp_unregister(pdata);
++ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
++ xgbe_ptp_unregister(pdata);
++
++ pdata->phy_if.phy_exit(pdata);
+
+ flush_workqueue(pdata->an_workqueue);
+ destroy_workqueue(pdata->an_workqueue);
+@@ -821,94 +461,29 @@ static int xgbe_remove(struct platform_device *pdev)
+ destroy_workqueue(pdata->dev_workqueue);
+
+ unregister_netdev(netdev);
+-
+- free_netdev(netdev);
+-
+- DBGPR("<--xgbe_remove\n");
+-
+- return 0;
+ }
+
+-#ifdef CONFIG_PM_SLEEP
+-static int xgbe_suspend(struct device *dev)
++static int __init xgbe_mod_init(void)
+ {
+- struct net_device *netdev = dev_get_drvdata(dev);
+- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- int ret = 0;
+-
+- DBGPR("-->xgbe_suspend\n");
+-
+- if (netif_running(netdev))
+- ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
++ int ret;
+
+- pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+- pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
++ ret = xgbe_platform_init();
++ if (ret)
++ return ret;
+
+- DBGPR("<--xgbe_suspend\n");
++ ret = xgbe_pci_init();
++ if (ret)
++ return ret;
+
+- return ret;
++ return 0;
+ }
+
+-static int xgbe_resume(struct device *dev)
++static void __exit xgbe_mod_exit(void)
+ {
+- struct net_device *netdev = dev_get_drvdata(dev);
+- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- int ret = 0;
+-
+- DBGPR("-->xgbe_resume\n");
+-
+- pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
+-
+- if (netif_running(netdev)) {
+- ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+-
+- /* Schedule a restart in case the link or phy state changed
+- * while we were powered down.
+- */
+- schedule_work(&pdata->restart_work);
+- }
+-
+- DBGPR("<--xgbe_resume\n");
++ xgbe_pci_exit();
+
+- return ret;
++ xgbe_platform_exit();
+ }
+-#endif /* CONFIG_PM_SLEEP */
+-
+-#ifdef CONFIG_ACPI
+-static const struct acpi_device_id xgbe_acpi_match[] = {
+- { "AMDI8001", 0 },
+- {},
+-};
+-
+-MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
+-#endif
+-
+-#ifdef CONFIG_OF
+-static const struct of_device_id xgbe_of_match[] = {
+- { .compatible = "amd,xgbe-seattle-v1a", },
+- {},
+-};
+-
+-MODULE_DEVICE_TABLE(of, xgbe_of_match);
+-#endif
+-
+-static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
+-
+-static struct platform_driver xgbe_driver = {
+- .driver = {
+- .name = "amd-xgbe",
+-#ifdef CONFIG_ACPI
+- .acpi_match_table = xgbe_acpi_match,
+-#endif
+-#ifdef CONFIG_OF
+- .of_match_table = xgbe_of_match,
+-#endif
+- .pm = &xgbe_pm_ops,
+- },
+- .probe = xgbe_probe,
+- .remove = xgbe_remove,
+-};
+
+-module_platform_driver(xgbe_driver);
++module_init(xgbe_mod_init);
++module_exit(xgbe_mod_exit);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+old mode 100644
+new mode 100755
+index 84c5d29..4c5b90e
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -125,303 +125,284 @@
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+
+-static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
++static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++ int reg;
+
+- reg |= XGBE_KR_TRAINING_ENABLE;
+- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
++ reg &= ~XGBE_AN_CL37_INT_MASK;
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+ }
+
+-static void xgbe_an_disable_kr_training(struct xgbe_prv_data *pdata)
++static void xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
++ int reg;
+
+- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
++ reg &= ~XGBE_AN_CL37_INT_MASK;
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+- reg &= ~XGBE_KR_TRAINING_ENABLE;
+- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
++ reg &= ~XGBE_PCS_CL37_BP;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+ }
+
+-static void xgbe_pcs_power_cycle(struct xgbe_prv_data *pdata)
++static void xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ int reg;
+
+- reg |= MDIO_CTRL1_LPOWER;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
++ reg |= XGBE_PCS_CL37_BP;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+
+- usleep_range(75, 100);
+-
+- reg &= ~MDIO_CTRL1_LPOWER;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
++ reg |= XGBE_AN_CL37_INT_MASK;
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+ }
+
+-static void xgbe_serdes_start_ratechange(struct xgbe_prv_data *pdata)
++static void xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata)
+ {
+- /* Assert Rx and Tx ratechange */
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+ }
+
+-static void xgbe_serdes_complete_ratechange(struct xgbe_prv_data *pdata)
++static void xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata)
+ {
+- unsigned int wait;
+- u16 status;
+-
+- /* Release Rx and Tx ratechange */
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
++}
+
+- /* Wait for Rx and Tx ready */
+- wait = XGBE_RATECHANGE_COUNT;
+- while (wait--) {
+- usleep_range(50, 75);
++static void xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata)
++{
++ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK);
++}
+
+- status = XSIR0_IOREAD(pdata, SIR0_STATUS);
+- if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+- XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+- goto rx_reset;
++static void xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata)
++{
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ case XGBE_AN_MODE_CL73_REDRV:
++ xgbe_an73_enable_interrupts(pdata);
++ break;
++ case XGBE_AN_MODE_CL37:
++ case XGBE_AN_MODE_CL37_SGMII:
++ xgbe_an37_enable_interrupts(pdata);
++ break;
++ default:
++ break;
+ }
++}
+
+- netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
+- status);
+-
+-rx_reset:
+- /* Perform Rx reset for the DFE changes */
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
++static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
++{
++ xgbe_an73_clear_interrupts(pdata);
++ xgbe_an37_clear_interrupts(pdata);
+ }
+
+-static void xgbe_xgmii_mode(struct xgbe_prv_data *pdata)
++static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
+ {
+ unsigned int reg;
+
+- /* Enable KR training */
+- xgbe_an_enable_kr_training(pdata);
+-
+- /* Set MAC to 10G speed */
+- pdata->hw_if.set_xgmii_speed(pdata);
+-
+- /* Set PCS to KR/10G speed */
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+- reg &= ~MDIO_PCS_CTRL2_TYPE;
+- reg |= MDIO_PCS_CTRL2_10GBR;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+- reg &= ~MDIO_CTRL1_SPEEDSEL;
+- reg |= MDIO_CTRL1_SPEED10G;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++ reg |= XGBE_KR_TRAINING_ENABLE;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++}
+
+- xgbe_pcs_power_cycle(pdata);
++static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
++{
++ unsigned int reg;
+
+- /* Set SerDes to 10G speed */
+- xgbe_serdes_start_ratechange(pdata);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
++ reg &= ~XGBE_KR_TRAINING_ENABLE;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++}
+
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+- pdata->serdes_cdr_rate[XGBE_SPEED_10000]);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+- pdata->serdes_tx_amp[XGBE_SPEED_10000]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+- pdata->serdes_blwc[XGBE_SPEED_10000]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+- pdata->serdes_pq_skew[XGBE_SPEED_10000]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_10000]);
+- XRXTX_IOWRITE(pdata, RXTX_REG22,
+- pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
++static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
++{
++ /* Enable KR training */
++ xgbe_an73_enable_kr_training(pdata);
+
+- xgbe_serdes_complete_ratechange(pdata);
++ /* Set MAC to 10G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+- netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR);
+ }
+
+-static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
++static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
+-
+ /* Disable KR training */
+- xgbe_an_disable_kr_training(pdata);
++ xgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+- pdata->hw_if.set_gmii_2500_speed(pdata);
++ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+- /* Set PCS to KX/1G speed */
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+- reg &= ~MDIO_PCS_CTRL2_TYPE;
+- reg |= MDIO_PCS_CTRL2_10GBX;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500);
++}
+
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+- reg &= ~MDIO_CTRL1_SPEEDSEL;
+- reg |= MDIO_CTRL1_SPEED1G;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
++{
++ /* Disable KR training */
++ xgbe_an73_disable_kr_training(pdata);
+
+- xgbe_pcs_power_cycle(pdata);
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+- /* Set SerDes to 2.5G speed */
+- xgbe_serdes_start_ratechange(pdata);
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000);
++}
+
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
++static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
++{
++ /* If a KR re-driver is present, change to KR mode instead */
++ if (pdata->kr_redrv)
++ return xgbe_kr_mode(pdata);
+
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+- pdata->serdes_cdr_rate[XGBE_SPEED_2500]);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+- pdata->serdes_tx_amp[XGBE_SPEED_2500]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+- pdata->serdes_blwc[XGBE_SPEED_2500]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+- pdata->serdes_pq_skew[XGBE_SPEED_2500]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_2500]);
+- XRXTX_IOWRITE(pdata, RXTX_REG22,
+- pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
++ /* Disable KR training */
++ xgbe_an73_disable_kr_training(pdata);
+
+- xgbe_serdes_complete_ratechange(pdata);
++ /* Set MAC to 10G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+- netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI);
+ }
+
+-static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
++static void xgbe_x_mode(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
+-
+ /* Disable KR training */
+- xgbe_an_disable_kr_training(pdata);
++ xgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+- pdata->hw_if.set_gmii_speed(pdata);
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+- /* Set PCS to KX/1G speed */
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+- reg &= ~MDIO_PCS_CTRL2_TYPE;
+- reg |= MDIO_PCS_CTRL2_10GBX;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+- reg &= ~MDIO_CTRL1_SPEEDSEL;
+- reg |= MDIO_CTRL1_SPEED1G;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X);
++}
+
+- xgbe_pcs_power_cycle(pdata);
++static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
++{
++ /* Disable KR training */
++ xgbe_an73_disable_kr_training(pdata);
+
+- /* Set SerDes to 1G speed */
+- xgbe_serdes_start_ratechange(pdata);
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000);
++}
+
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
+- pdata->serdes_cdr_rate[XGBE_SPEED_1000]);
+- XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
+- pdata->serdes_tx_amp[XGBE_SPEED_1000]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
+- pdata->serdes_blwc[XGBE_SPEED_1000]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
+- pdata->serdes_pq_skew[XGBE_SPEED_1000]);
+- XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
+- pdata->serdes_dfe_tap_cfg[XGBE_SPEED_1000]);
+- XRXTX_IOWRITE(pdata, RXTX_REG22,
+- pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
++static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
++{
++ /* Disable KR training */
++ xgbe_an73_disable_kr_training(pdata);
+
+- xgbe_serdes_complete_ratechange(pdata);
++ /* Set MAC to 1G speed */
++ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+- netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
++ /* Call PHY implementation support to complete rate change */
++ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100);
+ }
+
+-static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
+- enum xgbe_mode *mode)
++static enum xgbe_mode xgbe_cur_mode(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
+- if ((reg & MDIO_PCS_CTRL2_TYPE) == MDIO_PCS_CTRL2_10GBR)
+- *mode = XGBE_MODE_KR;
+- else
+- *mode = XGBE_MODE_KX;
++ return pdata->phy_if.phy_impl.cur_mode(pdata);
+ }
+
+ static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata)
+ {
+- enum xgbe_mode mode;
+-
+- xgbe_cur_mode(pdata, &mode);
++ return (xgbe_cur_mode(pdata) == XGBE_MODE_KR);
++}
+
+- return (mode == XGBE_MODE_KR);
++static void xgbe_change_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_KX_1000:
++ xgbe_kx_1000_mode(pdata);
++ break;
++ case XGBE_MODE_KX_2500:
++ xgbe_kx_2500_mode(pdata);
++ break;
++ case XGBE_MODE_KR:
++ xgbe_kr_mode(pdata);
++ break;
++ case XGBE_MODE_SGMII_100:
++ xgbe_sgmii_100_mode(pdata);
++ break;
++ case XGBE_MODE_SGMII_1000:
++ xgbe_sgmii_1000_mode(pdata);
++ break;
++ case XGBE_MODE_X:
++ xgbe_x_mode(pdata);
++ break;
++ case XGBE_MODE_SFI:
++ xgbe_sfi_mode(pdata);
++ break;
++ case XGBE_MODE_UNKNOWN:
++ break;
++ default:
++ netif_dbg(pdata, link, pdata->netdev,
++ "invalid operation mode requested (%u)\n", mode);
++ }
+ }
+
+ static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+ {
+- /* If we are in KR switch to KX, and vice-versa */
+- if (xgbe_in_kr_mode(pdata)) {
+- if (pdata->speed_set == XGBE_SPEEDSET_1000_10000)
+- xgbe_gmii_mode(pdata);
+- else
+- xgbe_gmii_2500_mode(pdata);
+- } else {
+- xgbe_xgmii_mode(pdata);
+- }
++ xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+ }
+
+ static void xgbe_set_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
+- enum xgbe_mode cur_mode;
++ if (mode == xgbe_cur_mode(pdata))
++ return;
+
+- xgbe_cur_mode(pdata, &cur_mode);
+- if (mode != cur_mode)
+- xgbe_switch_mode(pdata);
++ xgbe_change_mode(pdata, mode);
+ }
+
+-static bool xgbe_use_xgmii_mode(struct xgbe_prv_data *pdata)
++static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
+ {
+- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+- return true;
+- } else {
+- if (pdata->phy.speed == SPEED_10000)
+- return true;
+- }
++ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
++}
++
++static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable,
++ bool restart)
++{
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
++ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
+
+- return false;
++ if (enable)
++ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
++
++ if (restart)
++ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+ }
+
+-static bool xgbe_use_gmii_2500_mode(struct xgbe_prv_data *pdata)
++static void xgbe_an37_restart(struct xgbe_prv_data *pdata)
+ {
+- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+- if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+- return true;
+- } else {
+- if (pdata->phy.speed == SPEED_2500)
+- return true;
+- }
++ xgbe_an37_enable_interrupts(pdata);
++ xgbe_an37_set(pdata, true, true);
+
+- return false;
++ netif_dbg(pdata, link, pdata->netdev, "CL37 AN enabled/restarted\n");
+ }
+
+-static bool xgbe_use_gmii_mode(struct xgbe_prv_data *pdata)
++static void xgbe_an37_disable(struct xgbe_prv_data *pdata)
+ {
+- if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+- if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+- return true;
+- } else {
+- if (pdata->phy.speed == SPEED_1000)
+- return true;
+- }
++ xgbe_an37_set(pdata, false, false);
++ xgbe_an37_disable_interrupts(pdata);
+
+- return false;
++ netif_dbg(pdata, link, pdata->netdev, "CL37 AN disabled\n");
+ }
+
+-static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
++static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
++ bool restart)
+ {
+ unsigned int reg;
+
+@@ -437,22 +418,62 @@ static void xgbe_set_an(struct xgbe_prv_data *pdata, bool enable, bool restart)
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+ }
+
+-static void xgbe_restart_an(struct xgbe_prv_data *pdata)
++static void xgbe_an73_restart(struct xgbe_prv_data *pdata)
++{
++ xgbe_an73_enable_interrupts(pdata);
++ xgbe_an73_set(pdata, true, true);
++
++ netif_dbg(pdata, link, pdata->netdev, "CL73 AN enabled/restarted\n");
++}
++
++static void xgbe_an73_disable(struct xgbe_prv_data *pdata)
+ {
+- xgbe_set_an(pdata, true, true);
++ xgbe_an73_set(pdata, false, false);
++ xgbe_an73_disable_interrupts(pdata);
+
+- netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
++ netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n");
++}
++
++static void xgbe_an_restart(struct xgbe_prv_data *pdata)
++{
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ case XGBE_AN_MODE_CL73_REDRV:
++ xgbe_an73_restart(pdata);
++ break;
++ case XGBE_AN_MODE_CL37:
++ case XGBE_AN_MODE_CL37_SGMII:
++ xgbe_an37_restart(pdata);
++ break;
++ default:
++ break;
++ }
+ }
+
+-static void xgbe_disable_an(struct xgbe_prv_data *pdata)
++static void xgbe_an_disable(struct xgbe_prv_data *pdata)
+ {
+- xgbe_set_an(pdata, false, false);
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ case XGBE_AN_MODE_CL73_REDRV:
++ xgbe_an73_disable(pdata);
++ break;
++ case XGBE_AN_MODE_CL37:
++ case XGBE_AN_MODE_CL37_SGMII:
++ xgbe_an37_disable(pdata);
++ break;
++ default:
++ break;
++ }
++}
+
+- netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
++static void xgbe_an_disable_all(struct xgbe_prv_data *pdata)
++{
++ xgbe_an73_disable(pdata);
++ xgbe_an37_disable(pdata);
+ }
+
+-static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+- enum xgbe_rx *state)
++static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
++ enum xgbe_rx *state)
+ {
+ unsigned int ad_reg, lp_reg, reg;
+
+@@ -476,13 +497,15 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & XGBE_KR_TRAINING_ENABLE) {
+- XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
++ if (pdata->phy_if.phy_impl.kr_training_pre)
++ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+ reg |= XGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+- XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
++ if (pdata->phy_if.phy_impl.kr_training_post)
++ pdata->phy_if.phy_impl.kr_training_post(pdata);
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "KR training initiated\n");
+@@ -491,8 +514,8 @@ static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
+ return XGBE_AN_PAGE_RECEIVED;
+ }
+
+-static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+- enum xgbe_rx *state)
++static enum xgbe_an xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata,
++ enum xgbe_rx *state)
+ {
+ u16 msg;
+
+@@ -508,8 +531,8 @@ static enum xgbe_an xgbe_an_tx_xnp(struct xgbe_prv_data *pdata,
+ return XGBE_AN_PAGE_RECEIVED;
+ }
+
+-static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+- enum xgbe_rx *state)
++static enum xgbe_an xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata,
++ enum xgbe_rx *state)
+ {
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+@@ -528,12 +551,12 @@ static enum xgbe_an xgbe_an_rx_bpa(struct xgbe_prv_data *pdata,
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+- ? xgbe_an_tx_xnp(pdata, state)
+- : xgbe_an_tx_training(pdata, state);
++ ? xgbe_an73_tx_xnp(pdata, state)
++ : xgbe_an73_tx_training(pdata, state);
+ }
+
+-static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+- enum xgbe_rx *state)
++static enum xgbe_an xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata,
++ enum xgbe_rx *state)
+ {
+ unsigned int ad_reg, lp_reg;
+
+@@ -543,11 +566,11 @@ static enum xgbe_an xgbe_an_rx_xnp(struct xgbe_prv_data *pdata,
+
+ return ((ad_reg & XGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & XGBE_XNP_NP_EXCHANGE))
+- ? xgbe_an_tx_xnp(pdata, state)
+- : xgbe_an_tx_training(pdata, state);
++ ? xgbe_an73_tx_xnp(pdata, state)
++ : xgbe_an73_tx_training(pdata, state);
+ }
+
+-static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
++static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata)
+ {
+ enum xgbe_rx *state;
+ unsigned long an_timeout;
+@@ -566,20 +589,20 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+ pdata->an_start = jiffies;
+
+ netif_dbg(pdata, link, pdata->netdev,
+- "AN timed out, resetting state\n");
++ "CL73 AN timed out, resetting state\n");
+ }
+ }
+
+ state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state
+- : &pdata->kx_state;
++ : &pdata->kx_state;
+
+ switch (*state) {
+ case XGBE_RX_BPA:
+- ret = xgbe_an_rx_bpa(pdata, state);
++ ret = xgbe_an73_rx_bpa(pdata, state);
+ break;
+
+ case XGBE_RX_XNP:
+- ret = xgbe_an_rx_xnp(pdata, state);
++ ret = xgbe_an73_rx_xnp(pdata, state);
+ break;
+
+ default:
+@@ -589,7 +612,7 @@ static enum xgbe_an xgbe_an_page_received(struct xgbe_prv_data *pdata)
+ return ret;
+ }
+
+-static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
++static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+ {
+ /* Be sure we aren't looping trying to negotiate */
+ if (xgbe_in_kr_mode(pdata)) {
+@@ -611,23 +634,43 @@ static enum xgbe_an xgbe_an_incompat_link(struct xgbe_prv_data *pdata)
+ return XGBE_AN_NO_LINK;
+ }
+
+- xgbe_disable_an(pdata);
++ xgbe_an73_disable(pdata);
+
+ xgbe_switch_mode(pdata);
+
+- xgbe_restart_an(pdata);
++ xgbe_an73_restart(pdata);
+
+ return XGBE_AN_INCOMPAT_LINK;
+ }
+
+-static irqreturn_t xgbe_an_isr(int irq, void *data)
++static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
++ unsigned int reg;
+
+- netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
++ /* Disable AN interrupts */
++ xgbe_an37_disable_interrupts(pdata);
++
++ /* Save the interrupt(s) that fired */
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
++ pdata->an_int = reg & XGBE_AN_CL37_INT_MASK;
++ pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK;
+
++ if (pdata->an_int) {
++ /* Clear the interrupt(s) that fired and process them */
++ reg &= ~XGBE_AN_CL37_INT_MASK;
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
++
++ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
++ } else {
++ /* Enable AN interrupts */
++ xgbe_an37_enable_interrupts(pdata);
++ }
++}
++
++static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
++{
+ /* Disable AN interrupts */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
++ xgbe_an73_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+@@ -639,13 +682,37 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
+ queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ } else {
+ /* Enable AN interrupts */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+- XGBE_AN_INT_MASK);
++ xgbe_an73_enable_interrupts(pdata);
++ }
++}
++
++static irqreturn_t xgbe_an_isr(int irq, void *data)
++{
++ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
++
++ netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
++
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ case XGBE_AN_MODE_CL73_REDRV:
++ xgbe_an73_isr(pdata);
++ break;
++ case XGBE_AN_MODE_CL37:
++ case XGBE_AN_MODE_CL37_SGMII:
++ xgbe_an37_isr(pdata);
++ break;
++ default:
++ break;
+ }
+
+ return IRQ_HANDLED;
+ }
+
++static irqreturn_t xgbe_an_combined_isr(int irq, struct xgbe_prv_data *pdata)
++{
++ return xgbe_an_isr(irq, pdata);
++}
++
+ static void xgbe_an_irq_work(struct work_struct *work)
+ {
+ struct xgbe_prv_data *pdata = container_of(work,
+@@ -679,36 +746,87 @@ static const char *xgbe_state_as_string(enum xgbe_an state)
+ }
+ }
+
+-static void xgbe_an_state_machine(struct work_struct *work)
++static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_prv_data *pdata = container_of(work,
+- struct xgbe_prv_data,
+- an_work);
+ enum xgbe_an cur_state = pdata->an_state;
+
+- mutex_lock(&pdata->an_mutex);
++ if (!pdata->an_int)
++ return;
++
++ if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) {
++ pdata->an_state = XGBE_AN_COMPLETE;
++ pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT;
++
++ /* If SGMII is enabled, check the link status */
++ if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) &&
++ !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS))
++ pdata->an_state = XGBE_AN_NO_LINK;
++ }
++
++ netif_dbg(pdata, link, pdata->netdev, "CL37 AN %s\n",
++ xgbe_state_as_string(pdata->an_state));
++
++ cur_state = pdata->an_state;
++
++ switch (pdata->an_state) {
++ case XGBE_AN_READY:
++ break;
++
++ case XGBE_AN_COMPLETE:
++ netif_dbg(pdata, link, pdata->netdev,
++ "Auto negotiation successful\n");
++ break;
++
++ case XGBE_AN_NO_LINK:
++ break;
++
++ default:
++ pdata->an_state = XGBE_AN_ERROR;
++ }
++
++ if (pdata->an_state == XGBE_AN_ERROR) {
++ netdev_err(pdata->netdev,
++ "error during auto-negotiation, state=%u\n",
++ cur_state);
++
++ pdata->an_int = 0;
++ xgbe_an37_clear_interrupts(pdata);
++ }
++
++ if (pdata->an_state >= XGBE_AN_COMPLETE) {
++ pdata->an_result = pdata->an_state;
++ pdata->an_state = XGBE_AN_READY;
++
++ netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n",
++ xgbe_state_as_string(pdata->an_result));
++ }
++
++ xgbe_an37_enable_interrupts(pdata);
++}
++
++static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata)
++{
++ enum xgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+- goto out;
++ return;
+
+ next_int:
+- if (pdata->an_int & XGBE_AN_PG_RCV) {
++ if (pdata->an_int & XGBE_AN_CL73_PG_RCV) {
+ pdata->an_state = XGBE_AN_PAGE_RECEIVED;
+- pdata->an_int &= ~XGBE_AN_PG_RCV;
+- } else if (pdata->an_int & XGBE_AN_INC_LINK) {
++ pdata->an_int &= ~XGBE_AN_CL73_PG_RCV;
++ } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) {
+ pdata->an_state = XGBE_AN_INCOMPAT_LINK;
+- pdata->an_int &= ~XGBE_AN_INC_LINK;
+- } else if (pdata->an_int & XGBE_AN_INT_CMPLT) {
++ pdata->an_int &= ~XGBE_AN_CL73_INC_LINK;
++ } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) {
+ pdata->an_state = XGBE_AN_COMPLETE;
+- pdata->an_int &= ~XGBE_AN_INT_CMPLT;
++ pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT;
+ } else {
+ pdata->an_state = XGBE_AN_ERROR;
+ }
+
+- pdata->an_result = pdata->an_state;
+-
+ again:
+- netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
++ netif_dbg(pdata, link, pdata->netdev, "CL73 AN %s\n",
+ xgbe_state_as_string(pdata->an_state));
+
+ cur_state = pdata->an_state;
+@@ -719,14 +837,14 @@ static void xgbe_an_state_machine(struct work_struct *work)
+ break;
+
+ case XGBE_AN_PAGE_RECEIVED:
+- pdata->an_state = xgbe_an_page_received(pdata);
++ pdata->an_state = xgbe_an73_page_received(pdata);
+ pdata->an_supported++;
+ break;
+
+ case XGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+- pdata->an_state = xgbe_an_incompat_link(pdata);
++ pdata->an_state = xgbe_an73_incompat_link(pdata);
+ break;
+
+ case XGBE_AN_COMPLETE:
+@@ -745,14 +863,14 @@ static void xgbe_an_state_machine(struct work_struct *work)
+
+ if (pdata->an_state == XGBE_AN_NO_LINK) {
+ pdata->an_int = 0;
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ xgbe_an73_clear_interrupts(pdata);
+ } else if (pdata->an_state == XGBE_AN_ERROR) {
+ netdev_err(pdata->netdev,
+ "error during auto-negotiation, state=%u\n",
+ cur_state);
+
+ pdata->an_int = 0;
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ xgbe_an73_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= XGBE_AN_COMPLETE) {
+@@ -762,7 +880,7 @@ static void xgbe_an_state_machine(struct work_struct *work)
+ pdata->kx_state = XGBE_RX_BPA;
+ pdata->an_start = 0;
+
+- netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
++ netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n",
+ xgbe_state_as_string(pdata->an_result));
+ }
+
+@@ -772,20 +890,88 @@ static void xgbe_an_state_machine(struct work_struct *work)
+ if (pdata->an_int)
+ goto next_int;
+
+-out:
+- /* Enable AN interrupts on the way out */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
++ xgbe_an73_enable_interrupts(pdata);
++}
++
++static void xgbe_an_state_machine(struct work_struct *work)
++{
++ struct xgbe_prv_data *pdata = container_of(work,
++ struct xgbe_prv_data,
++ an_work);
++
++ mutex_lock(&pdata->an_mutex);
++
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ case XGBE_AN_MODE_CL73_REDRV:
++ xgbe_an73_state_machine(pdata);
++ break;
++ case XGBE_AN_MODE_CL37:
++ case XGBE_AN_MODE_CL37_SGMII:
++ xgbe_an37_state_machine(pdata);
++ break;
++ default:
++ break;
++ }
+
+ mutex_unlock(&pdata->an_mutex);
+ }
+
+-static void xgbe_an_init(struct xgbe_prv_data *pdata)
++static void xgbe_an37_init(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
++ unsigned int advertising, reg;
++
++ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
++
++ /* Set up Advertisement register */
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
++ if (advertising & ADVERTISED_Pause)
++ reg |= 0x100;
++ else
++ reg &= ~0x100;
++
++ if (advertising & ADVERTISED_Asym_Pause)
++ reg |= 0x80;
++ else
++ reg &= ~0x80;
++
++ /* Full duplex, but not half */
++ reg |= XGBE_AN_CL37_FD_MASK;
++ reg &= ~XGBE_AN_CL37_HD_MASK;
++
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg);
++
++ /* Set up the Control register */
++ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
++ reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK;
++ reg &= ~XGBE_AN_CL37_PCS_MODE_MASK;
++
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL37:
++ reg |= XGBE_AN_CL37_PCS_MODE_BASEX;
++ break;
++ case XGBE_AN_MODE_CL37_SGMII:
++ reg |= XGBE_AN_CL37_PCS_MODE_SGMII;
++ break;
++ default:
++ break;
++ }
++
++ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
++
++ netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
++ (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII");
++}
++
++static void xgbe_an73_init(struct xgbe_prv_data *pdata)
++{
++ unsigned int advertising, reg;
++
++ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+- if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
++ if (advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+@@ -794,13 +980,13 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
++ if (advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+- if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+- (pdata->phy.advertising & ADVERTISED_2500baseX_Full))
++ if ((advertising & ADVERTISED_1000baseKX_Full) ||
++ (advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+@@ -809,12 +995,12 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+- if (pdata->phy.advertising & ADVERTISED_Pause)
++ if (advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+- if (pdata->phy.advertising & ADVERTISED_Asym_Pause)
++ if (advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+@@ -824,7 +1010,25 @@ static void xgbe_an_init(struct xgbe_prv_data *pdata)
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+
+- netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
++ netif_dbg(pdata, link, pdata->netdev, "CL73 AN initialized\n");
++}
++
++static void xgbe_an_init(struct xgbe_prv_data *pdata)
++{
++ /* Set up advertisement registers based on current settings */
++ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ case XGBE_AN_MODE_CL73_REDRV:
++ xgbe_an73_init(pdata);
++ break;
++ case XGBE_AN_MODE_CL37:
++ case XGBE_AN_MODE_CL37_SGMII:
++ xgbe_an37_init(pdata);
++ break;
++ default:
++ break;
++ }
+ }
+
+ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+@@ -842,6 +1046,8 @@ static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
+ static const char *xgbe_phy_speed_string(int speed)
+ {
+ switch (speed) {
++ case SPEED_100:
++ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+@@ -907,24 +1113,32 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
+ xgbe_phy_print_status(pdata);
+ }
+
++static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
++{
++ return pdata->phy_if.phy_impl.valid_speed(pdata, speed);
++}
++
+ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ {
++ enum xgbe_mode mode;
++
+ netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
+
+ /* Disable auto-negotiation */
+- xgbe_disable_an(pdata);
+-
+- /* Validate/Set specified speed */
+- switch (pdata->phy.speed) {
+- case SPEED_10000:
+- xgbe_set_mode(pdata, XGBE_MODE_KR);
++ xgbe_an_disable(pdata);
++
++ /* Set specified mode for specified speed */
++ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
++ switch (mode) {
++ case XGBE_MODE_KX_1000:
++ case XGBE_MODE_KX_2500:
++ case XGBE_MODE_KR:
++ case XGBE_MODE_SGMII_100:
++ case XGBE_MODE_SGMII_1000:
++ case XGBE_MODE_X:
++ case XGBE_MODE_SFI:
+ break;
+-
+- case SPEED_2500:
+- case SPEED_1000:
+- xgbe_set_mode(pdata, XGBE_MODE_KX);
+- break;
+-
++ case XGBE_MODE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+@@ -933,38 +1147,60 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
++ xgbe_set_mode(pdata, mode);
++
+ return 0;
+ }
+
+ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+ {
++ int ret;
++
+ set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = jiffies;
+
+- if (pdata->phy.autoneg != AUTONEG_ENABLE)
+- return xgbe_phy_config_fixed(pdata);
++ ret = pdata->phy_if.phy_impl.an_config(pdata);
++ if (ret)
++ return ret;
++
++ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
++ ret = xgbe_phy_config_fixed(pdata);
++ if (ret || !pdata->kr_redrv)
++ return ret;
+
+- netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
++ netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
++ } else {
++ netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
++ }
+
+ /* Disable auto-negotiation interrupt */
+ disable_irq(pdata->an_irq);
+
+ /* Start auto-negotiation in a supported mode */
+- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
++ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+ xgbe_set_mode(pdata, XGBE_MODE_KR);
+- } else if ((pdata->phy.advertising & ADVERTISED_1000baseKX_Full) ||
+- (pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
+- xgbe_set_mode(pdata, XGBE_MODE_KX);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
++ xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
++ xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
++ xgbe_set_mode(pdata, XGBE_MODE_SFI);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
++ xgbe_set_mode(pdata, XGBE_MODE_X);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
++ xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
++ xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+ } else {
+ enable_irq(pdata->an_irq);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+- xgbe_disable_an(pdata);
++ xgbe_an_disable_all(pdata);
+
+ /* Clear any auto-negotitation interrupts */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ xgbe_an_clear_interrupts_all(pdata);
+
+ pdata->an_result = XGBE_AN_READY;
+ pdata->an_state = XGBE_AN_READY;
+@@ -974,11 +1210,8 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+ /* Re-enable auto-negotiation interrupt */
+ enable_irq(pdata->an_irq);
+
+- /* Set up advertisement registers based on current settings */
+ xgbe_an_init(pdata);
+-
+- /* Enable and start auto-negotiation */
+- xgbe_restart_an(pdata);
++ xgbe_an_restart(pdata);
+
+ return 0;
+ }
+@@ -1016,108 +1249,52 @@ static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
+ }
+ }
+
+-static void xgbe_phy_status_force(struct xgbe_prv_data *pdata)
++static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+ {
+- if (xgbe_in_kr_mode(pdata)) {
+- pdata->phy.speed = SPEED_10000;
+- } else {
+- switch (pdata->speed_set) {
+- case XGBE_SPEEDSET_1000_10000:
+- pdata->phy.speed = SPEED_1000;
+- break;
+-
+- case XGBE_SPEEDSET_2500_10000:
+- pdata->phy.speed = SPEED_2500;
+- break;
+- }
+- }
+- pdata->phy.duplex = DUPLEX_FULL;
++ return pdata->phy_if.phy_impl.an_outcome(pdata);
+ }
+
+-static void xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
++static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+ {
+- unsigned int ad_reg, lp_reg;
++ enum xgbe_mode mode;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+- return xgbe_phy_status_force(pdata);
+-
+- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+- pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+-
+- /* Compare Advertisement and Link Partner register 1 */
+- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+- if (lp_reg & 0x400)
+- pdata->phy.lp_advertising |= ADVERTISED_Pause;
+- if (lp_reg & 0x800)
+- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+-
+- if (pdata->phy.pause_autoneg) {
+- /* Set flow control based on auto-negotiation result */
+- pdata->phy.tx_pause = 0;
+- pdata->phy.rx_pause = 0;
+-
+- if (ad_reg & lp_reg & 0x400) {
+- pdata->phy.tx_pause = 1;
+- pdata->phy.rx_pause = 1;
+- } else if (ad_reg & lp_reg & 0x800) {
+- if (ad_reg & 0x400)
+- pdata->phy.rx_pause = 1;
+- else if (lp_reg & 0x400)
+- pdata->phy.tx_pause = 1;
+- }
+- }
+-
+- /* Compare Advertisement and Link Partner register 2 */
+- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+- if (lp_reg & 0x80)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+- if (lp_reg & 0x20) {
+- switch (pdata->speed_set) {
+- case XGBE_SPEEDSET_1000_10000:
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+- break;
+- case XGBE_SPEEDSET_2500_10000:
+- pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
+- break;
+- }
+- }
++ mode = xgbe_cur_mode(pdata);
++ else
++ mode = xgbe_phy_status_aneg(pdata);
+
+- ad_reg &= lp_reg;
+- if (ad_reg & 0x80) {
++ switch (mode) {
++ case XGBE_MODE_SGMII_100:
++ pdata->phy.speed = SPEED_100;
++ break;
++ case XGBE_MODE_X:
++ case XGBE_MODE_KX_1000:
++ case XGBE_MODE_SGMII_1000:
++ pdata->phy.speed = SPEED_1000;
++ break;
++ case XGBE_MODE_KX_2500:
++ pdata->phy.speed = SPEED_2500;
++ break;
++ case XGBE_MODE_KR:
++ case XGBE_MODE_SFI:
+ pdata->phy.speed = SPEED_10000;
+- xgbe_set_mode(pdata, XGBE_MODE_KR);
+- } else if (ad_reg & 0x20) {
+- switch (pdata->speed_set) {
+- case XGBE_SPEEDSET_1000_10000:
+- pdata->phy.speed = SPEED_1000;
+- break;
+-
+- case XGBE_SPEEDSET_2500_10000:
+- pdata->phy.speed = SPEED_2500;
+- break;
+- }
+-
+- xgbe_set_mode(pdata, XGBE_MODE_KX);
+- } else {
++ break;
++ case XGBE_MODE_UNKNOWN:
++ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+- /* Compare Advertisement and Link Partner register 3 */
+- ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+- lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+- if (lp_reg & 0xc000)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+-
+ pdata->phy.duplex = DUPLEX_FULL;
++
++ xgbe_set_mode(pdata, mode);
+ }
+
+ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg, link_aneg;
++ unsigned int link_aneg;
++ int an_restart;
+
+ if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
+ netif_carrier_off(pdata->netdev);
+@@ -1128,12 +1305,12 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+- /* Get the link status. Link status is latched low, so read
+- * once to clear and then read again to get current state
+- */
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+- pdata->phy.link = (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
++ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
++ &an_restart);
++ if (an_restart) {
++ xgbe_phy_config_aneg(pdata);
++ return;
++ }
+
+ if (pdata->phy.link) {
+ if (link_aneg && !xgbe_phy_aneg_done(pdata)) {
+@@ -1141,7 +1318,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ return;
+ }
+
+- xgbe_phy_status_aneg(pdata);
++ xgbe_phy_status_result(pdata);
+
+ if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
+ clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
+@@ -1155,7 +1332,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ return;
+ }
+
+- xgbe_phy_status_aneg(pdata);
++ xgbe_phy_status_result(pdata);
+
+ netif_carrier_off(pdata->netdev);
+ }
+@@ -1168,13 +1345,19 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ {
+ netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
+
++ if (!pdata->phy_started)
++ return;
++
++ /* Indicate the PHY is down */
++ pdata->phy_started = 0;
++
+ /* Disable auto-negotiation */
+- xgbe_disable_an(pdata);
++ xgbe_an_disable_all(pdata);
+
+- /* Disable auto-negotiation interrupts */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
++ if (pdata->dev_irq != pdata->an_irq)
++ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+
+- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
++ pdata->phy_if.phy_impl.stop(pdata);
+
+ pdata->phy.link = 0;
+ netif_carrier_off(pdata->netdev);
+@@ -1189,64 +1372,74 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+
+ netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
+
+- ret = devm_request_irq(pdata->dev, pdata->an_irq,
+- xgbe_an_isr, 0, pdata->an_name,
+- pdata);
+- if (ret) {
+- netdev_err(netdev, "phy irq request failed\n");
++ ret = pdata->phy_if.phy_impl.start(pdata);
++ if (ret)
+ return ret;
++
++ /* If we have a separate AN irq, enable it */
++ if (pdata->dev_irq != pdata->an_irq) {
++ ret = devm_request_irq(pdata->dev, pdata->an_irq,
++ xgbe_an_isr, 0, pdata->an_name,
++ pdata);
++ if (ret) {
++ netdev_err(netdev, "phy irq request failed\n");
++ goto err_stop;
++ }
+ }
+
+ /* Set initial mode - call the mode setting routines
+ * directly to insure we are properly configured
+ */
+- if (xgbe_use_xgmii_mode(pdata)) {
+- xgbe_xgmii_mode(pdata);
+- } else if (xgbe_use_gmii_mode(pdata)) {
+- xgbe_gmii_mode(pdata);
+- } else if (xgbe_use_gmii_2500_mode(pdata)) {
+- xgbe_gmii_2500_mode(pdata);
++ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
++ xgbe_kr_mode(pdata);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
++ xgbe_kx_2500_mode(pdata);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
++ xgbe_kx_1000_mode(pdata);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
++ xgbe_sfi_mode(pdata);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
++ xgbe_x_mode(pdata);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
++ xgbe_sgmii_1000_mode(pdata);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
++ xgbe_sgmii_100_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_irq;
+ }
+
+- /* Set up advertisement registers based on current settings */
+- xgbe_an_init(pdata);
++ /* Indicate the PHY is up and running */
++ pdata->phy_started = 1;
+
+- /* Enable auto-negotiation interrupts */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
++ xgbe_an_init(pdata);
++ xgbe_an_enable_interrupts(pdata);
+
+ return xgbe_phy_config_aneg(pdata);
+
+ err_irq:
+- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
++ if (pdata->dev_irq != pdata->an_irq)
++ devm_free_irq(pdata->dev, pdata->an_irq, pdata);
++
++err_stop:
++ pdata->phy_if.phy_impl.stop(pdata);
+
+ return ret;
+ }
+
+ static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
+ {
+- unsigned int count, reg;
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+- reg |= MDIO_CTRL1_RESET;
+- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
+-
+- count = 50;
+- do {
+- msleep(20);
+- reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
+- } while ((reg & MDIO_CTRL1_RESET) && --count);
++ int ret;
+
+- if (reg & MDIO_CTRL1_RESET)
+- return -ETIMEDOUT;
++ ret = pdata->phy_if.phy_impl.reset(pdata);
++ if (ret)
++ return ret;
+
+ /* Disable auto-negotiation for now */
+- xgbe_disable_an(pdata);
++ xgbe_an_disable_all(pdata);
+
+ /* Clear auto-negotiation interrupts */
+- XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
++ xgbe_an_clear_interrupts_all(pdata);
+
+ return 0;
+ }
+@@ -1257,74 +1450,96 @@ static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+
+ dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
+
+- dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
++ dev_dbg(dev, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+- dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
++ dev_dbg(dev, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+- dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
++ dev_dbg(dev, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+- dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
++ dev_dbg(dev, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
++ dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
++ dev_dbg(dev, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+- dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
++ dev_dbg(dev, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+- dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
++ dev_dbg(dev, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+- dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
++ dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+- dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
++ dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+- dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
++ dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+- dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
++ dev_dbg(dev, "Auto-Neg Completion Reg (%#06x) = %#06x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_dbg(dev, "\n*************************************************\n");
+ }
+
+-static void xgbe_phy_init(struct xgbe_prv_data *pdata)
++static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
+ {
++ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
++ return SPEED_10000;
++ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
++ return SPEED_10000;
++ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
++ return SPEED_2500;
++ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
++ return SPEED_1000;
++ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
++ return SPEED_1000;
++ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
++ return SPEED_100;
++
++ return SPEED_UNKNOWN;
++}
++
++static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
++{
++ xgbe_phy_stop(pdata);
++
++ pdata->phy_if.phy_impl.exit(pdata);
++}
++
++static int xgbe_phy_init(struct xgbe_prv_data *pdata)
++{
++ int ret;
++
+ mutex_init(&pdata->an_mutex);
+ INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
+ INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+- /* Initialize supported features */
+- pdata->phy.supported = SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_Backplane;
+- pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+- switch (pdata->speed_set) {
+- case XGBE_SPEEDSET_1000_10000:
+- pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+- break;
+- case XGBE_SPEEDSET_2500_10000:
+- pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+- break;
+- }
+-
++ /* Check for FEC support */
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+- if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+- pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+
++ /* Setup the phy (including supported features) */
++ ret = pdata->phy_if.phy_impl.init(pdata);
++ if (ret)
++ return ret;
+ pdata->phy.advertising = pdata->phy.supported;
+
+ pdata->phy.address = 0;
+
+- pdata->phy.autoneg = AUTONEG_ENABLE;
+- pdata->phy.speed = SPEED_UNKNOWN;
+- pdata->phy.duplex = DUPLEX_UNKNOWN;
++ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
++ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ } else {
++ pdata->phy.autoneg = AUTONEG_DISABLE;
++ pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata);
++ pdata->phy.duplex = DUPLEX_FULL;
++ }
+
+ pdata->phy.link = 0;
+
+@@ -1346,11 +1561,14 @@ static void xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ if (netif_msg_drv(pdata))
+ xgbe_dump_phy_registers(pdata);
++
++ return 0;
+ }
+
+ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+ {
+ phy_if->phy_init = xgbe_phy_init;
++ phy_if->phy_exit = xgbe_phy_exit;
+
+ phy_if->phy_reset = xgbe_phy_reset;
+ phy_if->phy_start = xgbe_phy_start;
+@@ -1358,4 +1576,8 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+
+ phy_if->phy_status = xgbe_phy_status;
+ phy_if->phy_config_aneg = xgbe_phy_config_aneg;
++
++ phy_if->phy_valid_speed = xgbe_phy_valid_speed;
++
++ phy_if->an_isr = xgbe_an_combined_isr;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+new file mode 100644
+index 0000000..c2730f1
+--- /dev/null
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -0,0 +1,542 @@
++/*
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/log2.h>
++
++#include "xgbe.h"
++#include "xgbe-common.h"
++
++static int xgbe_config_msi(struct xgbe_prv_data *pdata)
++{
++ unsigned int msi_count;
++ unsigned int i, j;
++ int ret;
++
++ msi_count = XGBE_MSIX_BASE_COUNT;
++ msi_count += max(pdata->rx_ring_count,
++ pdata->tx_ring_count);
++ msi_count = roundup_pow_of_two(msi_count);
++
++ ret = pci_enable_msi_exact(pdata->pcidev, msi_count);
++ if (ret < 0) {
++ dev_info(pdata->dev, "MSI request for %u interrupts failed\n",
++ msi_count);
++
++ ret = pci_enable_msi(pdata->pcidev);
++ if (ret < 0) {
++ dev_info(pdata->dev, "MSI enablement failed\n");
++ return ret;
++ }
++
++ msi_count = 1;
++ }
++
++ pdata->irq_count = msi_count;
++
++ pdata->dev_irq = pdata->pcidev->irq;
++
++ if (msi_count > 1) {
++ pdata->ecc_irq = pdata->pcidev->irq + 1;
++ pdata->i2c_irq = pdata->pcidev->irq + 2;
++ pdata->an_irq = pdata->pcidev->irq + 3;
++
++ for (i = XGBE_MSIX_BASE_COUNT, j = 0;
++ (i < msi_count) && (j < XGBE_MAX_DMA_CHANNELS);
++ i++, j++)
++ pdata->channel_irq[j] = pdata->pcidev->irq + i;
++ pdata->channel_irq_count = j;
++
++ pdata->per_channel_irq = 1;
++ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
++ } else {
++ pdata->ecc_irq = pdata->pcidev->irq;
++ pdata->i2c_irq = pdata->pcidev->irq;
++ pdata->an_irq = pdata->pcidev->irq;
++ }
++
++ if (netif_msg_probe(pdata))
++ dev_dbg(pdata->dev, "MSI interrupts enabled\n");
++
++ return 0;
++}
++
++static int xgbe_config_msix(struct xgbe_prv_data *pdata)
++{
++ unsigned int msix_count;
++ unsigned int i, j;
++ int ret;
++
++ msix_count = XGBE_MSIX_BASE_COUNT;
++ msix_count += max(pdata->rx_ring_count,
++ pdata->tx_ring_count);
++
++ pdata->msix_entries = devm_kcalloc(pdata->dev, msix_count,
++ sizeof(struct msix_entry),
++ GFP_KERNEL);
++ if (!pdata->msix_entries)
++ return -ENOMEM;
++
++ for (i = 0; i < msix_count; i++)
++ pdata->msix_entries[i].entry = i;
++
++ ret = pci_enable_msix_range(pdata->pcidev, pdata->msix_entries,
++ XGBE_MSIX_MIN_COUNT, msix_count);
++ if (ret < 0) {
++ dev_info(pdata->dev, "MSI-X enablement failed\n");
++ devm_kfree(pdata->dev, pdata->msix_entries);
++ pdata->msix_entries = NULL;
++ return ret;
++ }
++
++ pdata->irq_count = ret;
++
++ pdata->dev_irq = pdata->msix_entries[0].vector;
++ pdata->ecc_irq = pdata->msix_entries[1].vector;
++ pdata->i2c_irq = pdata->msix_entries[2].vector;
++ pdata->an_irq = pdata->msix_entries[3].vector;
++
++ for (i = XGBE_MSIX_BASE_COUNT, j = 0; i < ret; i++, j++)
++ pdata->channel_irq[j] = pdata->msix_entries[i].vector;
++ pdata->channel_irq_count = j;
++
++ pdata->per_channel_irq = 1;
++ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
++
++ if (netif_msg_probe(pdata))
++ dev_dbg(pdata->dev, "MSI-X interrupts enabled\n");
++
++ return 0;
++}
++
++static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
++{
++ int ret;
++
++ ret = xgbe_config_msix(pdata);
++ if (!ret)
++ goto out;
++
++ ret = xgbe_config_msi(pdata);
++ if (!ret)
++ goto out;
++
++ pdata->irq_count = 1;
++ pdata->irq_shared = 1;
++
++ pdata->dev_irq = pdata->pcidev->irq;
++ pdata->ecc_irq = pdata->pcidev->irq;
++ pdata->i2c_irq = pdata->pcidev->irq;
++ pdata->an_irq = pdata->pcidev->irq;
++
++out:
++ if (netif_msg_probe(pdata)) {
++ unsigned int i;
++
++ dev_dbg(pdata->dev, " dev irq=%d\n", pdata->dev_irq);
++ dev_dbg(pdata->dev, " ecc irq=%d\n", pdata->ecc_irq);
++ dev_dbg(pdata->dev, " i2c irq=%d\n", pdata->i2c_irq);
++ dev_dbg(pdata->dev, " an irq=%d\n", pdata->an_irq);
++ for (i = 0; i < pdata->channel_irq_count; i++)
++ dev_dbg(pdata->dev, " dma%u irq=%d\n",
++ i, pdata->channel_irq[i]);
++ }
++
++ return 0;
++}
++
++static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
++{
++ struct xgbe_prv_data *pdata;
++ struct device *dev = &pdev->dev;
++ void __iomem * const *iomap_table;
++ struct pci_dev *rdev;
++ unsigned int ma_lo, ma_hi;
++ unsigned int reg;
++ int bar_mask;
++ int ret;
++
++ pdata = xgbe_alloc_pdata(dev);
++ if (IS_ERR(pdata)) {
++ ret = PTR_ERR(pdata);
++ goto err_alloc;
++ }
++
++ pdata->pcidev = pdev;
++ pci_set_drvdata(pdev, pdata);
++
++ /* Get the version data */
++ pdata->vdata = (struct xgbe_version_data *)id->driver_data;
++
++ ret = pcim_enable_device(pdev);
++ if (ret) {
++ dev_err(dev, "pcim_enable_device failed\n");
++ goto err_pci_enable;
++ }
++
++ /* Obtain the mmio areas for the device */
++ bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
++ ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
++ if (ret) {
++ dev_err(dev, "pcim_iomap_regions failed\n");
++ goto err_pci_enable;
++ }
++
++ iomap_table = pcim_iomap_table(pdev);
++ if (!iomap_table) {
++ dev_err(dev, "pcim_iomap_table failed\n");
++ ret = -ENOMEM;
++ goto err_pci_enable;
++ }
++
++ pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
++ if (!pdata->xgmac_regs) {
++ dev_err(dev, "xgmac ioremap failed\n");
++ ret = -ENOMEM;
++ goto err_pci_enable;
++ }
++ pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
++ pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
++ dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
++ dev_dbg(dev, "xi2c_regs = %p\n", pdata->xi2c_regs);
++ }
++
++ pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
++ if (!pdata->xpcs_regs) {
++ dev_err(dev, "xpcs ioremap failed\n");
++ ret = -ENOMEM;
++ goto err_pci_enable;
++ }
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
++
++ /* Set the PCS indirect addressing definition registers */
++ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
++ if (rdev &&
++ (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
++ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
++ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
++ } else {
++ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
++ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
++ }
++ pci_dev_put(rdev);
++
++ /* Configure the PCS indirect addressing support */
++ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
++ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
++ pdata->xpcs_window <<= 6;
++ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
++ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
++ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(dev, "xpcs window = %#010x\n",
++ pdata->xpcs_window);
++ dev_dbg(dev, "xpcs window size = %#010x\n",
++ pdata->xpcs_window_size);
++ dev_dbg(dev, "xpcs window mask = %#010x\n",
++ pdata->xpcs_window_mask);
++ }
++
++ pci_set_master(pdev);
++
++ /* Enable all interrupts in the hardware */
++ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
++
++ /* Retrieve the MAC address */
++ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
++ ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
++ pdata->mac_addr[0] = ma_lo & 0xff;
++ pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
++ pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
++ pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
++ pdata->mac_addr[4] = ma_hi & 0xff;
++ pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
++ if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
++ !is_valid_ether_addr(pdata->mac_addr)) {
++ dev_err(dev, "invalid mac address\n");
++ ret = -EINVAL;
++ goto err_pci_enable;
++ }
++
++ /* Clock settings */
++ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
++ pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;
++
++ /* Set the DMA coherency values */
++ pdata->coherent = 1;
++ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
++ pdata->arcache = XGBE_DMA_OS_ARCACHE;
++ pdata->awcache = XGBE_DMA_OS_AWCACHE;
++
++ /* Set the maximum channels and queues */
++ reg = XP_IOREAD(pdata, XP_PROP_1);
++ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
++ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
++ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
++ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
++ pdata->tx_max_channel_count,
++ pdata->tx_max_channel_count);
++ dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
++ pdata->tx_max_q_count, pdata->rx_max_q_count);
++ }
++
++ /* Set the hardware channel and queue counts */
++ xgbe_set_counts(pdata);
++
++ /* Set the maximum fifo amounts */
++ reg = XP_IOREAD(pdata, XP_PROP_2);
++ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
++ pdata->tx_max_fifo_size *= 16384;
++ pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
++ pdata->vdata->tx_max_fifo_size);
++ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
++ pdata->rx_max_fifo_size *= 16384;
++ pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
++ pdata->vdata->rx_max_fifo_size);
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
++ pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);
++
++ /* Configure interrupt support */
++ ret = xgbe_config_irqs(pdata);
++ if (ret)
++ goto err_pci_enable;
++
++ /* Configure the netdev resource */
++ ret = xgbe_config_netdev(pdata);
++ if (ret)
++ goto err_pci_enable;
++
++ netdev_notice(pdata->netdev, "net device enabled\n");
++
++ return 0;
++
++err_pci_enable:
++ xgbe_free_pdata(pdata);
++
++err_alloc:
++ dev_notice(dev, "net device not enabled\n");
++
++ return ret;
++}
++
++static void xgbe_pci_remove(struct pci_dev *pdev)
++{
++ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
++
++ xgbe_deconfig_netdev(pdata);
++
++ xgbe_free_pdata(pdata);
++}
++
++#ifdef CONFIG_PM
++static int xgbe_pci_suspend(struct pci_dev *pdev, pm_message_t state)
++{
++ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
++ struct net_device *netdev = pdata->netdev;
++ int ret = 0;
++
++ if (netif_running(netdev))
++ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
++
++ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
++
++ return ret;
++}
++
++static int xgbe_pci_resume(struct pci_dev *pdev)
++{
++ struct xgbe_prv_data *pdata = pci_get_drvdata(pdev);
++ struct net_device *netdev = pdata->netdev;
++ int ret = 0;
++
++ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
++
++ if (netif_running(netdev)) {
++ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
++
++ /* Schedule a restart in case the link or phy state changed
++ * while we were powered down.
++ */
++ schedule_work(&pdata->restart_work);
++ }
++
++ return ret;
++}
++#endif /* CONFIG_PM */
++
++static const struct xgbe_version_data xgbe_v2a = {
++ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
++ .xpcs_access = XGBE_XPCS_ACCESS_V2,
++ .mmc_64bit = 1,
++ .tx_max_fifo_size = 229376,
++ .rx_max_fifo_size = 229376,
++ .tx_tstamp_workaround = 1,
++ .ecc_support = 1,
++ .i2c_support = 1,
++};
++
++static const struct xgbe_version_data xgbe_v2b = {
++ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2,
++ .xpcs_access = XGBE_XPCS_ACCESS_V2,
++ .mmc_64bit = 1,
++ .tx_max_fifo_size = 65536,
++ .rx_max_fifo_size = 65536,
++ .tx_tstamp_workaround = 1,
++ .ecc_support = 1,
++ .i2c_support = 1,
++};
++
++static const struct pci_device_id xgbe_pci_table[] = {
++ { PCI_VDEVICE(AMD, 0x1458),
++ .driver_data = (kernel_ulong_t)&xgbe_v2a },
++ { PCI_VDEVICE(AMD, 0x1459),
++ .driver_data = (kernel_ulong_t)&xgbe_v2b },
++ /* Last entry must be zero */
++ { 0, }
++};
++MODULE_DEVICE_TABLE(pci, xgbe_pci_table);
++
++static struct pci_driver xgbe_driver = {
++ .name = XGBE_DRV_NAME,
++ .id_table = xgbe_pci_table,
++ .probe = xgbe_pci_probe,
++ .remove = xgbe_pci_remove,
++#ifdef CONFIG_PM
++ .suspend = xgbe_pci_suspend,
++ .resume = xgbe_pci_resume,
++#endif
++};
++
++int xgbe_pci_init(void)
++{
++ return pci_register_driver(&xgbe_driver);
++}
++
++void xgbe_pci_exit(void)
++{
++ pci_unregister_driver(&xgbe_driver);
++}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+new file mode 100644
+index 0000000..c75edca
+--- /dev/null
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+@@ -0,0 +1,845 @@
++/*
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/kmod.h>
++#include <linux/device.h>
++#include <linux/property.h>
++#include <linux/mdio.h>
++#include <linux/phy.h>
++
++#include "xgbe.h"
++#include "xgbe-common.h"
++
++#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
++#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
++#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
++#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
++#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
++#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
++
++/* Default SerDes settings */
++#define XGBE_SPEED_1000_BLWC 1
++#define XGBE_SPEED_1000_CDR 0x2
++#define XGBE_SPEED_1000_PLL 0x0
++#define XGBE_SPEED_1000_PQ 0xa
++#define XGBE_SPEED_1000_RATE 0x3
++#define XGBE_SPEED_1000_TXAMP 0xf
++#define XGBE_SPEED_1000_WORD 0x1
++#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
++#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
++
++#define XGBE_SPEED_2500_BLWC 1
++#define XGBE_SPEED_2500_CDR 0x2
++#define XGBE_SPEED_2500_PLL 0x0
++#define XGBE_SPEED_2500_PQ 0xa
++#define XGBE_SPEED_2500_RATE 0x1
++#define XGBE_SPEED_2500_TXAMP 0xf
++#define XGBE_SPEED_2500_WORD 0x1
++#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
++#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
++
++#define XGBE_SPEED_10000_BLWC 0
++#define XGBE_SPEED_10000_CDR 0x7
++#define XGBE_SPEED_10000_PLL 0x1
++#define XGBE_SPEED_10000_PQ 0x12
++#define XGBE_SPEED_10000_RATE 0x0
++#define XGBE_SPEED_10000_TXAMP 0xa
++#define XGBE_SPEED_10000_WORD 0x7
++#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
++#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
++
++/* Rate-change complete wait/retry count */
++#define XGBE_RATECHANGE_COUNT 500
++
++static const u32 xgbe_phy_blwc[] = {
++ XGBE_SPEED_1000_BLWC,
++ XGBE_SPEED_2500_BLWC,
++ XGBE_SPEED_10000_BLWC,
++};
++
++static const u32 xgbe_phy_cdr_rate[] = {
++ XGBE_SPEED_1000_CDR,
++ XGBE_SPEED_2500_CDR,
++ XGBE_SPEED_10000_CDR,
++};
++
++static const u32 xgbe_phy_pq_skew[] = {
++ XGBE_SPEED_1000_PQ,
++ XGBE_SPEED_2500_PQ,
++ XGBE_SPEED_10000_PQ,
++};
++
++static const u32 xgbe_phy_tx_amp[] = {
++ XGBE_SPEED_1000_TXAMP,
++ XGBE_SPEED_2500_TXAMP,
++ XGBE_SPEED_10000_TXAMP,
++};
++
++static const u32 xgbe_phy_dfe_tap_cfg[] = {
++ XGBE_SPEED_1000_DFE_TAP_CONFIG,
++ XGBE_SPEED_2500_DFE_TAP_CONFIG,
++ XGBE_SPEED_10000_DFE_TAP_CONFIG,
++};
++
++static const u32 xgbe_phy_dfe_tap_ena[] = {
++ XGBE_SPEED_1000_DFE_TAP_ENABLE,
++ XGBE_SPEED_2500_DFE_TAP_ENABLE,
++ XGBE_SPEED_10000_DFE_TAP_ENABLE,
++};
++
++struct xgbe_phy_data {
++ /* 1000/10000 vs 2500/10000 indicator */
++ unsigned int speed_set;
++
++ /* SerDes UEFI configurable settings.
++ * Switching between modes/speeds requires new values for some
++ * SerDes settings. The values can be supplied as device
++ * properties in array format. The first array entry is for
++ * 1GbE, second for 2.5GbE and third for 10GbE
++ */
++ u32 blwc[XGBE_SPEEDS];
++ u32 cdr_rate[XGBE_SPEEDS];
++ u32 pq_skew[XGBE_SPEEDS];
++ u32 tx_amp[XGBE_SPEEDS];
++ u32 dfe_tap_cfg[XGBE_SPEEDS];
++ u32 dfe_tap_ena[XGBE_SPEEDS];
++};
++
++static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata)
++{
++ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 1);
++}
++
++static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
++{
++ XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
++}
++
++static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ enum xgbe_mode mode;
++ unsigned int ad_reg, lp_reg;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++
++ /* Compare Advertisement and Link Partner register 1 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
++ if (lp_reg & 0x400)
++ pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ if (lp_reg & 0x800)
++ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++
++ if (pdata->phy.pause_autoneg) {
++ /* Set flow control based on auto-negotiation result */
++ pdata->phy.tx_pause = 0;
++ pdata->phy.rx_pause = 0;
++
++ if (ad_reg & lp_reg & 0x400) {
++ pdata->phy.tx_pause = 1;
++ pdata->phy.rx_pause = 1;
++ } else if (ad_reg & lp_reg & 0x800) {
++ if (ad_reg & 0x400)
++ pdata->phy.rx_pause = 1;
++ else if (lp_reg & 0x400)
++ pdata->phy.tx_pause = 1;
++ }
++ }
++
++ /* Compare Advertisement and Link Partner register 2 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
++ if (lp_reg & 0x80)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ if (lp_reg & 0x20) {
++ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
++ pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
++ else
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++ }
++
++ ad_reg &= lp_reg;
++ if (ad_reg & 0x80) {
++ mode = XGBE_MODE_KR;
++ } else if (ad_reg & 0x20) {
++ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
++ mode = XGBE_MODE_KX_2500;
++ else
++ mode = XGBE_MODE_KX_1000;
++ } else {
++ mode = XGBE_MODE_UNKNOWN;
++ }
++
++ /* Compare Advertisement and Link Partner register 3 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
++ if (lp_reg & 0xc000)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++
++ return mode;
++}
++
++static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
++{
++ return pdata->phy.advertising;
++}
++
++static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
++{
++ /* Nothing uniquely required for an configuration */
++ return 0;
++}
++
++static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
++{
++ return XGBE_AN_MODE_CL73;
++}
++
++static void xgbe_phy_pcs_power_cycle(struct xgbe_prv_data *pdata)
++{
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++
++ reg |= MDIO_CTRL1_LPOWER;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++
++ usleep_range(75, 100);
++
++ reg &= ~MDIO_CTRL1_LPOWER;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++}
++
++static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
++{
++ /* Assert Rx and Tx ratechange */
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 1);
++}
++
++static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
++{
++ unsigned int wait;
++ u16 status;
++
++ /* Release Rx and Tx ratechange */
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, RATECHANGE, 0);
++
++ /* Wait for Rx and Tx ready */
++ wait = XGBE_RATECHANGE_COUNT;
++ while (wait--) {
++ usleep_range(50, 75);
++
++ status = XSIR0_IOREAD(pdata, SIR0_STATUS);
++ if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
++ XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
++ goto rx_reset;
++ }
++
++ netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
++ status);
++
++rx_reset:
++ /* Perform Rx reset for the DFE changes */
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 1);
++}
++
++static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ /* Set PCS to KR/10G speed */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
++ reg &= ~MDIO_PCS_CTRL2_TYPE;
++ reg |= MDIO_PCS_CTRL2_10GBR;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ reg &= ~MDIO_CTRL1_SPEEDSEL;
++ reg |= MDIO_CTRL1_SPEED10G;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++
++ xgbe_phy_pcs_power_cycle(pdata);
++
++ /* Set SerDes to 10G speed */
++ xgbe_phy_start_ratechange(pdata);
++
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_10000_RATE);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_10000_WORD);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_10000_PLL);
++
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
++ phy_data->cdr_rate[XGBE_SPEED_10000]);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
++ phy_data->tx_amp[XGBE_SPEED_10000]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
++ phy_data->blwc[XGBE_SPEED_10000]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
++ phy_data->pq_skew[XGBE_SPEED_10000]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
++ phy_data->dfe_tap_cfg[XGBE_SPEED_10000]);
++ XRXTX_IOWRITE(pdata, RXTX_REG22,
++ phy_data->dfe_tap_ena[XGBE_SPEED_10000]);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
++}
++
++static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ /* Set PCS to KX/1G speed */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
++ reg &= ~MDIO_PCS_CTRL2_TYPE;
++ reg |= MDIO_PCS_CTRL2_10GBX;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ reg &= ~MDIO_CTRL1_SPEEDSEL;
++ reg |= MDIO_CTRL1_SPEED1G;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++
++ xgbe_phy_pcs_power_cycle(pdata);
++
++ /* Set SerDes to 2.5G speed */
++ xgbe_phy_start_ratechange(pdata);
++
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_2500_RATE);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_2500_WORD);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_2500_PLL);
++
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
++ phy_data->cdr_rate[XGBE_SPEED_2500]);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
++ phy_data->tx_amp[XGBE_SPEED_2500]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
++ phy_data->blwc[XGBE_SPEED_2500]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
++ phy_data->pq_skew[XGBE_SPEED_2500]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
++ phy_data->dfe_tap_cfg[XGBE_SPEED_2500]);
++ XRXTX_IOWRITE(pdata, RXTX_REG22,
++ phy_data->dfe_tap_ena[XGBE_SPEED_2500]);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
++}
++
++static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ /* Set PCS to KX/1G speed */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
++ reg &= ~MDIO_PCS_CTRL2_TYPE;
++ reg |= MDIO_PCS_CTRL2_10GBX;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL2, reg);
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ reg &= ~MDIO_CTRL1_SPEEDSEL;
++ reg |= MDIO_CTRL1_SPEED1G;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++
++ xgbe_phy_pcs_power_cycle(pdata);
++
++ /* Set SerDes to 1G speed */
++ xgbe_phy_start_ratechange(pdata);
++
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, DATARATE, XGBE_SPEED_1000_RATE);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, WORDMODE, XGBE_SPEED_1000_WORD);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, PLLSEL, XGBE_SPEED_1000_PLL);
++
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, CDR_RATE,
++ phy_data->cdr_rate[XGBE_SPEED_1000]);
++ XSIR1_IOWRITE_BITS(pdata, SIR1_SPEED, TXAMP,
++ phy_data->tx_amp[XGBE_SPEED_1000]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG20, BLWC_ENA,
++ phy_data->blwc[XGBE_SPEED_1000]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG114, PQ_REG,
++ phy_data->pq_skew[XGBE_SPEED_1000]);
++ XRXTX_IOWRITE_BITS(pdata, RXTX_REG129, RXDFE_CONFIG,
++ phy_data->dfe_tap_cfg[XGBE_SPEED_1000]);
++ XRXTX_IOWRITE(pdata, RXTX_REG22,
++ phy_data->dfe_tap_ena[XGBE_SPEED_1000]);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
++}
++
++static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ enum xgbe_mode mode;
++ unsigned int reg;
++
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL2);
++ reg &= MDIO_PCS_CTRL2_TYPE;
++
++ if (reg == MDIO_PCS_CTRL2_10GBR) {
++ mode = XGBE_MODE_KR;
++ } else {
++ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
++ mode = XGBE_MODE_KX_2500;
++ else
++ mode = XGBE_MODE_KX_1000;
++ }
++
++ return mode;
++}
++
++static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ enum xgbe_mode mode;
++
++ /* If we are in KR switch to KX, and vice-versa */
++ if (xgbe_phy_cur_mode(pdata) == XGBE_MODE_KR) {
++ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
++ mode = XGBE_MODE_KX_2500;
++ else
++ mode = XGBE_MODE_KX_1000;
++ } else {
++ mode = XGBE_MODE_KR;
++ }
++
++ return mode;
++}
++
++static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
++ int speed)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (speed) {
++ case SPEED_1000:
++ return (phy_data->speed_set == XGBE_SPEEDSET_1000_10000)
++ ? XGBE_MODE_KX_1000 : XGBE_MODE_UNKNOWN;
++ case SPEED_2500:
++ return (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
++ ? XGBE_MODE_KX_2500 : XGBE_MODE_UNKNOWN;
++ case SPEED_10000:
++ return XGBE_MODE_KR;
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_KX_1000:
++ xgbe_phy_kx_1000_mode(pdata);
++ break;
++ case XGBE_MODE_KX_2500:
++ xgbe_phy_kx_2500_mode(pdata);
++ break;
++ case XGBE_MODE_KR:
++ xgbe_phy_kr_mode(pdata);
++ break;
++ default:
++ break;
++ }
++}
++
++static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode, u32 advert)
++{
++ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
++ if (pdata->phy.advertising & advert)
++ return true;
++ } else {
++ enum xgbe_mode cur_mode;
++
++ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
++ if (cur_mode == mode)
++ return true;
++ }
++
++ return false;
++}
++
++static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_KX_1000:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseKX_Full);
++ case XGBE_MODE_KX_2500:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_2500baseX_Full);
++ case XGBE_MODE_KR:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseKR_Full);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (speed) {
++ case SPEED_1000:
++ if (phy_data->speed_set != XGBE_SPEEDSET_1000_10000)
++ return false;
++ return true;
++ case SPEED_2500:
++ if (phy_data->speed_set != XGBE_SPEEDSET_2500_10000)
++ return false;
++ return true;
++ case SPEED_10000:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
++{
++ unsigned int reg;
++
++ *an_restart = 0;
++
++ /* Link status is latched low, so read once to clear
++ * and then read again to get current state
++ */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++
++ return (reg & MDIO_STAT1_LSTATUS) ? 1 : 0;
++}
++
++static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
++{
++ /* Nothing uniquely required for stop */
++}
++
++static int xgbe_phy_start(struct xgbe_prv_data *pdata)
++{
++ /* Nothing uniquely required for start */
++ return 0;
++}
++
++static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
++{
++ unsigned int reg, count;
++
++ /* Perform a software reset of the PCS */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ reg |= MDIO_CTRL1_RESET;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
++
++ count = 50;
++ do {
++ msleep(20);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ } while ((reg & MDIO_CTRL1_RESET) && --count);
++
++ if (reg & MDIO_CTRL1_RESET)
++ return -ETIMEDOUT;
++
++ return 0;
++}
++
++static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
++{
++ /* Nothing uniquely required for exit */
++}
++
++static int xgbe_phy_init(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data;
++ int ret;
++
++ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
++ if (!phy_data)
++ return -ENOMEM;
++
++ /* Retrieve the PHY speedset */
++ ret = device_property_read_u32(pdata->phy_dev, XGBE_SPEEDSET_PROPERTY,
++ &phy_data->speed_set);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_SPEEDSET_PROPERTY);
++ return ret;
++ }
++
++ switch (phy_data->speed_set) {
++ case XGBE_SPEEDSET_1000_10000:
++ case XGBE_SPEEDSET_2500_10000:
++ break;
++ default:
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_SPEEDSET_PROPERTY);
++ return -EINVAL;
++ }
++
++ /* Retrieve the PHY configuration properties */
++ if (device_property_present(pdata->phy_dev, XGBE_BLWC_PROPERTY)) {
++ ret = device_property_read_u32_array(pdata->phy_dev,
++ XGBE_BLWC_PROPERTY,
++ phy_data->blwc,
++ XGBE_SPEEDS);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_BLWC_PROPERTY);
++ return ret;
++ }
++ } else {
++ memcpy(phy_data->blwc, xgbe_phy_blwc,
++ sizeof(phy_data->blwc));
++ }
++
++ if (device_property_present(pdata->phy_dev, XGBE_CDR_RATE_PROPERTY)) {
++ ret = device_property_read_u32_array(pdata->phy_dev,
++ XGBE_CDR_RATE_PROPERTY,
++ phy_data->cdr_rate,
++ XGBE_SPEEDS);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_CDR_RATE_PROPERTY);
++ return ret;
++ }
++ } else {
++ memcpy(phy_data->cdr_rate, xgbe_phy_cdr_rate,
++ sizeof(phy_data->cdr_rate));
++ }
++
++ if (device_property_present(pdata->phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
++ ret = device_property_read_u32_array(pdata->phy_dev,
++ XGBE_PQ_SKEW_PROPERTY,
++ phy_data->pq_skew,
++ XGBE_SPEEDS);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_PQ_SKEW_PROPERTY);
++ return ret;
++ }
++ } else {
++ memcpy(phy_data->pq_skew, xgbe_phy_pq_skew,
++ sizeof(phy_data->pq_skew));
++ }
++
++ if (device_property_present(pdata->phy_dev, XGBE_TX_AMP_PROPERTY)) {
++ ret = device_property_read_u32_array(pdata->phy_dev,
++ XGBE_TX_AMP_PROPERTY,
++ phy_data->tx_amp,
++ XGBE_SPEEDS);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_TX_AMP_PROPERTY);
++ return ret;
++ }
++ } else {
++ memcpy(phy_data->tx_amp, xgbe_phy_tx_amp,
++ sizeof(phy_data->tx_amp));
++ }
++
++ if (device_property_present(pdata->phy_dev, XGBE_DFE_CFG_PROPERTY)) {
++ ret = device_property_read_u32_array(pdata->phy_dev,
++ XGBE_DFE_CFG_PROPERTY,
++ phy_data->dfe_tap_cfg,
++ XGBE_SPEEDS);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_DFE_CFG_PROPERTY);
++ return ret;
++ }
++ } else {
++ memcpy(phy_data->dfe_tap_cfg, xgbe_phy_dfe_tap_cfg,
++ sizeof(phy_data->dfe_tap_cfg));
++ }
++
++ if (device_property_present(pdata->phy_dev, XGBE_DFE_ENA_PROPERTY)) {
++ ret = device_property_read_u32_array(pdata->phy_dev,
++ XGBE_DFE_ENA_PROPERTY,
++ phy_data->dfe_tap_ena,
++ XGBE_SPEEDS);
++ if (ret) {
++ dev_err(pdata->dev, "invalid %s property\n",
++ XGBE_DFE_ENA_PROPERTY);
++ return ret;
++ }
++ } else {
++ memcpy(phy_data->dfe_tap_ena, xgbe_phy_dfe_tap_ena,
++ sizeof(phy_data->dfe_tap_ena));
++ }
++
++ /* Initialize supported features */
++ pdata->phy.supported = SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_Backplane;
++ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
++ switch (phy_data->speed_set) {
++ case XGBE_SPEEDSET_1000_10000:
++ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
++ break;
++ case XGBE_SPEEDSET_2500_10000:
++ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ break;
++ }
++
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
++
++ pdata->phy_data = phy_data;
++
++ return 0;
++}
++
++void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *phy_if)
++{
++ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
++
++ phy_impl->init = xgbe_phy_init;
++ phy_impl->exit = xgbe_phy_exit;
++
++ phy_impl->reset = xgbe_phy_reset;
++ phy_impl->start = xgbe_phy_start;
++ phy_impl->stop = xgbe_phy_stop;
++
++ phy_impl->link_status = xgbe_phy_link_status;
++
++ phy_impl->valid_speed = xgbe_phy_valid_speed;
++
++ phy_impl->use_mode = xgbe_phy_use_mode;
++ phy_impl->set_mode = xgbe_phy_set_mode;
++ phy_impl->get_mode = xgbe_phy_get_mode;
++ phy_impl->switch_mode = xgbe_phy_switch_mode;
++ phy_impl->cur_mode = xgbe_phy_cur_mode;
++
++ phy_impl->an_mode = xgbe_phy_an_mode;
++
++ phy_impl->an_config = xgbe_phy_an_config;
++
++ phy_impl->an_advertising = xgbe_phy_an_advertising;
++
++ phy_impl->an_outcome = xgbe_phy_an_outcome;
++
++ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
++ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
++}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+new file mode 100644
+index 0000000..9d8c9530
+--- /dev/null
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -0,0 +1,3084 @@
++/*
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2016 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/kmod.h>
++#include <linux/mdio.h>
++#include <linux/phy.h>
++
++#include "xgbe.h"
++#include "xgbe-common.h"
++
++#define XGBE_PHY_PORT_SPEED_100 BIT(0)
++#define XGBE_PHY_PORT_SPEED_1000 BIT(1)
++#define XGBE_PHY_PORT_SPEED_2500 BIT(2)
++#define XGBE_PHY_PORT_SPEED_10000 BIT(3)
++
++#define XGBE_MUTEX_RELEASE 0x80000000
++
++#define XGBE_SFP_DIRECT 7
++
++/* I2C target addresses */
++#define XGBE_SFP_SERIAL_ID_ADDRESS 0x50
++#define XGBE_SFP_DIAG_INFO_ADDRESS 0x51
++#define XGBE_SFP_PHY_ADDRESS 0x56
++#define XGBE_GPIO_ADDRESS_PCA9555 0x20
++
++/* SFP sideband signal indicators */
++#define XGBE_GPIO_NO_TX_FAULT BIT(0)
++#define XGBE_GPIO_NO_RATE_SELECT BIT(1)
++#define XGBE_GPIO_NO_MOD_ABSENT BIT(2)
++#define XGBE_GPIO_NO_RX_LOS BIT(3)
++
++/* Rate-change complete wait/retry count */
++#define XGBE_RATECHANGE_COUNT 500
++
++enum xgbe_port_mode {
++ XGBE_PORT_MODE_RSVD = 0,
++ XGBE_PORT_MODE_BACKPLANE,
++ XGBE_PORT_MODE_BACKPLANE_2500,
++ XGBE_PORT_MODE_1000BASE_T,
++ XGBE_PORT_MODE_1000BASE_X,
++ XGBE_PORT_MODE_NBASE_T,
++ XGBE_PORT_MODE_10GBASE_T,
++ XGBE_PORT_MODE_10GBASE_R,
++ XGBE_PORT_MODE_SFP,
++ XGBE_PORT_MODE_MAX,
++};
++
++enum xgbe_conn_type {
++ XGBE_CONN_TYPE_NONE = 0,
++ XGBE_CONN_TYPE_SFP,
++ XGBE_CONN_TYPE_MDIO,
++ XGBE_CONN_TYPE_RSVD1,
++ XGBE_CONN_TYPE_BACKPLANE,
++ XGBE_CONN_TYPE_MAX,
++};
++
++/* SFP/SFP+ related definitions */
++enum xgbe_sfp_comm {
++ XGBE_SFP_COMM_DIRECT = 0,
++ XGBE_SFP_COMM_PCA9545,
++};
++
++enum xgbe_sfp_cable {
++ XGBE_SFP_CABLE_UNKNOWN = 0,
++ XGBE_SFP_CABLE_ACTIVE,
++ XGBE_SFP_CABLE_PASSIVE,
++};
++
++enum xgbe_sfp_base {
++ XGBE_SFP_BASE_UNKNOWN = 0,
++ XGBE_SFP_BASE_1000_T,
++ XGBE_SFP_BASE_1000_SX,
++ XGBE_SFP_BASE_1000_LX,
++ XGBE_SFP_BASE_1000_CX,
++ XGBE_SFP_BASE_10000_SR,
++ XGBE_SFP_BASE_10000_LR,
++ XGBE_SFP_BASE_10000_LRM,
++ XGBE_SFP_BASE_10000_ER,
++ XGBE_SFP_BASE_10000_CR,
++};
++
++enum xgbe_sfp_speed {
++ XGBE_SFP_SPEED_UNKNOWN = 0,
++ XGBE_SFP_SPEED_100_1000,
++ XGBE_SFP_SPEED_1000,
++ XGBE_SFP_SPEED_10000,
++};
++
++/* SFP Serial ID Base ID values relative to an offset of 0 */
++#define XGBE_SFP_BASE_ID 0
++#define XGBE_SFP_ID_SFP 0x03
++
++#define XGBE_SFP_BASE_EXT_ID 1
++#define XGBE_SFP_EXT_ID_SFP 0x04
++
++#define XGBE_SFP_BASE_10GBE_CC 3
++#define XGBE_SFP_BASE_10GBE_CC_SR BIT(4)
++#define XGBE_SFP_BASE_10GBE_CC_LR BIT(5)
++#define XGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
++#define XGBE_SFP_BASE_10GBE_CC_ER BIT(7)
++
++#define XGBE_SFP_BASE_1GBE_CC 6
++#define XGBE_SFP_BASE_1GBE_CC_SX BIT(0)
++#define XGBE_SFP_BASE_1GBE_CC_LX BIT(1)
++#define XGBE_SFP_BASE_1GBE_CC_CX BIT(2)
++#define XGBE_SFP_BASE_1GBE_CC_T BIT(3)
++
++#define XGBE_SFP_BASE_CABLE 8
++#define XGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
++#define XGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
++
++#define XGBE_SFP_BASE_BR 12
++#define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a
++#define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d
++#define XGBE_SFP_BASE_BR_10GBE_MIN 0x64
++#define XGBE_SFP_BASE_BR_10GBE_MAX 0x68
++
++#define XGBE_SFP_BASE_CU_CABLE_LEN 18
++
++#define XGBE_SFP_BASE_VENDOR_NAME 20
++#define XGBE_SFP_BASE_VENDOR_NAME_LEN 16
++#define XGBE_SFP_BASE_VENDOR_PN 40
++#define XGBE_SFP_BASE_VENDOR_PN_LEN 16
++#define XGBE_SFP_BASE_VENDOR_REV 56
++#define XGBE_SFP_BASE_VENDOR_REV_LEN 4
++
++#define XGBE_SFP_BASE_CC 63
++
++/* SFP Serial ID Extended ID values relative to an offset of 64 */
++#define XGBE_SFP_BASE_VENDOR_SN 4
++#define XGBE_SFP_BASE_VENDOR_SN_LEN 16
++
++#define XGBE_SFP_EXTD_DIAG 28
++#define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
++
++#define XGBE_SFP_EXTD_SFF_8472 30
++
++#define XGBE_SFP_EXTD_CC 31
++
++struct xgbe_sfp_eeprom {
++ u8 base[64];
++ u8 extd[32];
++ u8 vendor[32];
++};
++
++#define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
++#define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
++
++struct xgbe_sfp_ascii {
++ union {
++ char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
++ char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1];
++ char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1];
++ char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1];
++ } u;
++};
++
++/* MDIO PHY reset types */
++enum xgbe_mdio_reset {
++ XGBE_MDIO_RESET_NONE = 0,
++ XGBE_MDIO_RESET_I2C_GPIO,
++ XGBE_MDIO_RESET_INT_GPIO,
++ XGBE_MDIO_RESET_MAX,
++};
++
++/* Re-driver related definitions */
++enum xgbe_phy_redrv_if {
++ XGBE_PHY_REDRV_IF_MDIO = 0,
++ XGBE_PHY_REDRV_IF_I2C,
++ XGBE_PHY_REDRV_IF_MAX,
++};
++
++enum xgbe_phy_redrv_model {
++ XGBE_PHY_REDRV_MODEL_4223 = 0,
++ XGBE_PHY_REDRV_MODEL_4227,
++ XGBE_PHY_REDRV_MODEL_MAX,
++};
++
++enum xgbe_phy_redrv_mode {
++ XGBE_PHY_REDRV_MODE_CX = 5,
++ XGBE_PHY_REDRV_MODE_SR = 9,
++};
++
++#define XGBE_PHY_REDRV_MODE_REG 0x12b0
++
++/* PHY related configuration information */
++struct xgbe_phy_data {
++ enum xgbe_port_mode port_mode;
++
++ unsigned int port_id;
++
++ unsigned int port_speeds;
++
++ enum xgbe_conn_type conn_type;
++
++ enum xgbe_mode cur_mode;
++ enum xgbe_mode start_mode;
++
++ unsigned int rrc_count;
++
++ unsigned int mdio_addr;
++
++ unsigned int comm_owned;
++
++ /* SFP Support */
++ enum xgbe_sfp_comm sfp_comm;
++ unsigned int sfp_mux_address;
++ unsigned int sfp_mux_channel;
++
++ unsigned int sfp_gpio_address;
++ unsigned int sfp_gpio_mask;
++ unsigned int sfp_gpio_rx_los;
++ unsigned int sfp_gpio_tx_fault;
++ unsigned int sfp_gpio_mod_absent;
++ unsigned int sfp_gpio_rate_select;
++
++ unsigned int sfp_rx_los;
++ unsigned int sfp_tx_fault;
++ unsigned int sfp_mod_absent;
++ unsigned int sfp_diags;
++ unsigned int sfp_changed;
++ unsigned int sfp_phy_avail;
++ unsigned int sfp_cable_len;
++ enum xgbe_sfp_base sfp_base;
++ enum xgbe_sfp_cable sfp_cable;
++ enum xgbe_sfp_speed sfp_speed;
++ struct xgbe_sfp_eeprom sfp_eeprom;
++
++ /* External PHY support */
++ enum xgbe_mdio_mode phydev_mode;
++ struct mii_bus *mii;
++ struct phy_device *phydev;
++ enum xgbe_mdio_reset mdio_reset;
++ unsigned int mdio_reset_addr;
++ unsigned int mdio_reset_gpio;
++
++ /* Re-driver support */
++ unsigned int redrv;
++ unsigned int redrv_if;
++ unsigned int redrv_addr;
++ unsigned int redrv_lane;
++ unsigned int redrv_model;
++};
++
++/* I2C, MDIO and GPIO lines are muxed, so only one device at a time */
++static DEFINE_MUTEX(xgbe_phy_comm_lock);
++
++static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
++
++static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
++ struct xgbe_i2c_op *i2c_op)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* Be sure we own the bus */
++ if (WARN_ON(!phy_data->comm_owned))
++ return -EIO;
++
++ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
++}
++
++static int xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg,
++ unsigned int val)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_i2c_op i2c_op;
++ __be16 *redrv_val;
++ u8 redrv_data[5], csum;
++ unsigned int i, retry;
++ int ret;
++
++ /* High byte of register contains read/write indicator */
++ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
++ redrv_data[1] = reg & 0xff;
++ redrv_val = (__be16 *)&redrv_data[2];
++ *redrv_val = cpu_to_be16(val);
++
++ /* Calculate 1 byte checksum */
++ csum = 0;
++ for (i = 0; i < 4; i++) {
++ csum += redrv_data[i];
++ if (redrv_data[i] > csum)
++ csum++;
++ }
++ redrv_data[4] = ~csum;
++
++ retry = 1;
++again1:
++ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
++ i2c_op.target = phy_data->redrv_addr;
++ i2c_op.len = sizeof(redrv_data);
++ i2c_op.buf = redrv_data;
++ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if (ret) {
++ if ((ret == -EAGAIN) && retry--)
++ goto again1;
++
++ return ret;
++ }
++
++ retry = 1;
++again2:
++ i2c_op.cmd = XGBE_I2C_CMD_READ;
++ i2c_op.target = phy_data->redrv_addr;
++ i2c_op.len = 1;
++ i2c_op.buf = redrv_data;
++ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if (ret) {
++ if ((ret == -EAGAIN) && retry--)
++ goto again2;
++
++ return ret;
++ }
++
++ if (redrv_data[0] != 0xff) {
++ netif_dbg(pdata, drv, pdata->netdev,
++ "Redriver write checksum error\n");
++ ret = -EIO;
++ }
++
++ return ret;
++}
++
++static int xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target,
++ void *val, unsigned int val_len)
++{
++ struct xgbe_i2c_op i2c_op;
++ int retry, ret;
++
++ retry = 1;
++again:
++	/* Write the specified register */
++ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
++ i2c_op.target = target;
++ i2c_op.len = val_len;
++ i2c_op.buf = val;
++ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if ((ret == -EAGAIN) && retry--)
++ goto again;
++
++ return ret;
++}
++
++static int xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target,
++ void *reg, unsigned int reg_len,
++ void *val, unsigned int val_len)
++{
++ struct xgbe_i2c_op i2c_op;
++ int retry, ret;
++
++ retry = 1;
++again1:
++ /* Set the specified register to read */
++ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
++ i2c_op.target = target;
++ i2c_op.len = reg_len;
++ i2c_op.buf = reg;
++ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if (ret) {
++ if ((ret == -EAGAIN) && retry--)
++ goto again1;
++
++ return ret;
++ }
++
++ retry = 1;
++again2:
++	/* Read the specified register */
++ i2c_op.cmd = XGBE_I2C_CMD_READ;
++ i2c_op.target = target;
++ i2c_op.len = val_len;
++ i2c_op.buf = val;
++ ret = xgbe_phy_i2c_xfer(pdata, &i2c_op);
++ if ((ret == -EAGAIN) && retry--)
++ goto again2;
++
++ return ret;
++}
++
++static int xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_i2c_op i2c_op;
++ u8 mux_channel;
++
++ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
++ return 0;
++
++ /* Select no mux channels */
++ mux_channel = 0;
++ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
++ i2c_op.target = phy_data->sfp_mux_address;
++ i2c_op.len = sizeof(mux_channel);
++ i2c_op.buf = &mux_channel;
++
++ return xgbe_phy_i2c_xfer(pdata, &i2c_op);
++}
++
++static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_i2c_op i2c_op;
++ u8 mux_channel;
++
++ if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT)
++ return 0;
++
++ /* Select desired mux channel */
++ mux_channel = 1 << phy_data->sfp_mux_channel;
++ i2c_op.cmd = XGBE_I2C_CMD_WRITE;
++ i2c_op.target = phy_data->sfp_mux_address;
++ i2c_op.len = sizeof(mux_channel);
++ i2c_op.buf = &mux_channel;
++
++ return xgbe_phy_i2c_xfer(pdata, &i2c_op);
++}
++
++static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ phy_data->comm_owned = 0;
++
++ mutex_unlock(&xgbe_phy_comm_lock);
++}
++
++static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned long timeout;
++ unsigned int mutex_id;
++
++ if (phy_data->comm_owned)
++ return 0;
++
++ /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
++ * the driver needs to take the software mutex and then the hardware
++ * mutexes before being able to use the busses.
++ */
++ mutex_lock(&xgbe_phy_comm_lock);
++
++ /* Clear the mutexes */
++ XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE);
++ XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE);
++
++ /* Mutex formats are the same for I2C and MDIO/GPIO */
++ mutex_id = 0;
++ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
++ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
++
++ timeout = jiffies + (5 * HZ);
++ while (time_before(jiffies, timeout)) {
++ /* Must be all zeroes in order to obtain the mutex */
++ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
++ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
++ usleep_range(100, 200);
++ continue;
++ }
++
++ /* Obtain the mutex */
++ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
++ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
++
++ phy_data->comm_owned = 1;
++ return 0;
++ }
++
++ mutex_unlock(&xgbe_phy_comm_lock);
++
++ netdev_err(pdata->netdev, "unable to obtain hardware mutexes\n");
++
++ return -ETIMEDOUT;
++}
++
++static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr,
++ int reg, u16 val)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (reg & MII_ADDR_C45) {
++ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
++ return -ENOTSUPP;
++ } else {
++ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
++ return -ENOTSUPP;
++ }
++
++ return pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val);
++}
++
++static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, u16 val)
++{
++ __be16 *mii_val;
++ u8 mii_data[3];
++ int ret;
++
++ ret = xgbe_phy_sfp_get_mux(pdata);
++ if (ret)
++ return ret;
++
++ mii_data[0] = reg & 0xff;
++ mii_val = (__be16 *)&mii_data[1];
++ *mii_val = cpu_to_be16(val);
++
++ ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS,
++ mii_data, sizeof(mii_data));
++
++ xgbe_phy_sfp_put_mux(pdata);
++
++ return ret;
++}
++
++static int xgbe_phy_mii_write(struct mii_bus *mii, int addr, int reg, u16 val)
++{
++ struct xgbe_prv_data *pdata = mii->priv;
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ ret = xgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return ret;
++
++ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
++ ret = xgbe_phy_i2c_mii_write(pdata, reg, val);
++ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
++ ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val);
++ else
++ ret = -ENOTSUPP;
++
++ xgbe_phy_put_comm_ownership(pdata);
++
++ return ret;
++}
++
++static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr,
++ int reg)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (reg & MII_ADDR_C45) {
++ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45)
++ return -ENOTSUPP;
++ } else {
++ if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22)
++ return -ENOTSUPP;
++ }
++
++ return pdata->hw_if.read_ext_mii_regs(pdata, addr, reg);
++}
++
++static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg)
++{
++ __be16 mii_val;
++ u8 mii_reg;
++ int ret;
++
++ ret = xgbe_phy_sfp_get_mux(pdata);
++ if (ret)
++ return ret;
++
++ mii_reg = reg;
++ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS,
++ &mii_reg, sizeof(mii_reg),
++ &mii_val, sizeof(mii_val));
++ if (!ret)
++ ret = be16_to_cpu(mii_val);
++
++ xgbe_phy_sfp_put_mux(pdata);
++
++ return ret;
++}
++
++static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
++{
++ struct xgbe_prv_data *pdata = mii->priv;
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ ret = xgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return ret;
++
++ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
++ ret = xgbe_phy_i2c_mii_read(pdata, reg);
++ else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO)
++ ret = xgbe_phy_mdio_mii_read(pdata, addr, reg);
++ else
++ ret = -ENOTSUPP;
++
++ xgbe_phy_put_comm_ownership(pdata);
++
++ return ret;
++}
++
++static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (phy_data->sfp_mod_absent) {
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.advertising = pdata->phy.supported;
++ }
++
++ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
++ pdata->phy.advertising &= ~ADVERTISED_TP;
++ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
++ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
++ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
++ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
++ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
++
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_1000_T:
++ case XGBE_SFP_BASE_1000_SX:
++ case XGBE_SFP_BASE_1000_LX:
++ case XGBE_SFP_BASE_1000_CX:
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.advertising |= ADVERTISED_Autoneg;
++ break;
++ case XGBE_SFP_BASE_10000_SR:
++ case XGBE_SFP_BASE_10000_LR:
++ case XGBE_SFP_BASE_10000_LRM:
++ case XGBE_SFP_BASE_10000_ER:
++ case XGBE_SFP_BASE_10000_CR:
++ default:
++ pdata->phy.speed = SPEED_10000;
++ pdata->phy.duplex = DUPLEX_FULL;
++ pdata->phy.autoneg = AUTONEG_DISABLE;
++ break;
++ }
++
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_1000_T:
++ case XGBE_SFP_BASE_1000_CX:
++ case XGBE_SFP_BASE_10000_CR:
++ pdata->phy.advertising |= ADVERTISED_TP;
++ break;
++ default:
++ pdata->phy.advertising |= ADVERTISED_FIBRE;
++ }
++
++ switch (phy_data->sfp_speed) {
++ case XGBE_SFP_SPEED_100_1000:
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
++ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ break;
++ case XGBE_SFP_SPEED_1000:
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ break;
++ case XGBE_SFP_SPEED_10000:
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
++ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
++ break;
++ default:
++ /* Choose the fastest supported speed */
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
++ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
++ else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
++ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
++ }
++}
++
++static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
++ enum xgbe_sfp_speed sfp_speed)
++{
++ u8 *sfp_base, min, max;
++
++ sfp_base = sfp_eeprom->base;
++
++ switch (sfp_speed) {
++ case XGBE_SFP_SPEED_1000:
++ min = XGBE_SFP_BASE_BR_1GBE_MIN;
++ max = XGBE_SFP_BASE_BR_1GBE_MAX;
++ break;
++ case XGBE_SFP_SPEED_10000:
++ min = XGBE_SFP_BASE_BR_10GBE_MIN;
++ max = XGBE_SFP_BASE_BR_10GBE_MAX;
++ break;
++ default:
++ return false;
++ }
++
++ return ((sfp_base[XGBE_SFP_BASE_BR] >= min) &&
++ (sfp_base[XGBE_SFP_BASE_BR] <= max));
++}
++
++static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (phy_data->phydev) {
++ phy_detach(phy_data->phydev);
++ phy_device_remove(phy_data->phydev);
++ phy_device_free(phy_data->phydev);
++ phy_data->phydev = NULL;
++ }
++}
++
++static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int phy_id = phy_data->phydev->phy_id;
++
++ if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
++ return false;
++
++ /* Enable Base-T AN */
++ phy_write(phy_data->phydev, 0x16, 0x0001);
++ phy_write(phy_data->phydev, 0x00, 0x9140);
++ phy_write(phy_data->phydev, 0x16, 0x0000);
++
++ /* Enable SGMII at 100Base-T/1000Base-T Full Duplex */
++ phy_write(phy_data->phydev, 0x1b, 0x9084);
++ phy_write(phy_data->phydev, 0x09, 0x0e00);
++ phy_write(phy_data->phydev, 0x00, 0x8140);
++ phy_write(phy_data->phydev, 0x04, 0x0d01);
++ phy_write(phy_data->phydev, 0x00, 0x9140);
++
++ phy_data->phydev->supported = PHY_GBIT_FEATURES;
++ phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ phy_data->phydev->advertising = phy_data->phydev->supported;
++
++ netif_dbg(pdata, drv, pdata->netdev,
++ "Finisar PHY quirk in place\n");
++
++ return true;
++}
++
++static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
++{
++ if (xgbe_phy_finisar_phy_quirks(pdata))
++ return;
++}
++
++static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct phy_device *phydev;
++ int ret;
++
++ /* If we already have a PHY, just return */
++ if (phy_data->phydev)
++ return 0;
++
++ /* Check for the use of an external PHY */
++ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE)
++ return 0;
++
++ /* For SFP, only use an external PHY if available */
++ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
++ !phy_data->sfp_phy_avail)
++ return 0;
++
++ /* Create and connect to the PHY device */
++ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr,
++ (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45));
++ if (IS_ERR(phydev)) {
++ netdev_err(pdata->netdev, "get_phy_device failed\n");
++ return -ENODEV;
++ }
++ netif_dbg(pdata, drv, pdata->netdev, "external PHY id is %#010x\n",
++ phydev->phy_id);
++
++ /*TODO: If c45, add request_module based on one of the MMD ids? */
++
++ ret = phy_device_register(phydev);
++ if (ret) {
++ netdev_err(pdata->netdev, "phy_device_register failed\n");
++ phy_device_free(phydev);
++ return ret;
++ }
++
++ ret = phy_attach_direct(pdata->netdev, phydev, phydev->dev_flags,
++ PHY_INTERFACE_MODE_SGMII);
++ if (ret) {
++ netdev_err(pdata->netdev, "phy_attach_direct failed\n");
++ phy_device_remove(phydev);
++ phy_device_free(phydev);
++ return ret;
++ }
++ phy_data->phydev = phydev;
++
++ xgbe_phy_external_phy_quirks(pdata);
++ phydev->advertising &= pdata->phy.advertising;
++
++ phy_start_aneg(phy_data->phydev);
++
++ return 0;
++}
++
++static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ if (!phy_data->sfp_changed)
++ return;
++
++ phy_data->sfp_phy_avail = 0;
++
++ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
++ return;
++
++ /* Check access to the PHY by reading CTRL1 */
++ ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR);
++ if (ret < 0)
++ return;
++
++ /* Successfully accessed the PHY */
++ phy_data->sfp_phy_avail = 1;
++}
++
++static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
++
++ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
++ XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
++ return false;
++
++ if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
++ XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) {
++ phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
++ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
++ phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
++ if (phy_data->sfp_changed)
++ netif_dbg(pdata, drv, pdata->netdev,
++ "Bel-Fuse SFP quirk in place\n");
++ return true;
++ }
++
++ return false;
++}
++
++static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata)
++{
++ if (xgbe_phy_belfuse_parse_quirks(pdata))
++ return true;
++
++ return false;
++}
++
++static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
++ u8 *sfp_base;
++
++ sfp_base = sfp_eeprom->base;
++
++ if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP)
++ return;
++
++ if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP)
++ return;
++
++ if (xgbe_phy_sfp_parse_quirks(pdata))
++ return;
++
++ /* Assume ACTIVE cable unless told it is PASSIVE */
++ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
++ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
++ phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN];
++ } else {
++ phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
++ }
++
++ /* Determine the type of SFP */
++ if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR)
++ phy_data->sfp_base = XGBE_SFP_BASE_10000_SR;
++ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR)
++ phy_data->sfp_base = XGBE_SFP_BASE_10000_LR;
++ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM)
++ phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM;
++ else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER)
++ phy_data->sfp_base = XGBE_SFP_BASE_10000_ER;
++ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX)
++ phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
++ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX)
++ phy_data->sfp_base = XGBE_SFP_BASE_1000_LX;
++ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX)
++ phy_data->sfp_base = XGBE_SFP_BASE_1000_CX;
++ else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T)
++ phy_data->sfp_base = XGBE_SFP_BASE_1000_T;
++ else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) &&
++ xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000))
++ phy_data->sfp_base = XGBE_SFP_BASE_10000_CR;
++
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_1000_T:
++ phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000;
++ break;
++ case XGBE_SFP_BASE_1000_SX:
++ case XGBE_SFP_BASE_1000_LX:
++ case XGBE_SFP_BASE_1000_CX:
++ phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
++ break;
++ case XGBE_SFP_BASE_10000_SR:
++ case XGBE_SFP_BASE_10000_LR:
++ case XGBE_SFP_BASE_10000_LRM:
++ case XGBE_SFP_BASE_10000_ER:
++ case XGBE_SFP_BASE_10000_CR:
++ phy_data->sfp_speed = XGBE_SFP_SPEED_10000;
++ break;
++ default:
++ break;
++ }
++}
++
++static void xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata,
++ struct xgbe_sfp_eeprom *sfp_eeprom)
++{
++ struct xgbe_sfp_ascii sfp_ascii;
++ char *sfp_data = (char *)&sfp_ascii;
++
++ netif_dbg(pdata, drv, pdata->netdev, "SFP detected:\n");
++ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
++ XGBE_SFP_BASE_VENDOR_NAME_LEN);
++ sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0';
++ netif_dbg(pdata, drv, pdata->netdev, " vendor: %s\n",
++ sfp_data);
++
++ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
++ XGBE_SFP_BASE_VENDOR_PN_LEN);
++ sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0';
++ netif_dbg(pdata, drv, pdata->netdev, " part number: %s\n",
++ sfp_data);
++
++ memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV],
++ XGBE_SFP_BASE_VENDOR_REV_LEN);
++ sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0';
++ netif_dbg(pdata, drv, pdata->netdev, " revision level: %s\n",
++ sfp_data);
++
++ memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN],
++ XGBE_SFP_BASE_VENDOR_SN_LEN);
++ sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0';
++ netif_dbg(pdata, drv, pdata->netdev, " serial number: %s\n",
++ sfp_data);
++}
++
++static bool xgbe_phy_sfp_verify_eeprom(u8 cc_in, u8 *buf, unsigned int len)
++{
++ u8 cc;
++
++ for (cc = 0; len; buf++, len--)
++ cc += *buf;
++
++ return (cc == cc_in) ? true : false;
++}
++
++static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_sfp_eeprom sfp_eeprom;
++ u8 eeprom_addr;
++ int ret;
++
++ ret = xgbe_phy_sfp_get_mux(pdata);
++ if (ret) {
++ netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
++ return ret;
++ }
++
++ /* Read the SFP serial ID eeprom */
++ eeprom_addr = 0;
++ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
++ &eeprom_addr, sizeof(eeprom_addr),
++ &sfp_eeprom, sizeof(sfp_eeprom));
++ if (ret) {
++ netdev_err(pdata->netdev, "I2C error reading SFP EEPROM\n");
++ goto put;
++ }
++
++ /* Validate the contents read */
++ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC],
++ sfp_eeprom.base,
++ sizeof(sfp_eeprom.base) - 1)) {
++ ret = -EINVAL;
++ goto put;
++ }
++
++ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC],
++ sfp_eeprom.extd,
++ sizeof(sfp_eeprom.extd) - 1)) {
++ ret = -EINVAL;
++ goto put;
++ }
++
++ /* Check for an added or changed SFP */
++ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
++ phy_data->sfp_changed = 1;
++
++ if (netif_msg_drv(pdata))
++ xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom);
++
++ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
++
++ if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) {
++ u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG];
++
++ if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
++ phy_data->sfp_diags = 1;
++ }
++
++ xgbe_phy_free_phy_device(pdata);
++ } else {
++ phy_data->sfp_changed = 0;
++ }
++
++put:
++ xgbe_phy_sfp_put_mux(pdata);
++
++ return ret;
++}
++
++static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int gpio_input;
++ u8 gpio_reg, gpio_ports[2];
++ int ret;
++
++ /* Read the input port registers */
++ gpio_reg = 0;
++ ret = xgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
++ &gpio_reg, sizeof(gpio_reg),
++ gpio_ports, sizeof(gpio_ports));
++ if (ret) {
++ netdev_err(pdata->netdev, "I2C error reading SFP GPIOs\n");
++ return;
++ }
++
++ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
++
++ if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) {
++ /* No GPIO, just assume the module is present for now */
++ phy_data->sfp_mod_absent = 0;
++ } else {
++ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
++ phy_data->sfp_mod_absent = 0;
++ }
++
++ if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) &&
++ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
++ phy_data->sfp_rx_los = 1;
++
++ if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) &&
++ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
++ phy_data->sfp_tx_fault = 1;
++}
++
++static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ xgbe_phy_free_phy_device(pdata);
++
++ phy_data->sfp_mod_absent = 1;
++ phy_data->sfp_phy_avail = 0;
++ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
++}
++
++static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
++{
++ phy_data->sfp_rx_los = 0;
++ phy_data->sfp_tx_fault = 0;
++ phy_data->sfp_mod_absent = 1;
++ phy_data->sfp_diags = 0;
++ phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
++ phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
++ phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
++}
++
++static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ /* Reset the SFP signals and info */
++ xgbe_phy_sfp_reset(phy_data);
++
++ ret = xgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return;
++
++ /* Read the SFP signals and check for module presence */
++ xgbe_phy_sfp_signals(pdata);
++ if (phy_data->sfp_mod_absent) {
++ xgbe_phy_sfp_mod_absent(pdata);
++ goto put;
++ }
++
++ ret = xgbe_phy_sfp_read_eeprom(pdata);
++ if (ret) {
++ /* Treat any error as if there isn't an SFP plugged in */
++ xgbe_phy_sfp_reset(phy_data);
++ xgbe_phy_sfp_mod_absent(pdata);
++ goto put;
++ }
++
++ xgbe_phy_sfp_parse_eeprom(pdata);
++
++ xgbe_phy_sfp_external_phy(pdata);
++
++put:
++ xgbe_phy_sfp_phy_settings(pdata);
++
++ xgbe_phy_put_comm_ownership(pdata);
++}
++
++static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ u16 lcl_adv = 0, rmt_adv = 0;
++ u8 fc;
++
++ pdata->phy.tx_pause = 0;
++ pdata->phy.rx_pause = 0;
++
++ if (!phy_data->phydev)
++ return;
++
++ if (phy_data->phydev->advertising & ADVERTISED_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_CAP;
++ if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_ASYM;
++
++ if (phy_data->phydev->pause) {
++ pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ rmt_adv |= LPA_PAUSE_CAP;
++ }
++ if (phy_data->phydev->asym_pause) {
++ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++ rmt_adv |= LPA_PAUSE_ASYM;
++ }
++
++ fc = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
++ if (fc & FLOW_CTRL_TX)
++ pdata->phy.tx_pause = 1;
++ if (fc & FLOW_CTRL_RX)
++ pdata->phy.rx_pause = 1;
++}
++
++static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
++{
++ enum xgbe_mode mode;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_TP;
++
++ /* Use external PHY to determine flow control */
++ if (pdata->phy.pause_autoneg)
++ xgbe_phy_phydev_flowctrl(pdata);
++
++ switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
++ case XGBE_SGMII_AN_LINK_SPEED_100:
++ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
++ pdata->phy.lp_advertising |= ADVERTISED_100baseT_Full;
++ mode = XGBE_MODE_SGMII_100;
++ } else {
++ /* Half-duplex not supported */
++ pdata->phy.lp_advertising |= ADVERTISED_100baseT_Half;
++ mode = XGBE_MODE_UNKNOWN;
++ }
++ break;
++ case XGBE_SGMII_AN_LINK_SPEED_1000:
++ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full;
++ mode = XGBE_MODE_SGMII_1000;
++ } else {
++ /* Half-duplex not supported */
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half;
++ mode = XGBE_MODE_UNKNOWN;
++ }
++ break;
++ default:
++ mode = XGBE_MODE_UNKNOWN;
++ }
++
++ return mode;
++}
++
++static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
++{
++ enum xgbe_mode mode;
++ unsigned int ad_reg, lp_reg;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_FIBRE;
++
++ /* Compare Advertisement and Link Partner register */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY);
++ if (lp_reg & 0x100)
++ pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ if (lp_reg & 0x80)
++ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++
++ if (pdata->phy.pause_autoneg) {
++ /* Set flow control based on auto-negotiation result */
++ pdata->phy.tx_pause = 0;
++ pdata->phy.rx_pause = 0;
++
++ if (ad_reg & lp_reg & 0x100) {
++ pdata->phy.tx_pause = 1;
++ pdata->phy.rx_pause = 1;
++ } else if (ad_reg & lp_reg & 0x80) {
++ if (ad_reg & 0x100)
++ pdata->phy.rx_pause = 1;
++ else if (lp_reg & 0x100)
++ pdata->phy.tx_pause = 1;
++ }
++ }
++
++ if (lp_reg & 0x40)
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half;
++ if (lp_reg & 0x20)
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full;
++
++ /* Half duplex is not supported */
++ ad_reg &= lp_reg;
++ mode = (ad_reg & 0x20) ? XGBE_MODE_X : XGBE_MODE_UNKNOWN;
++
++ return mode;
++}
++
++static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ enum xgbe_mode mode;
++ unsigned int ad_reg, lp_reg;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++
++ /* Use external PHY to determine flow control */
++ if (pdata->phy.pause_autoneg)
++ xgbe_phy_phydev_flowctrl(pdata);
++
++ /* Compare Advertisement and Link Partner register 2 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
++ if (lp_reg & 0x80)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ if (lp_reg & 0x20)
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++
++ ad_reg &= lp_reg;
++ if (ad_reg & 0x80) {
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ mode = XGBE_MODE_KR;
++ break;
++ default:
++ mode = XGBE_MODE_SFI;
++ break;
++ }
++ } else if (ad_reg & 0x20) {
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ mode = XGBE_MODE_KX_1000;
++ break;
++ case XGBE_PORT_MODE_1000BASE_X:
++ mode = XGBE_MODE_X;
++ break;
++ case XGBE_PORT_MODE_SFP:
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_1000_T:
++ if (phy_data->phydev &&
++ (phy_data->phydev->speed == SPEED_100))
++ mode = XGBE_MODE_SGMII_100;
++ else
++ mode = XGBE_MODE_SGMII_1000;
++ break;
++ case XGBE_SFP_BASE_1000_SX:
++ case XGBE_SFP_BASE_1000_LX:
++ case XGBE_SFP_BASE_1000_CX:
++ default:
++ mode = XGBE_MODE_X;
++ break;
++ }
++ break;
++ default:
++ if (phy_data->phydev &&
++ (phy_data->phydev->speed == SPEED_100))
++ mode = XGBE_MODE_SGMII_100;
++ else
++ mode = XGBE_MODE_SGMII_1000;
++ break;
++ }
++ } else {
++ mode = XGBE_MODE_UNKNOWN;
++ }
++
++ /* Compare Advertisement and Link Partner register 3 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
++ if (lp_reg & 0xc000)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++
++ return mode;
++}
++
++static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
++{
++ enum xgbe_mode mode;
++ unsigned int ad_reg, lp_reg;
++
++ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
++ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++
++ /* Compare Advertisement and Link Partner register 1 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
++ if (lp_reg & 0x400)
++ pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ if (lp_reg & 0x800)
++ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++
++ if (pdata->phy.pause_autoneg) {
++ /* Set flow control based on auto-negotiation result */
++ pdata->phy.tx_pause = 0;
++ pdata->phy.rx_pause = 0;
++
++ if (ad_reg & lp_reg & 0x400) {
++ pdata->phy.tx_pause = 1;
++ pdata->phy.rx_pause = 1;
++ } else if (ad_reg & lp_reg & 0x800) {
++ if (ad_reg & 0x400)
++ pdata->phy.rx_pause = 1;
++ else if (lp_reg & 0x400)
++ pdata->phy.tx_pause = 1;
++ }
++ }
++
++ /* Compare Advertisement and Link Partner register 2 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
++ if (lp_reg & 0x80)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ if (lp_reg & 0x20)
++ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++
++ ad_reg &= lp_reg;
++ if (ad_reg & 0x80)
++ mode = XGBE_MODE_KR;
++ else if (ad_reg & 0x20)
++ mode = XGBE_MODE_KX_1000;
++ else
++ mode = XGBE_MODE_UNKNOWN;
++
++ /* Compare Advertisement and Link Partner register 3 */
++ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
++ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
++ if (lp_reg & 0xc000)
++ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++
++ return mode;
++}
++
++static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
++{
++ switch (pdata->an_mode) {
++ case XGBE_AN_MODE_CL73:
++ return xgbe_phy_an73_outcome(pdata);
++ case XGBE_AN_MODE_CL73_REDRV:
++ return xgbe_phy_an73_redrv_outcome(pdata);
++ case XGBE_AN_MODE_CL37:
++ return xgbe_phy_an37_outcome(pdata);
++ case XGBE_AN_MODE_CL37_SGMII:
++ return xgbe_phy_an37_sgmii_outcome(pdata);
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int advertising;
++
++ /* Without a re-driver, just return current advertising */
++ if (!phy_data->redrv)
++ return pdata->phy.advertising;
++
++ /* With the KR re-driver we need to advertise a single speed */
++ advertising = pdata->phy.advertising;
++ advertising &= ~ADVERTISED_1000baseKX_Full;
++ advertising &= ~ADVERTISED_10000baseKR_Full;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ case XGBE_PORT_MODE_1000BASE_T:
++ case XGBE_PORT_MODE_1000BASE_X:
++ case XGBE_PORT_MODE_NBASE_T:
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ case XGBE_PORT_MODE_10GBASE_T:
++ if (phy_data->phydev &&
++ (phy_data->phydev->speed == SPEED_10000))
++ advertising |= ADVERTISED_10000baseKR_Full;
++ else
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ case XGBE_PORT_MODE_10GBASE_R:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ case XGBE_PORT_MODE_SFP:
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_1000_T:
++ case XGBE_SFP_BASE_1000_SX:
++ case XGBE_SFP_BASE_1000_LX:
++ case XGBE_SFP_BASE_1000_CX:
++ advertising |= ADVERTISED_1000baseKX_Full;
++ break;
++ default:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ }
++ break;
++ default:
++ advertising |= ADVERTISED_10000baseKR_Full;
++ break;
++ }
++
++ return advertising;
++}
++
++static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ ret = xgbe_phy_find_phy_device(pdata);
++ if (ret)
++ return ret;
++
++ if (!phy_data->phydev)
++ return 0;
++
++ phy_data->phydev->autoneg = pdata->phy.autoneg;
++ phy_data->phydev->advertising = phy_data->phydev->supported &
++ pdata->phy.advertising;
++
++ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
++ phy_data->phydev->speed = pdata->phy.speed;
++ phy_data->phydev->duplex = pdata->phy.duplex;
++ }
++
++ ret = phy_start_aneg(phy_data->phydev);
++
++ return ret;
++}
++
++static enum xgbe_an_mode xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data)
++{
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_1000_T:
++ return XGBE_AN_MODE_CL37_SGMII;
++ case XGBE_SFP_BASE_1000_SX:
++ case XGBE_SFP_BASE_1000_LX:
++ case XGBE_SFP_BASE_1000_CX:
++ return XGBE_AN_MODE_CL37;
++ default:
++ return XGBE_AN_MODE_NONE;
++ }
++}
++
++static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* A KR re-driver will always require CL73 AN */
++ if (phy_data->redrv)
++ return XGBE_AN_MODE_CL73_REDRV;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ return XGBE_AN_MODE_CL73;
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ return XGBE_AN_MODE_NONE;
++ case XGBE_PORT_MODE_1000BASE_T:
++ return XGBE_AN_MODE_CL37_SGMII;
++ case XGBE_PORT_MODE_1000BASE_X:
++ return XGBE_AN_MODE_CL37;
++ case XGBE_PORT_MODE_NBASE_T:
++ return XGBE_AN_MODE_CL37_SGMII;
++ case XGBE_PORT_MODE_10GBASE_T:
++ return XGBE_AN_MODE_CL73;
++ case XGBE_PORT_MODE_10GBASE_R:
++ return XGBE_AN_MODE_NONE;
++ case XGBE_PORT_MODE_SFP:
++ return xgbe_phy_an_sfp_mode(phy_data);
++ default:
++ return XGBE_AN_MODE_NONE;
++ }
++}
++
++static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata,
++ enum xgbe_phy_redrv_mode mode)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ u16 redrv_reg, redrv_val;
++
++ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
++ redrv_val = (u16)mode;
++
++ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
++ redrv_reg, redrv_val);
++}
++
++static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata,
++ enum xgbe_phy_redrv_mode mode)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int redrv_reg;
++ int ret;
++
++ /* Calculate the register to write */
++ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
++
++ ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode);
++
++ return ret;
++}
++
++static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ enum xgbe_phy_redrv_mode mode;
++ int ret;
++
++ if (!phy_data->redrv)
++ return;
++
++ mode = XGBE_PHY_REDRV_MODE_CX;
++ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) &&
++ (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) &&
++ (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR))
++ mode = XGBE_PHY_REDRV_MODE_SR;
++
++ ret = xgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return;
++
++ if (phy_data->redrv_if)
++ xgbe_phy_set_redrv_mode_i2c(pdata, mode);
++ else
++ xgbe_phy_set_redrv_mode_mdio(pdata, mode);
++
++ xgbe_phy_put_comm_ownership(pdata);
++}
++
++static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
++{
++ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ return;
++
++ /* Log if a previous command did not complete */
++ netif_dbg(pdata, link, pdata->netdev,
++ "firmware mailbox not ready for command\n");
++}
++
++static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
++{
++ unsigned int wait;
++
++ /* Wait for command to complete */
++ wait = XGBE_RATECHANGE_COUNT;
++ while (wait--) {
++ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ return;
++
++ usleep_range(1000, 2000);
++ }
++
++ netif_dbg(pdata, link, pdata->netdev,
++ "firmware mailbox command did not complete\n");
++}
++
++static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
++{
++ unsigned int s0;
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* Receiver Reset Cycle */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
++}
++
++static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
++
++ netif_dbg(pdata, link, pdata->netdev, "phy powered off\n");
++}
++
++static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 10G/SFI */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
++ if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++ } else {
++ if (phy_data->sfp_cable_len <= 1)
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
++ else if (phy_data->sfp_cable_len <= 3)
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
++ else
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
++ }
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_SFI;
++
++ netif_dbg(pdata, link, pdata->netdev, "10GbE SFI mode set\n");
++}
++
++static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 1G/X */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_X;
++
++ netif_dbg(pdata, link, pdata->netdev, "1GbE X mode set\n");
++}
++
++static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 1G/SGMII */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_SGMII_1000;
++
++ netif_dbg(pdata, link, pdata->netdev, "1GbE SGMII mode set\n");
++}
++
++static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 1G/SGMII */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_SGMII_100;
++
++ netif_dbg(pdata, link, pdata->netdev, "100MbE SGMII mode set\n");
++}
++
++static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 10G/KR */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_KR;
++
++ netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
++}
++
++static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 2.5G/KX */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 2);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_KX_2500;
++
++ netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
++}
++
++static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int s0;
++
++ xgbe_phy_set_redrv_mode(pdata);
++
++ xgbe_phy_start_ratechange(pdata);
++
++ /* 1G/KX */
++ s0 = 0;
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
++
++ /* Call FW to make the change */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
++
++ xgbe_phy_complete_ratechange(pdata);
++
++ phy_data->cur_mode = XGBE_MODE_KX_1000;
++
++ netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
++}
++
++static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ return phy_data->cur_mode;
++}
++
++static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* No switching if not 10GBase-T */
++ if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T)
++ return xgbe_phy_cur_mode(pdata);
++
++ switch (xgbe_phy_cur_mode(pdata)) {
++ case XGBE_MODE_SGMII_100:
++ case XGBE_MODE_SGMII_1000:
++ return XGBE_MODE_KR;
++ case XGBE_MODE_KR:
++ default:
++ return XGBE_MODE_SGMII_1000;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata)
++{
++ return XGBE_MODE_KX_2500;
++}
++
++static enum xgbe_mode xgbe_phy_switch_bp_mode(struct xgbe_prv_data *pdata)
++{
++ /* If we are in KR switch to KX, and vice-versa */
++ switch (xgbe_phy_cur_mode(pdata)) {
++ case XGBE_MODE_KX_1000:
++ return XGBE_MODE_KR;
++ case XGBE_MODE_KR:
++ default:
++ return XGBE_MODE_KX_1000;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ return xgbe_phy_switch_bp_mode(pdata);
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ return xgbe_phy_switch_bp_2500_mode(pdata);
++ case XGBE_PORT_MODE_1000BASE_T:
++ case XGBE_PORT_MODE_NBASE_T:
++ case XGBE_PORT_MODE_10GBASE_T:
++ return xgbe_phy_switch_baset_mode(pdata);
++ case XGBE_PORT_MODE_1000BASE_X:
++ case XGBE_PORT_MODE_10GBASE_R:
++ case XGBE_PORT_MODE_SFP:
++ /* No switching, so just return current mode */
++ return xgbe_phy_cur_mode(pdata);
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_1000:
++ return XGBE_MODE_X;
++ case SPEED_10000:
++ return XGBE_MODE_KR;
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_100:
++ return XGBE_MODE_SGMII_100;
++ case SPEED_1000:
++ return XGBE_MODE_SGMII_1000;
++ case SPEED_10000:
++ return XGBE_MODE_KR;
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_100:
++ return XGBE_MODE_SGMII_100;
++ case SPEED_1000:
++ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
++ return XGBE_MODE_SGMII_1000;
++ else
++ return XGBE_MODE_X;
++ case SPEED_10000:
++ case SPEED_UNKNOWN:
++ return XGBE_MODE_SFI;
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_get_bp_2500_mode(int speed)
++{
++ switch (speed) {
++ case SPEED_2500:
++ return XGBE_MODE_KX_2500;
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_get_bp_mode(int speed)
++{
++ switch (speed) {
++ case SPEED_1000:
++ return XGBE_MODE_KX_1000;
++ case SPEED_10000:
++ return XGBE_MODE_KR;
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata,
++ int speed)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ return xgbe_phy_get_bp_mode(speed);
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ return xgbe_phy_get_bp_2500_mode(speed);
++ case XGBE_PORT_MODE_1000BASE_T:
++ case XGBE_PORT_MODE_NBASE_T:
++ case XGBE_PORT_MODE_10GBASE_T:
++ return xgbe_phy_get_baset_mode(phy_data, speed);
++ case XGBE_PORT_MODE_1000BASE_X:
++ case XGBE_PORT_MODE_10GBASE_R:
++ return xgbe_phy_get_basex_mode(phy_data, speed);
++ case XGBE_PORT_MODE_SFP:
++ return xgbe_phy_get_sfp_mode(phy_data, speed);
++ default:
++ return XGBE_MODE_UNKNOWN;
++ }
++}
++
++static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_KX_1000:
++ xgbe_phy_kx_1000_mode(pdata);
++ break;
++ case XGBE_MODE_KX_2500:
++ xgbe_phy_kx_2500_mode(pdata);
++ break;
++ case XGBE_MODE_KR:
++ xgbe_phy_kr_mode(pdata);
++ break;
++ case XGBE_MODE_SGMII_100:
++ xgbe_phy_sgmii_100_mode(pdata);
++ break;
++ case XGBE_MODE_SGMII_1000:
++ xgbe_phy_sgmii_1000_mode(pdata);
++ break;
++ case XGBE_MODE_X:
++ xgbe_phy_x_mode(pdata);
++ break;
++ case XGBE_MODE_SFI:
++ xgbe_phy_sfi_mode(pdata);
++ break;
++ default:
++ break;
++ }
++}
++
++static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode, u32 advert)
++{
++ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
++ if (pdata->phy.advertising & advert)
++ return true;
++ } else {
++ enum xgbe_mode cur_mode;
++
++ cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed);
++ if (cur_mode == mode)
++ return true;
++ }
++
++ return false;
++}
++
++static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_X:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case XGBE_MODE_KR:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseT_Full);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_SGMII_100:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_100baseT_Full);
++ case XGBE_MODE_SGMII_1000:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case XGBE_MODE_KR:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseT_Full);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (mode) {
++ case XGBE_MODE_X:
++ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
++ return false;
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case XGBE_MODE_SGMII_100:
++ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
++ return false;
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_100baseT_Full);
++ case XGBE_MODE_SGMII_1000:
++ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
++ return false;
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseT_Full);
++ case XGBE_MODE_SFI:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseT_Full);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_KX_2500:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_2500baseX_Full);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata,
++ enum xgbe_mode mode)
++{
++ switch (mode) {
++ case XGBE_MODE_KX_1000:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_1000baseKX_Full);
++ case XGBE_MODE_KR:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_10000baseKR_Full);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ return xgbe_phy_use_bp_mode(pdata, mode);
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ return xgbe_phy_use_bp_2500_mode(pdata, mode);
++ case XGBE_PORT_MODE_1000BASE_T:
++ case XGBE_PORT_MODE_NBASE_T:
++ case XGBE_PORT_MODE_10GBASE_T:
++ return xgbe_phy_use_baset_mode(pdata, mode);
++ case XGBE_PORT_MODE_1000BASE_X:
++ case XGBE_PORT_MODE_10GBASE_R:
++ return xgbe_phy_use_basex_mode(pdata, mode);
++ case XGBE_PORT_MODE_SFP:
++ return xgbe_phy_use_sfp_mode(pdata, mode);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_1000:
++ return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X);
++ case SPEED_10000:
++ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_100:
++ case SPEED_1000:
++ return true;
++ case SPEED_10000:
++ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data,
++ int speed)
++{
++ switch (speed) {
++ case SPEED_100:
++ return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000);
++ case SPEED_1000:
++ return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) ||
++ (phy_data->sfp_speed == XGBE_SFP_SPEED_1000));
++ case SPEED_10000:
++ return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000);
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed_bp_2500_mode(int speed)
++{
++ switch (speed) {
++ case SPEED_2500:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed_bp_mode(int speed)
++{
++ switch (speed) {
++ case SPEED_1000:
++ case SPEED_10000:
++ return true;
++ default:
++ return false;
++ }
++}
++
++static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ return xgbe_phy_valid_speed_bp_mode(speed);
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ return xgbe_phy_valid_speed_bp_2500_mode(speed);
++ case XGBE_PORT_MODE_1000BASE_T:
++ case XGBE_PORT_MODE_NBASE_T:
++ case XGBE_PORT_MODE_10GBASE_T:
++ return xgbe_phy_valid_speed_baset_mode(phy_data, speed);
++ case XGBE_PORT_MODE_1000BASE_X:
++ case XGBE_PORT_MODE_10GBASE_R:
++ return xgbe_phy_valid_speed_basex_mode(phy_data, speed);
++ case XGBE_PORT_MODE_SFP:
++ return xgbe_phy_valid_speed_sfp_mode(phy_data, speed);
++ default:
++ return false;
++ }
++}
++
++static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++ int ret;
++
++ *an_restart = 0;
++
++ if (phy_data->port_mode == XGBE_PORT_MODE_SFP) {
++ /* Check SFP signals */
++ xgbe_phy_sfp_detect(pdata);
++
++ if (phy_data->sfp_changed) {
++ *an_restart = 1;
++ return 0;
++ }
++
++ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
++ return 0;
++ }
++
++ if (phy_data->phydev) {
++ /* Check external PHY */
++ ret = phy_read_status(phy_data->phydev);
++ if (ret < 0)
++ return 0;
++
++ if ((pdata->phy.autoneg == AUTONEG_ENABLE) &&
++ !phy_aneg_done(phy_data->phydev))
++ return 0;
++
++ if (!phy_data->phydev->link)
++ return 0;
++ }
++
++ /* Link status is latched low, so read once to clear
++ * and then read again to get current state
++ */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
++ if (reg & MDIO_STAT1_LSTATUS)
++ return 1;
++
++ /* No link, attempt a receiver reset cycle */
++ if (phy_data->rrc_count++) {
++ phy_data->rrc_count = 0;
++ xgbe_phy_rrc(pdata);
++ }
++
++ return 0;
++}
++
++static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ reg = XP_IOREAD(pdata, XP_PROP_3);
++
++ phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
++ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
++
++ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
++
++ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_RX_LOS);
++ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_TX_FAULT);
++ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_MOD_ABS);
++ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
++ GPIO_RATE_SELECT);
++
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(pdata->dev, "SFP: gpio_address=%#x\n",
++ phy_data->sfp_gpio_address);
++ dev_dbg(pdata->dev, "SFP: gpio_mask=%#x\n",
++ phy_data->sfp_gpio_mask);
++ dev_dbg(pdata->dev, "SFP: gpio_rx_los=%u\n",
++ phy_data->sfp_gpio_rx_los);
++ dev_dbg(pdata->dev, "SFP: gpio_tx_fault=%u\n",
++ phy_data->sfp_gpio_tx_fault);
++ dev_dbg(pdata->dev, "SFP: gpio_mod_absent=%u\n",
++ phy_data->sfp_gpio_mod_absent);
++ dev_dbg(pdata->dev, "SFP: gpio_rate_select=%u\n",
++ phy_data->sfp_gpio_rate_select);
++ }
++}
++
++static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg, mux_addr_hi, mux_addr_lo;
++
++ reg = XP_IOREAD(pdata, XP_PROP_4);
++
++ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
++ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
++ if (mux_addr_lo == XGBE_SFP_DIRECT)
++ return;
++
++ phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
++ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
++ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
++
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(pdata->dev, "SFP: mux_address=%#x\n",
++ phy_data->sfp_mux_address);
++ dev_dbg(pdata->dev, "SFP: mux_channel=%u\n",
++ phy_data->sfp_mux_channel);
++ }
++}
++
++static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata)
++{
++ xgbe_phy_sfp_comm_setup(pdata);
++ xgbe_phy_sfp_gpio_setup(pdata);
++}
++
++static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int ret;
++
++ ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio);
++ if (ret)
++ return ret;
++
++ ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio);
++
++ return ret;
++}
++
++static int xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ u8 gpio_reg, gpio_ports[2], gpio_data[3];
++ int ret;
++
++ /* Read the output port registers */
++ gpio_reg = 2;
++ ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr,
++ &gpio_reg, sizeof(gpio_reg),
++ gpio_ports, sizeof(gpio_ports));
++ if (ret)
++ return ret;
++
++ /* Prepare to write the GPIO data */
++ gpio_data[0] = 2;
++ gpio_data[1] = gpio_ports[0];
++ gpio_data[2] = gpio_ports[1];
++
++ /* Set the GPIO pin */
++ if (phy_data->mdio_reset_gpio < 8)
++ gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8));
++ else
++ gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8));
++
++ /* Write the output port registers */
++ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
++ gpio_data, sizeof(gpio_data));
++ if (ret)
++ return ret;
++
++ /* Clear the GPIO pin */
++ if (phy_data->mdio_reset_gpio < 8)
++ gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
++ else
++ gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 8));
++
++ /* Write the output port registers */
++ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr,
++ gpio_data, sizeof(gpio_data));
++
++ return ret;
++}
++
++static int xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
++ return 0;
++
++ ret = xgbe_phy_get_comm_ownership(pdata);
++ if (ret)
++ return ret;
++
++ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO)
++ ret = xgbe_phy_i2c_mdio_reset(pdata);
++ else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO)
++ ret = xgbe_phy_int_mdio_reset(pdata);
++
++ xgbe_phy_put_comm_ownership(pdata);
++
++ return ret;
++}
++
++static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
++{
++ if (!phy_data->redrv)
++ return false;
++
++ if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX)
++ return true;
++
++ switch (phy_data->redrv_model) {
++ case XGBE_PHY_REDRV_MODEL_4223:
++ if (phy_data->redrv_lane > 3)
++ return true;
++ break;
++ case XGBE_PHY_REDRV_MODEL_4227:
++ if (phy_data->redrv_lane > 1)
++ return true;
++ break;
++ default:
++ return true;
++ }
++
++ return false;
++}
++
++static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ unsigned int reg;
++
++ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
++ return 0;
++
++ reg = XP_IOREAD(pdata, XP_PROP_3);
++ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
++ switch (phy_data->mdio_reset) {
++ case XGBE_MDIO_RESET_NONE:
++ case XGBE_MDIO_RESET_I2C_GPIO:
++ case XGBE_MDIO_RESET_INT_GPIO:
++ break;
++ default:
++ dev_err(pdata->dev, "unsupported MDIO reset (%#x)\n",
++ phy_data->mdio_reset);
++ return -EINVAL;
++ }
++
++ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
++ phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
++ XP_GET_BITS(reg, XP_PROP_3,
++ MDIO_RESET_I2C_ADDR);
++ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
++ MDIO_RESET_I2C_GPIO);
++ } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) {
++ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
++ MDIO_RESET_INT_GPIO);
++ }
++
++ return 0;
++}
++
++static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
++ return false;
++ break;
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)
++ return false;
++ break;
++ case XGBE_PORT_MODE_1000BASE_T:
++ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000))
++ return false;
++ break;
++ case XGBE_PORT_MODE_1000BASE_X:
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ return false;
++ break;
++ case XGBE_PORT_MODE_NBASE_T:
++ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500))
++ return false;
++ break;
++ case XGBE_PORT_MODE_10GBASE_T:
++ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
++ return false;
++ break;
++ case XGBE_PORT_MODE_10GBASE_R:
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
++ return false;
++ break;
++ case XGBE_PORT_MODE_SFP:
++ if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) ||
++ (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000))
++ return false;
++ break;
++ default:
++ break;
++ }
++
++ return true;
++}
++
++static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_BACKPLANE:
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE)
++ return false;
++ break;
++ case XGBE_PORT_MODE_1000BASE_T:
++ case XGBE_PORT_MODE_1000BASE_X:
++ case XGBE_PORT_MODE_NBASE_T:
++ case XGBE_PORT_MODE_10GBASE_T:
++ case XGBE_PORT_MODE_10GBASE_R:
++ if (phy_data->conn_type == XGBE_CONN_TYPE_MDIO)
++ return false;
++ break;
++ case XGBE_PORT_MODE_SFP:
++ if (phy_data->conn_type == XGBE_CONN_TYPE_SFP)
++ return false;
++ break;
++ default:
++ break;
++ }
++
++ return true;
++}
++
++static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
++{
++ unsigned int reg;
++
++ reg = XP_IOREAD(pdata, XP_PROP_0);
++ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
++ return false;
++ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
++ return false;
++
++ return true;
++}
++
++static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* If we have an external PHY, free it */
++ xgbe_phy_free_phy_device(pdata);
++
++ /* Reset SFP data */
++ xgbe_phy_sfp_reset(phy_data);
++ xgbe_phy_sfp_mod_absent(pdata);
++
++ /* Power off the PHY */
++ xgbe_phy_power_off(pdata);
++
++ /* Stop the I2C controller */
++ pdata->i2c_if.i2c_stop(pdata);
++}
++
++static int xgbe_phy_start(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ int ret;
++
++ /* Start the I2C controller */
++ ret = pdata->i2c_if.i2c_start(pdata);
++ if (ret)
++ return ret;
++
++ /* Start in highest supported mode */
++ xgbe_phy_set_mode(pdata, phy_data->start_mode);
++
++ /* After starting the I2C controller, we can check for an SFP */
++ switch (phy_data->port_mode) {
++ case XGBE_PORT_MODE_SFP:
++ xgbe_phy_sfp_detect(pdata);
++ break;
++ default:
++ break;
++ }
++
++ /* If we have an external PHY, start it */
++ ret = xgbe_phy_find_phy_device(pdata);
++ if (ret)
++ goto err_i2c;
++
++ return 0;
++
++err_i2c:
++ pdata->i2c_if.i2c_stop(pdata);
++
++ return ret;
++}
++
++static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ enum xgbe_mode cur_mode;
++ int ret;
++
++ /* Reset by power cycling the PHY */
++ cur_mode = phy_data->cur_mode;
++ xgbe_phy_power_off(pdata);
++ xgbe_phy_set_mode(pdata, cur_mode);
++
++ if (!phy_data->phydev)
++ return 0;
++
++ /* Reset the external PHY */
++ ret = xgbe_phy_mdio_reset(pdata);
++ if (ret)
++ return ret;
++
++ return phy_init_hw(phy_data->phydev);
++}
++
++static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ /* Unregister for driving external PHYs */
++ mdiobus_unregister(phy_data->mii);
++}
++
++static int xgbe_phy_init(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data;
++ struct mii_bus *mii;
++ unsigned int reg;
++ int ret;
++
++ /* Check if enabled */
++ if (!xgbe_phy_port_enabled(pdata)) {
++ dev_info(pdata->dev, "device is not enabled\n");
++ return -ENODEV;
++ }
++
++ /* Initialize the I2C controller */
++ ret = pdata->i2c_if.i2c_init(pdata);
++ if (ret)
++ return ret;
++
++ phy_data = devm_kzalloc(pdata->dev, sizeof(*phy_data), GFP_KERNEL);
++ if (!phy_data)
++ return -ENOMEM;
++ pdata->phy_data = phy_data;
++
++ reg = XP_IOREAD(pdata, XP_PROP_0);
++ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
++ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
++ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
++ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
++ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode);
++ dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id);
++ dev_dbg(pdata->dev, "port speeds=%#x\n", phy_data->port_speeds);
++ dev_dbg(pdata->dev, "conn type=%u\n", phy_data->conn_type);
++ dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr);
++ }
++
++ reg = XP_IOREAD(pdata, XP_PROP_4);
++ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
++ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
++ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
++ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
++ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
++ if (phy_data->redrv && netif_msg_probe(pdata)) {
++ dev_dbg(pdata->dev, "redrv present\n");
++ dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if);
++ dev_dbg(pdata->dev, "redrv addr=%#x\n", phy_data->redrv_addr);
++ dev_dbg(pdata->dev, "redrv lane=%u\n", phy_data->redrv_lane);
++ dev_dbg(pdata->dev, "redrv model=%u\n", phy_data->redrv_model);
++ }
++
++ /* Validate the connection requested */
++ if (xgbe_phy_conn_type_mismatch(pdata)) {
++ dev_err(pdata->dev, "phy mode/connection mismatch (%#x/%#x)\n",
++ phy_data->port_mode, phy_data->conn_type);
++ return -EINVAL;
++ }
++
++ /* Validate the mode requested */
++ if (xgbe_phy_port_mode_mismatch(pdata)) {
++ dev_err(pdata->dev, "phy mode/speed mismatch (%#x/%#x)\n",
++ phy_data->port_mode, phy_data->port_speeds);
++ return -EINVAL;
++ }
++
++ /* Check for and validate MDIO reset support */
++ ret = xgbe_phy_mdio_reset_setup(pdata);
++ if (ret)
++ return ret;
++
++ /* Validate the re-driver information */
++ if (xgbe_phy_redrv_error(phy_data)) {
++ dev_err(pdata->dev, "phy re-driver settings error\n");
++ return -EINVAL;
++ }
++ pdata->kr_redrv = phy_data->redrv;
++
++ /* Indicate current mode is unknown */
++ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
++
++ /* Initialize supported features */
++ pdata->phy.supported = 0;
++
++ switch (phy_data->port_mode) {
++ /* Backplane support */
++ case XGBE_PORT_MODE_BACKPLANE:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_Backplane;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
++ phy_data->start_mode = XGBE_MODE_KX_1000;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
++ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |=
++ SUPPORTED_10000baseR_FEC;
++ phy_data->start_mode = XGBE_MODE_KR;
++ }
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
++ break;
++ case XGBE_PORT_MODE_BACKPLANE_2500:
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_Backplane;
++ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ phy_data->start_mode = XGBE_MODE_KX_2500;
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
++ break;
++
++ /* MDIO 1GBase-T support */
++ case XGBE_PORT_MODE_1000BASE_T:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_1000;
++ }
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
++ break;
++
++ /* MDIO Base-X support */
++ case XGBE_PORT_MODE_1000BASE_X:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_FIBRE;
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_X;
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
++ break;
++
++ /* MDIO NBase-T support */
++ case XGBE_PORT_MODE_NBASE_T:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_1000;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
++ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ phy_data->start_mode = XGBE_MODE_KX_2500;
++ }
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
++ break;
++
++ /* 10GBase-T support */
++ case XGBE_PORT_MODE_10GBASE_T:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_1000;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_KR;
++ }
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
++ break;
++
++ /* 10GBase-R support */
++ case XGBE_PORT_MODE_10GBASE_R:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
++ phy_data->start_mode = XGBE_MODE_SFI;
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
++ break;
++
++ /* SFP support */
++ case XGBE_PORT_MODE_SFP:
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ pdata->phy.supported |= SUPPORTED_FIBRE;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_100;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SGMII_1000;
++ }
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ phy_data->start_mode = XGBE_MODE_SFI;
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ pdata->phy.supported |=
++ SUPPORTED_10000baseR_FEC;
++ }
++
++ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
++
++ xgbe_phy_sfp_setup(pdata);
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (netif_msg_probe(pdata))
++ dev_dbg(pdata->dev, "phy supported=%#x\n",
++ pdata->phy.supported);
++
++ if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) &&
++ (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) {
++ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
++ phy_data->phydev_mode);
++ if (ret) {
++ dev_err(pdata->dev,
++ "mdio port/clause not compatible (%d/%u)\n",
++ phy_data->mdio_addr, phy_data->phydev_mode);
++ return -EINVAL;
++ }
++ }
++
++ if (phy_data->redrv && !phy_data->redrv_if) {
++ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
++ XGBE_MDIO_MODE_CL22);
++ if (ret) {
++ dev_err(pdata->dev,
++ "redriver mdio port not compatible (%u)\n",
++ phy_data->redrv_addr);
++ return -EINVAL;
++ }
++ }
++
++ /* Register for driving external PHYs */
++ mii = devm_mdiobus_alloc(pdata->dev);
++ if (!mii) {
++ dev_err(pdata->dev, "mdiobus_alloc failed\n");
++ return -ENOMEM;
++ }
++
++ mii->priv = pdata;
++ mii->name = "amd-xgbe-mii";
++ mii->read = xgbe_phy_mii_read;
++ mii->write = xgbe_phy_mii_write;
++ mii->parent = pdata->dev;
++ mii->phy_mask = ~0;
++ snprintf(mii->id, sizeof(mii->id), "%s", dev_name(pdata->dev));
++ ret = mdiobus_register(mii);
++ if (ret) {
++ dev_err(pdata->dev, "mdiobus_register failed\n");
++ return ret;
++ }
++ phy_data->mii = mii;
++
++ return 0;
++}
++
++void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
++{
++ struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
++
++ phy_impl->init = xgbe_phy_init;
++ phy_impl->exit = xgbe_phy_exit;
++
++ phy_impl->reset = xgbe_phy_reset;
++ phy_impl->start = xgbe_phy_start;
++ phy_impl->stop = xgbe_phy_stop;
++
++ phy_impl->link_status = xgbe_phy_link_status;
++
++ phy_impl->valid_speed = xgbe_phy_valid_speed;
++
++ phy_impl->use_mode = xgbe_phy_use_mode;
++ phy_impl->set_mode = xgbe_phy_set_mode;
++ phy_impl->get_mode = xgbe_phy_get_mode;
++ phy_impl->switch_mode = xgbe_phy_switch_mode;
++ phy_impl->cur_mode = xgbe_phy_cur_mode;
++
++ phy_impl->an_mode = xgbe_phy_an_mode;
++
++ phy_impl->an_config = xgbe_phy_an_config;
++
++ phy_impl->an_advertising = xgbe_phy_an_advertising;
++
++ phy_impl->an_outcome = xgbe_phy_an_outcome;
++}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+new file mode 100644
+index 0000000..84d4c51
+--- /dev/null
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+@@ -0,0 +1,642 @@
++/*
++ * AMD 10Gb Ethernet driver
++ *
++ * This file is available to you under your choice of the following two
++ * licenses:
++ *
++ * License 1: GPLv2
++ *
++ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
++ *
++ * This file is free software; you may copy, redistribute and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This file is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * License 2: Modified BSD
++ *
++ * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions are met:
++ * * Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * * Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ * * Neither the name of Advanced Micro Devices, Inc. nor the
++ * names of its contributors may be used to endorse or promote products
++ * derived from this software without specific prior written permission.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
++ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
++ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
++ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
++ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
++ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ * This file incorporates work covered by the following copyright and
++ * permission notice:
++ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
++ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
++ * Inc. unless otherwise expressly agreed to in writing between Synopsys
++ * and you.
++ *
++ * The Software IS NOT an item of Licensed Software or Licensed Product
++ * under any End User Software License Agreement or Agreement for Licensed
++ * Product with Synopsys or any supplement thereto. Permission is hereby
++ * granted, free of charge, to any person obtaining a copy of this software
++ * annotated with this license and the Software, to deal in the Software
++ * without restriction, including without limitation the rights to use,
++ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
++ * of the Software, and to permit persons to whom the Software is furnished
++ * to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included
++ * in all copies or substantial portions of the Software.
++ *
++ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
++ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
++ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
++ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
++ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
++ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
++ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
++ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
++ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
++ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
++ * THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/module.h>
++#include <linux/device.h>
++#include <linux/platform_device.h>
++#include <linux/spinlock.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/io.h>
++#include <linux/of.h>
++#include <linux/of_net.h>
++#include <linux/of_address.h>
++#include <linux/of_platform.h>
++#include <linux/of_device.h>
++#include <linux/clk.h>
++#include <linux/property.h>
++#include <linux/acpi.h>
++#include <linux/mdio.h>
++
++#include "xgbe.h"
++#include "xgbe-common.h"
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id xgbe_acpi_match[];
++
++static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
++{
++ const struct acpi_device_id *id;
++
++ id = acpi_match_device(xgbe_acpi_match, pdata->dev);
++
++ return id ? (struct xgbe_version_data *)id->driver_data : NULL;
++}
++
++static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
++{
++ struct device *dev = pdata->dev;
++ u32 property;
++ int ret;
++
++ /* Obtain the system clock setting */
++ ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
++ if (ret) {
++ dev_err(dev, "unable to obtain %s property\n",
++ XGBE_ACPI_DMA_FREQ);
++ return ret;
++ }
++ pdata->sysclk_rate = property;
++
++ /* Obtain the PTP clock setting */
++ ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
++ if (ret) {
++ dev_err(dev, "unable to obtain %s property\n",
++ XGBE_ACPI_PTP_FREQ);
++ return ret;
++ }
++ pdata->ptpclk_rate = property;
++
++ return 0;
++}
++#else /* CONFIG_ACPI */
++static struct xgbe_version_data *xgbe_acpi_vdata(struct xgbe_prv_data *pdata)
++{
++ return NULL;
++}
++
++static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
++{
++ return -EINVAL;
++}
++#endif /* CONFIG_ACPI */
++
++#ifdef CONFIG_OF
++static const struct of_device_id xgbe_of_match[];
++
++static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
++{
++ const struct of_device_id *id;
++
++ id = of_match_device(xgbe_of_match, pdata->dev);
++
++ return id ? (struct xgbe_version_data *)id->data : NULL;
++}
++
++static int xgbe_of_support(struct xgbe_prv_data *pdata)
++{
++ struct device *dev = pdata->dev;
++
++ /* Obtain the system clock setting */
++ pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
++ if (IS_ERR(pdata->sysclk)) {
++ dev_err(dev, "dma devm_clk_get failed\n");
++ return PTR_ERR(pdata->sysclk);
++ }
++ pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
++
++ /* Obtain the PTP clock setting */
++ pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
++ if (IS_ERR(pdata->ptpclk)) {
++ dev_err(dev, "ptp devm_clk_get failed\n");
++ return PTR_ERR(pdata->ptpclk);
++ }
++ pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
++
++ return 0;
++}
++
++static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
++{
++ struct device *dev = pdata->dev;
++ struct device_node *phy_node;
++ struct platform_device *phy_pdev;
++
++ phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
++ if (phy_node) {
++ /* Old style device tree:
++ * The XGBE and PHY resources are separate
++ */
++ phy_pdev = of_find_device_by_node(phy_node);
++ of_node_put(phy_node);
++ } else {
++ /* New style device tree:
++ * The XGBE and PHY resources are grouped together with
++ * the PHY resources listed last
++ */
++ get_device(dev);
++ phy_pdev = pdata->platdev;
++ }
++
++ return phy_pdev;
++}
++#else /* CONFIG_OF */
++static struct xgbe_version_data *xgbe_of_vdata(struct xgbe_prv_data *pdata)
++{
++ return NULL;
++}
++
++static int xgbe_of_support(struct xgbe_prv_data *pdata)
++{
++ return -EINVAL;
++}
++
++static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
++{
++ return NULL;
++}
++#endif /* CONFIG_OF */
++
++static unsigned int xgbe_resource_count(struct platform_device *pdev,
++ unsigned int type)
++{
++ unsigned int count;
++ int i;
++
++ for (i = 0, count = 0; i < pdev->num_resources; i++) {
++ struct resource *res = &pdev->resource[i];
++
++ if (type == resource_type(res))
++ count++;
++ }
++
++ return count;
++}
++
++static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
++{
++ struct platform_device *phy_pdev;
++
++ if (pdata->use_acpi) {
++ get_device(pdata->dev);
++ phy_pdev = pdata->platdev;
++ } else {
++ phy_pdev = xgbe_of_get_phy_pdev(pdata);
++ }
++
++ return phy_pdev;
++}
++
++static struct xgbe_version_data *xgbe_get_vdata(struct xgbe_prv_data *pdata)
++{
++ return pdata->use_acpi ? xgbe_acpi_vdata(pdata)
++ : xgbe_of_vdata(pdata);
++}
++
++static int xgbe_platform_probe(struct platform_device *pdev)
++{
++ struct xgbe_prv_data *pdata;
++ struct device *dev = &pdev->dev;
++ struct platform_device *phy_pdev;
++ struct resource *res;
++ const char *phy_mode;
++ unsigned int phy_memnum, phy_irqnum;
++ unsigned int dma_irqnum, dma_irqend;
++ enum dev_dma_attr attr;
++ int ret;
++
++ pdata = xgbe_alloc_pdata(dev);
++ if (IS_ERR(pdata)) {
++ ret = PTR_ERR(pdata);
++ goto err_alloc;
++ }
++
++ pdata->platdev = pdev;
++ pdata->adev = ACPI_COMPANION(dev);
++ platform_set_drvdata(pdev, pdata);
++
++ /* Check if we should use ACPI or DT */
++ pdata->use_acpi = dev->of_node ? 0 : 1;
++
++ /* Get the version data */
++ pdata->vdata = xgbe_get_vdata(pdata);
++
++ phy_pdev = xgbe_get_phy_pdev(pdata);
++ if (!phy_pdev) {
++ dev_err(dev, "unable to obtain phy device\n");
++ ret = -EINVAL;
++ goto err_phydev;
++ }
++ pdata->phy_platdev = phy_pdev;
++ pdata->phy_dev = &phy_pdev->dev;
++
++ if (pdev == phy_pdev) {
++ /* New style device tree or ACPI:
++ * The XGBE and PHY resources are grouped together with
++ * the PHY resources listed last
++ */
++ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
++ phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
++ dma_irqnum = 1;
++ dma_irqend = phy_irqnum;
++ } else {
++ /* Old style device tree:
++ * The XGBE and PHY resources are separate
++ */
++ phy_memnum = 0;
++ phy_irqnum = 0;
++ dma_irqnum = 1;
++ dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ);
++ }
++
++ /* Obtain the mmio areas for the device */
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pdata->xgmac_regs)) {
++ dev_err(dev, "xgmac ioremap failed\n");
++ ret = PTR_ERR(pdata->xgmac_regs);
++ goto err_io;
++ }
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pdata->xpcs_regs)) {
++ dev_err(dev, "xpcs ioremap failed\n");
++ ret = PTR_ERR(pdata->xpcs_regs);
++ goto err_io;
++ }
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
++
++ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
++ pdata->rxtx_regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pdata->rxtx_regs)) {
++ dev_err(dev, "rxtx ioremap failed\n");
++ ret = PTR_ERR(pdata->rxtx_regs);
++ goto err_io;
++ }
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
++
++ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
++ pdata->sir0_regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pdata->sir0_regs)) {
++ dev_err(dev, "sir0 ioremap failed\n");
++ ret = PTR_ERR(pdata->sir0_regs);
++ goto err_io;
++ }
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
++
++ res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
++ pdata->sir1_regs = devm_ioremap_resource(dev, res);
++ if (IS_ERR(pdata->sir1_regs)) {
++ dev_err(dev, "sir1 ioremap failed\n");
++ ret = PTR_ERR(pdata->sir1_regs);
++ goto err_io;
++ }
++ if (netif_msg_probe(pdata))
++ dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
++
++ /* Retrieve the MAC address */
++ ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
++ pdata->mac_addr,
++ sizeof(pdata->mac_addr));
++ if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
++ dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
++ if (!ret)
++ ret = -EINVAL;
++ goto err_io;
++ }
++
++ /* Retrieve the PHY mode - it must be "xgmii" */
++ ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
++ &phy_mode);
++ if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
++ dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
++ if (!ret)
++ ret = -EINVAL;
++ goto err_io;
++ }
++ pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
++
++ /* Check for per channel interrupt support */
++ if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY)) {
++ pdata->per_channel_irq = 1;
++ pdata->channel_irq_mode = XGBE_IRQ_MODE_EDGE;
++ }
++
++ /* Obtain device settings unique to ACPI/OF */
++ if (pdata->use_acpi)
++ ret = xgbe_acpi_support(pdata);
++ else
++ ret = xgbe_of_support(pdata);
++ if (ret)
++ goto err_io;
++
++ /* Set the DMA coherency values */
++ attr = device_get_dma_attr(dev);
++ if (attr == DEV_DMA_NOT_SUPPORTED) {
++ dev_err(dev, "DMA is not supported");
++ ret = -ENODEV;
++ goto err_io;
++ }
++ pdata->coherent = (attr == DEV_DMA_COHERENT);
++ if (pdata->coherent) {
++ pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
++ pdata->arcache = XGBE_DMA_OS_ARCACHE;
++ pdata->awcache = XGBE_DMA_OS_AWCACHE;
++ } else {
++ pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
++ pdata->arcache = XGBE_DMA_SYS_ARCACHE;
++ pdata->awcache = XGBE_DMA_SYS_AWCACHE;
++ }
++
++ /* Set the maximum fifo amounts */
++ pdata->tx_max_fifo_size = pdata->vdata->tx_max_fifo_size;
++ pdata->rx_max_fifo_size = pdata->vdata->rx_max_fifo_size;
++
++ /* Set the hardware channel and queue counts */
++ xgbe_set_counts(pdata);
++
++ /* Always have XGMAC and XPCS (auto-negotiation) interrupts */
++ pdata->irq_count = 2;
++
++ /* Get the device interrupt */
++ ret = platform_get_irq(pdev, 0);
++ if (ret < 0) {
++ dev_err(dev, "platform_get_irq 0 failed\n");
++ goto err_io;
++ }
++ pdata->dev_irq = ret;
++
++ /* Get the per channel DMA interrupts */
++ if (pdata->per_channel_irq) {
++ unsigned int i, max = ARRAY_SIZE(pdata->channel_irq);
++
++ for (i = 0; (i < max) && (dma_irqnum < dma_irqend); i++) {
++ ret = platform_get_irq(pdata->platdev, dma_irqnum++);
++ if (ret < 0) {
++ netdev_err(pdata->netdev,
++ "platform_get_irq %u failed\n",
++ dma_irqnum - 1);
++ goto err_io;
++ }
++
++ pdata->channel_irq[i] = ret;
++ }
++
++ pdata->channel_irq_count = max;
++
++ pdata->irq_count += max;
++ }
++
++ /* Get the auto-negotiation interrupt */
++ ret = platform_get_irq(phy_pdev, phy_irqnum++);
++ if (ret < 0) {
++ dev_err(dev, "platform_get_irq phy 0 failed\n");
++ goto err_io;
++ }
++ pdata->an_irq = ret;
++
++ /* Configure the netdev resource */
++ ret = xgbe_config_netdev(pdata);
++ if (ret)
++ goto err_io;
++
++ netdev_notice(pdata->netdev, "net device enabled\n");
++
++ return 0;
++
++err_io:
++ platform_device_put(phy_pdev);
++
++err_phydev:
++ xgbe_free_pdata(pdata);
++
++err_alloc:
++ dev_notice(dev, "net device not enabled\n");
++
++ return ret;
++}
++
++static int xgbe_platform_remove(struct platform_device *pdev)
++{
++ struct xgbe_prv_data *pdata = platform_get_drvdata(pdev);
++
++ xgbe_deconfig_netdev(pdata);
++
++ platform_device_put(pdata->phy_platdev);
++
++ xgbe_free_pdata(pdata);
++
++ return 0;
++}
++
++#ifdef CONFIG_PM_SLEEP
++static int xgbe_platform_suspend(struct device *dev)
++{
++ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
++ struct net_device *netdev = pdata->netdev;
++ int ret = 0;
++
++ DBGPR("-->xgbe_suspend\n");
++
++ if (netif_running(netdev))
++ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
++
++ pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
++ pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
++
++ DBGPR("<--xgbe_suspend\n");
++
++ return ret;
++}
++
++static int xgbe_platform_resume(struct device *dev)
++{
++ struct xgbe_prv_data *pdata = dev_get_drvdata(dev);
++ struct net_device *netdev = pdata->netdev;
++ int ret = 0;
++
++ DBGPR("-->xgbe_resume\n");
++
++ pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
++ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
++
++ if (netif_running(netdev)) {
++ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
++
++ /* Schedule a restart in case the link or phy state changed
++ * while we were powered down.
++ */
++ schedule_work(&pdata->restart_work);
++ }
++
++ DBGPR("<--xgbe_resume\n");
++
++ return ret;
++}
++#endif /* CONFIG_PM_SLEEP */
++
++static const struct xgbe_version_data xgbe_v1 = {
++ .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v1,
++ .xpcs_access = XGBE_XPCS_ACCESS_V1,
++ .tx_max_fifo_size = 81920,
++ .rx_max_fifo_size = 81920,
++ .tx_tstamp_workaround = 1,
++};
++
++#ifdef CONFIG_ACPI
++static const struct acpi_device_id xgbe_acpi_match[] = {
++ { .id = "AMDI8001",
++ .driver_data = (kernel_ulong_t)&xgbe_v1 },
++ {},
++};
++
++MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
++#endif
++
++#ifdef CONFIG_OF
++static const struct of_device_id xgbe_of_match[] = {
++ { .compatible = "amd,xgbe-seattle-v1a",
++ .data = &xgbe_v1 },
++ {},
++};
++
++MODULE_DEVICE_TABLE(of, xgbe_of_match);
++#endif
++
++static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
++ xgbe_platform_suspend, xgbe_platform_resume);
++
++static struct platform_driver xgbe_driver = {
++ .driver = {
++ .name = XGBE_DRV_NAME,
++#ifdef CONFIG_ACPI
++ .acpi_match_table = xgbe_acpi_match,
++#endif
++#ifdef CONFIG_OF
++ .of_match_table = xgbe_of_match,
++#endif
++ .pm = &xgbe_platform_pm_ops,
++ },
++ .probe = xgbe_platform_probe,
++ .remove = xgbe_platform_remove,
++};
++
++int xgbe_platform_init(void)
++{
++ return platform_driver_register(&xgbe_driver);
++}
++
++void xgbe_platform_exit(void)
++{
++ platform_driver_unregister(&xgbe_driver);
++}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+old mode 100644
+new mode 100755
+index b03e4f5..a533a6c
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+@@ -122,7 +122,7 @@
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+
+-static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
++static u64 xgbe_cc_read(const struct cyclecounter *cc)
+ {
+ struct xgbe_prv_data *pdata = container_of(cc,
+ struct xgbe_prv_data,
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+old mode 100644
+new mode 100755
+index 5dd17dc..0010881
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -127,9 +127,10 @@
+ #include <linux/timecounter.h>
+ #include <linux/net_tstamp.h>
+ #include <net/dcbnl.h>
++#include <linux/completion.h>
+
+ #define XGBE_DRV_NAME "amd-xgbe"
+-#define XGBE_DRV_VERSION "1.0.2"
++#define XGBE_DRV_VERSION "1.0.3"
+ #define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+ /* Descriptor related defines */
+@@ -158,7 +159,8 @@
+
+ #define XGBE_MAX_DMA_CHANNELS 16
+ #define XGBE_MAX_QUEUES 16
+-#define XGBE_DMA_STOP_TIMEOUT 5
++#define XGBE_PRIORITY_QUEUES 8
++#define XGBE_DMA_STOP_TIMEOUT 1
+
+ /* DMA cache settings - Outer sharable, write-back, write-allocate */
+ #define XGBE_DMA_OS_AXDOMAIN 0x2
+@@ -170,6 +172,10 @@
+ #define XGBE_DMA_SYS_ARCACHE 0x0
+ #define XGBE_DMA_SYS_AWCACHE 0x0
+
++/* DMA channel interrupt modes */
++#define XGBE_IRQ_MODE_EDGE 0
++#define XGBE_IRQ_MODE_LEVEL 1
++
+ #define XGBE_DMA_INTERRUPT_MASK 0x31c7
+
+ #define XGMAC_MIN_PACKET 60
+@@ -177,18 +183,19 @@
+ #define XGMAC_MAX_STD_PACKET 1518
+ #define XGMAC_JUMBO_PACKET_MTU 9000
+ #define XGMAC_MAX_JUMBO_PACKET 9018
++#define XGMAC_ETH_PREAMBLE (12 + 8) /* Inter-frame gap + preamble */
++
++#define XGMAC_PFC_DATA_LEN 46
++#define XGMAC_PFC_DELAYS 14000
++
++#define XGMAC_PRIO_QUEUES(_cnt) \
++ min_t(unsigned int, IEEE_8021QAZ_MAX_TCS, (_cnt))
+
+ /* Common property names */
+ #define XGBE_MAC_ADDR_PROPERTY "mac-address"
+ #define XGBE_PHY_MODE_PROPERTY "phy-mode"
+ #define XGBE_DMA_IRQS_PROPERTY "amd,per-channel-interrupt"
+ #define XGBE_SPEEDSET_PROPERTY "amd,speed-set"
+-#define XGBE_BLWC_PROPERTY "amd,serdes-blwc"
+-#define XGBE_CDR_RATE_PROPERTY "amd,serdes-cdr-rate"
+-#define XGBE_PQ_SKEW_PROPERTY "amd,serdes-pq-skew"
+-#define XGBE_TX_AMP_PROPERTY "amd,serdes-tx-amp"
+-#define XGBE_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config"
+-#define XGBE_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable"
+
+ /* Device-tree clock names */
+ #define XGBE_DMA_CLOCK "dma_clk"
+@@ -198,6 +205,20 @@
+ #define XGBE_ACPI_DMA_FREQ "amd,dma-freq"
+ #define XGBE_ACPI_PTP_FREQ "amd,ptp-freq"
+
++/* PCI BAR mapping */
++#define XGBE_XGMAC_BAR 0
++#define XGBE_XPCS_BAR 1
++#define XGBE_MAC_PROP_OFFSET 0x1d000
++#define XGBE_I2C_CTRL_OFFSET 0x1e000
++
++/* PCI MSIx support */
++#define XGBE_MSIX_BASE_COUNT 4
++#define XGBE_MSIX_MIN_COUNT (XGBE_MSIX_BASE_COUNT + 1)
++
++/* PCI clock frequencies */
++#define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
++#define XGBE_V2_PTP_CLOCK_FREQ 125000000 /* 125 MHz */
++
+ /* Timestamp support - values based on 50MHz PTP clock
+ * 50MHz => 20 nsec
+ */
+@@ -208,7 +229,12 @@
+ #define XGMAC_DRIVER_CONTEXT 1
+ #define XGMAC_IOCTL_CONTEXT 2
+
+-#define XGBE_FIFO_MAX 81920
++#define XGMAC_FIFO_MIN_ALLOC 2048
++#define XGMAC_FIFO_UNIT 256
++#define XGMAC_FIFO_ALIGN(_x) \
++ (((_x) + XGMAC_FIFO_UNIT - 1) & ~(XGMAC_FIFO_UNIT - 1))
++#define XGMAC_FIFO_FC_OFF 2048
++#define XGMAC_FIFO_FC_MIN 4096
+
+ #define XGBE_TC_MIN_QUANTUM 10
+
+@@ -233,6 +259,14 @@
+ /* Flow control queue count */
+ #define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
++/* Flow control threshold units */
++#define XGMAC_FLOW_CONTROL_UNIT 512
++#define XGMAC_FLOW_CONTROL_ALIGN(_x) \
++ (((_x) + XGMAC_FLOW_CONTROL_UNIT - 1) & ~(XGMAC_FLOW_CONTROL_UNIT - 1))
++#define XGMAC_FLOW_CONTROL_VALUE(_x) \
++ (((_x) < 1024) ? 0 : ((_x) / XGMAC_FLOW_CONTROL_UNIT) - 2)
++#define XGMAC_FLOW_CONTROL_MAX 33280
++
+ /* Maximum MAC address hash table size (256 bits = 8 bytes) */
+ #define XGBE_MAC_HASH_TABLE_SIZE 8
+
+@@ -244,46 +278,19 @@
+
+ /* Auto-negotiation */
+ #define XGBE_AN_MS_TIMEOUT 500
+-#define XGBE_LINK_TIMEOUT 10
+-
+-#define XGBE_AN_INT_CMPLT 0x01
+-#define XGBE_AN_INC_LINK 0x02
+-#define XGBE_AN_PG_RCV 0x04
+-#define XGBE_AN_INT_MASK 0x07
+-
+-/* Rate-change complete wait/retry count */
+-#define XGBE_RATECHANGE_COUNT 500
+-
+-/* Default SerDes settings */
+-#define XGBE_SPEED_10000_BLWC 0
+-#define XGBE_SPEED_10000_CDR 0x7
+-#define XGBE_SPEED_10000_PLL 0x1
+-#define XGBE_SPEED_10000_PQ 0x12
+-#define XGBE_SPEED_10000_RATE 0x0
+-#define XGBE_SPEED_10000_TXAMP 0xa
+-#define XGBE_SPEED_10000_WORD 0x7
+-#define XGBE_SPEED_10000_DFE_TAP_CONFIG 0x1
+-#define XGBE_SPEED_10000_DFE_TAP_ENABLE 0x7f
+-
+-#define XGBE_SPEED_2500_BLWC 1
+-#define XGBE_SPEED_2500_CDR 0x2
+-#define XGBE_SPEED_2500_PLL 0x0
+-#define XGBE_SPEED_2500_PQ 0xa
+-#define XGBE_SPEED_2500_RATE 0x1
+-#define XGBE_SPEED_2500_TXAMP 0xf
+-#define XGBE_SPEED_2500_WORD 0x1
+-#define XGBE_SPEED_2500_DFE_TAP_CONFIG 0x3
+-#define XGBE_SPEED_2500_DFE_TAP_ENABLE 0x0
+-
+-#define XGBE_SPEED_1000_BLWC 1
+-#define XGBE_SPEED_1000_CDR 0x2
+-#define XGBE_SPEED_1000_PLL 0x0
+-#define XGBE_SPEED_1000_PQ 0xa
+-#define XGBE_SPEED_1000_RATE 0x3
+-#define XGBE_SPEED_1000_TXAMP 0xf
+-#define XGBE_SPEED_1000_WORD 0x1
+-#define XGBE_SPEED_1000_DFE_TAP_CONFIG 0x3
+-#define XGBE_SPEED_1000_DFE_TAP_ENABLE 0x0
++#define XGBE_LINK_TIMEOUT 5
++
++#define XGBE_SGMII_AN_LINK_STATUS BIT(1)
++#define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
++#define XGBE_SGMII_AN_LINK_SPEED_100 0x04
++#define XGBE_SGMII_AN_LINK_SPEED_1000 0x08
++#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4)
++
++/* ECC correctable error notification window (seconds) */
++#define XGBE_ECC_LIMIT 60
++
++/* MDIO port types */
++#define XGMAC_MAX_C22_PORT 3
+
+ struct xgbe_prv_data;
+
+@@ -461,6 +468,7 @@ enum xgbe_state {
+ XGBE_DOWN,
+ XGBE_LINK_INIT,
+ XGBE_LINK_ERR,
++ XGBE_STOPPED,
+ };
+
+ enum xgbe_int {
+@@ -480,6 +488,12 @@ enum xgbe_int_state {
+ XGMAC_INT_STATE_RESTORE,
+ };
+
++enum xgbe_ecc_sec {
++ XGBE_ECC_SEC_TX,
++ XGBE_ECC_SEC_RX,
++ XGBE_ECC_SEC_DESC,
++};
++
+ enum xgbe_speed {
+ XGBE_SPEED_1000 = 0,
+ XGBE_SPEED_2500,
+@@ -487,6 +501,19 @@ enum xgbe_speed {
+ XGBE_SPEEDS,
+ };
+
++enum xgbe_xpcs_access {
++ XGBE_XPCS_ACCESS_V1 = 0,
++ XGBE_XPCS_ACCESS_V2,
++};
++
++enum xgbe_an_mode {
++ XGBE_AN_MODE_CL73 = 0,
++ XGBE_AN_MODE_CL73_REDRV,
++ XGBE_AN_MODE_CL37,
++ XGBE_AN_MODE_CL37_SGMII,
++ XGBE_AN_MODE_NONE,
++};
++
+ enum xgbe_an {
+ XGBE_AN_READY = 0,
+ XGBE_AN_PAGE_RECEIVED,
+@@ -504,8 +531,14 @@ enum xgbe_rx {
+ };
+
+ enum xgbe_mode {
+- XGBE_MODE_KR = 0,
+- XGBE_MODE_KX,
++ XGBE_MODE_KX_1000 = 0,
++ XGBE_MODE_KX_2500,
++ XGBE_MODE_KR,
++ XGBE_MODE_X,
++ XGBE_MODE_SGMII_100,
++ XGBE_MODE_SGMII_1000,
++ XGBE_MODE_SFI,
++ XGBE_MODE_UNKNOWN,
+ };
+
+ enum xgbe_speedset {
+@@ -513,6 +546,12 @@ enum xgbe_speedset {
+ XGBE_SPEEDSET_2500_10000,
+ };
+
++enum xgbe_mdio_mode {
++ XGBE_MDIO_MODE_NONE = 0,
++ XGBE_MDIO_MODE_CL22,
++ XGBE_MDIO_MODE_CL45,
++};
++
+ struct xgbe_phy {
+ u32 supported;
+ u32 advertising;
+@@ -531,6 +570,43 @@ struct xgbe_phy {
+ int rx_pause;
+ };
+
++enum xgbe_i2c_cmd {
++ XGBE_I2C_CMD_READ = 0,
++ XGBE_I2C_CMD_WRITE,
++};
++
++struct xgbe_i2c_op {
++ enum xgbe_i2c_cmd cmd;
++
++ unsigned int target;
++
++ void *buf;
++ unsigned int len;
++};
++
++struct xgbe_i2c_op_state {
++ struct xgbe_i2c_op *op;
++
++ unsigned int tx_len;
++ unsigned char *tx_buf;
++
++ unsigned int rx_len;
++ unsigned char *rx_buf;
++
++ unsigned int tx_abort_source;
++
++ int ret;
++};
++
++struct xgbe_i2c {
++ unsigned int started;
++ unsigned int max_speed_mode;
++ unsigned int rx_fifo_size;
++ unsigned int tx_fifo_size;
++
++ struct xgbe_i2c_op_state op_state;
++};
++
+ struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+@@ -601,9 +677,15 @@ struct xgbe_hw_if {
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+- int (*set_gmii_speed)(struct xgbe_prv_data *);
+- int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
+- int (*set_xgmii_speed)(struct xgbe_prv_data *);
++ int (*set_speed)(struct xgbe_prv_data *, int);
++
++ int (*set_ext_mii_mode)(struct xgbe_prv_data *, unsigned int,
++ enum xgbe_mdio_mode);
++ int (*read_ext_mii_regs)(struct xgbe_prv_data *, int, int);
++ int (*write_ext_mii_regs)(struct xgbe_prv_data *, int, int, u16);
++
++ int (*set_gpio)(struct xgbe_prv_data *, unsigned int);
++ int (*clr_gpio)(struct xgbe_prv_data *, unsigned int);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+@@ -682,11 +764,65 @@ struct xgbe_hw_if {
+ int (*disable_rss)(struct xgbe_prv_data *);
+ int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
+ int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
++
++ /* For ECC */
++ void (*disable_ecc_ded)(struct xgbe_prv_data *);
++ void (*disable_ecc_sec)(struct xgbe_prv_data *, enum xgbe_ecc_sec);
++};
++
++/* This structure represents implementation specific routines for an
++ * implementation of a PHY. All routines are required unless noted below.
++ * Optional routines:
++ * kr_training_pre, kr_training_post
++ */
++struct xgbe_phy_impl_if {
++ /* Perform Setup/teardown actions */
++ int (*init)(struct xgbe_prv_data *);
++ void (*exit)(struct xgbe_prv_data *);
++
++ /* Perform start/stop specific actions */
++ int (*reset)(struct xgbe_prv_data *);
++ int (*start)(struct xgbe_prv_data *);
++ void (*stop)(struct xgbe_prv_data *);
++
++ /* Return the link status */
++ int (*link_status)(struct xgbe_prv_data *, int *);
++
++ /* Indicate if a particular speed is valid */
++ bool (*valid_speed)(struct xgbe_prv_data *, int);
++
++ /* Check if the specified mode can/should be used */
++ bool (*use_mode)(struct xgbe_prv_data *, enum xgbe_mode);
++ /* Switch the PHY into various modes */
++ void (*set_mode)(struct xgbe_prv_data *, enum xgbe_mode);
++ /* Retrieve mode needed for a specific speed */
++ enum xgbe_mode (*get_mode)(struct xgbe_prv_data *, int);
++ /* Retrieve new/next mode when trying to auto-negotiate */
++ enum xgbe_mode (*switch_mode)(struct xgbe_prv_data *);
++ /* Retrieve current mode */
++ enum xgbe_mode (*cur_mode)(struct xgbe_prv_data *);
++
++ /* Retrieve current auto-negotiation mode */
++ enum xgbe_an_mode (*an_mode)(struct xgbe_prv_data *);
++
++ /* Configure auto-negotiation settings */
++ int (*an_config)(struct xgbe_prv_data *);
++
++ /* Set/override auto-negotiation advertisement settings */
++ unsigned int (*an_advertising)(struct xgbe_prv_data *);
++
++ /* Process results of auto-negotiation */
++ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
++
++ /* Pre/Post KR training enablement support */
++ void (*kr_training_pre)(struct xgbe_prv_data *);
++ void (*kr_training_post)(struct xgbe_prv_data *);
+ };
+
+ struct xgbe_phy_if {
+- /* For initial PHY setup */
+- void (*phy_init)(struct xgbe_prv_data *);
++ /* For PHY setup/teardown */
++ int (*phy_init)(struct xgbe_prv_data *);
++ void (*phy_exit)(struct xgbe_prv_data *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct xgbe_prv_data *);
+@@ -696,6 +832,30 @@ struct xgbe_phy_if {
+ /* For PHY support while device is up */
+ void (*phy_status)(struct xgbe_prv_data *);
+ int (*phy_config_aneg)(struct xgbe_prv_data *);
++
++ /* For PHY settings validation */
++ bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
++
++ /* For single interrupt support */
++ irqreturn_t (*an_isr)(int, struct xgbe_prv_data *);
++
++ /* PHY implementation specific services */
++ struct xgbe_phy_impl_if phy_impl;
++};
++
++struct xgbe_i2c_if {
++ /* For initial I2C setup */
++ int (*i2c_init)(struct xgbe_prv_data *);
++
++ /* For I2C support when setting device up/down */
++ int (*i2c_start)(struct xgbe_prv_data *);
++ void (*i2c_stop)(struct xgbe_prv_data *);
++
++ /* For performing I2C operations */
++ int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
++
++ /* For single interrupt support */
++ irqreturn_t (*i2c_isr)(int, struct xgbe_prv_data *);
+ };
+
+ struct xgbe_desc_if {
+@@ -755,11 +915,28 @@ struct xgbe_hw_features {
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+ };
+
++struct xgbe_version_data {
++ void (*init_function_ptrs_phy_impl)(struct xgbe_phy_if *);
++ enum xgbe_xpcs_access xpcs_access;
++ unsigned int mmc_64bit;
++ unsigned int tx_max_fifo_size;
++ unsigned int rx_max_fifo_size;
++ unsigned int tx_tstamp_workaround;
++ unsigned int ecc_support;
++ unsigned int i2c_support;
++};
++
+ struct xgbe_prv_data {
+ struct net_device *netdev;
+- struct platform_device *pdev;
++ struct pci_dev *pcidev;
++ struct platform_device *platdev;
+ struct acpi_device *adev;
+ struct device *dev;
++ struct platform_device *phy_platdev;
++ struct device *phy_dev;
++
++ /* Version related data */
++ struct xgbe_version_data *vdata;
+
+ /* ACPI or DT flag */
+ unsigned int use_acpi;
+@@ -770,12 +947,19 @@ struct xgbe_prv_data {
+ void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
+ void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
+ void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
++ void __iomem *xprop_regs; /* XGBE property registers */
++ void __iomem *xi2c_regs; /* XGBE I2C CSRs */
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
++ unsigned int xpcs_window_def_reg;
++ unsigned int xpcs_window_sel_reg;
++ unsigned int xpcs_window;
++ unsigned int xpcs_window_size;
++ unsigned int xpcs_window_mask;
+
+ /* RSS addressing mutex */
+ struct mutex rss_mutex;
+@@ -783,12 +967,39 @@ struct xgbe_prv_data {
+ /* Flags representing xgbe_state */
+ unsigned long dev_state;
+
++ /* ECC support */
++ unsigned long tx_sec_period;
++ unsigned long tx_ded_period;
++ unsigned long rx_sec_period;
++ unsigned long rx_ded_period;
++ unsigned long desc_sec_period;
++ unsigned long desc_ded_period;
++
++ unsigned int tx_sec_count;
++ unsigned int tx_ded_count;
++ unsigned int rx_sec_count;
++ unsigned int rx_ded_count;
++ unsigned int desc_ded_count;
++ unsigned int desc_sec_count;
++
++ struct msix_entry *msix_entries;
+ int dev_irq;
++ int ecc_irq;
++ int i2c_irq;
++ int channel_irq[XGBE_MAX_DMA_CHANNELS];
++
+ unsigned int per_channel_irq;
++ unsigned int irq_shared;
++ unsigned int irq_count;
++ unsigned int channel_irq_count;
++ unsigned int channel_irq_mode;
++
++ char ecc_name[IFNAMSIZ + 32];
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_phy_if phy_if;
+ struct xgbe_desc_if desc_if;
++ struct xgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+@@ -803,12 +1014,16 @@ struct xgbe_prv_data {
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel;
++ unsigned int tx_max_channel_count;
++ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
++ unsigned int tx_max_q_count;
++ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+@@ -820,11 +1035,13 @@ struct xgbe_prv_data {
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
++ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
++ unsigned int rx_max_fifo_size;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+@@ -842,6 +1059,8 @@ struct xgbe_prv_data {
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
++ unsigned int rx_rfa[XGBE_MAX_QUEUES];
++ unsigned int rx_rfd[XGBE_MAX_QUEUES];
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[XGBE_RSS_HASH_KEY_SIZE];
+@@ -881,13 +1100,16 @@ struct xgbe_prv_data {
+ struct ieee_pfc *pfc;
+ unsigned int q2tc_map[XGBE_MAX_QUEUES];
+ unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
++ unsigned int pfcq[XGBE_MAX_QUEUES];
++ unsigned int pfc_rfa;
+ u8 num_tcs;
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+- /* Device restart work structure */
++ /* Device work structures */
+ struct work_struct restart_work;
++ struct work_struct stopdev_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+@@ -901,9 +1123,14 @@ struct xgbe_prv_data {
+ int phy_speed;
+
+ /* MDIO/PHY related settings */
++ unsigned int phy_started;
++ void *phy_data;
+ struct xgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
++ struct completion mdio_complete;
++
++ unsigned int kr_redrv;
+
+ char an_name[IFNAMSIZ + 32];
+ struct workqueue_struct *an_workqueue;
+@@ -911,23 +1138,9 @@ struct xgbe_prv_data {
+ int an_irq;
+ struct work_struct an_irq_work;
+
+- unsigned int speed_set;
+-
+- /* SerDes UEFI configurable settings.
+- * Switching between modes/speeds requires new values for some
+- * SerDes settings. The values can be supplied as device
+- * properties in array format. The first array entry is for
+- * 1GbE, second for 2.5GbE and third for 10GbE
+- */
+- u32 serdes_blwc[XGBE_SPEEDS];
+- u32 serdes_cdr_rate[XGBE_SPEEDS];
+- u32 serdes_pq_skew[XGBE_SPEEDS];
+- u32 serdes_tx_amp[XGBE_SPEEDS];
+- u32 serdes_dfe_tap_cfg[XGBE_SPEEDS];
+- u32 serdes_dfe_tap_ena[XGBE_SPEEDS];
+-
+ /* Auto-negotiation state machine support */
+ unsigned int an_int;
++ unsigned int an_status;
+ struct mutex an_mutex;
+ enum xgbe_an an_result;
+ enum xgbe_an an_state;
+@@ -938,6 +1151,13 @@ struct xgbe_prv_data {
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
++ enum xgbe_an_mode an_mode;
++
++ /* I2C support */
++ struct xgbe_i2c i2c;
++ struct mutex i2c_mutex;
++ struct completion i2c_complete;
++ char i2c_name[IFNAMSIZ + 32];
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+
+@@ -948,14 +1168,36 @@ struct xgbe_prv_data {
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
++
++ unsigned int debugfs_xprop_reg;
++
++ unsigned int debugfs_xi2c_reg;
+ #endif
+ };
+
+ /* Function prototypes*/
++struct xgbe_prv_data *xgbe_alloc_pdata(struct device *);
++void xgbe_free_pdata(struct xgbe_prv_data *);
++void xgbe_set_counts(struct xgbe_prv_data *);
++int xgbe_config_netdev(struct xgbe_prv_data *);
++void xgbe_deconfig_netdev(struct xgbe_prv_data *);
++
++int xgbe_platform_init(void);
++void xgbe_platform_exit(void);
++#ifdef CONFIG_PCI
++int xgbe_pci_init(void);
++void xgbe_pci_exit(void);
++#else
++static inline int xgbe_pci_init(void) { return 0; }
++static inline void xgbe_pci_exit(void) { }
++#endif
+
+ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
++void xgbe_init_function_ptrs_phy_v1(struct xgbe_phy_if *);
++void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *);
+ void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
++void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *);
+ const struct net_device_ops *xgbe_get_netdev_ops(void);
+ const struct ethtool_ops *xgbe_get_ethtool_ops(void);
+
+diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
+old mode 100644
+new mode 100755
+index 201ffa5..99b187e
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -143,13 +143,14 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
+ * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
+ * is still pending.
+ */
+-static inline int phy_aneg_done(struct phy_device *phydev)
++int phy_aneg_done(struct phy_device *phydev)
+ {
+ if (phydev->drv->aneg_done)
+ return phydev->drv->aneg_done(phydev);
+
+ return genphy_aneg_done(phydev);
+ }
++EXPORT_SYMBOL(phy_aneg_done);
+
+ /* A structure for mapping a particular speed and duplex
+ * combination to a particular SUPPORTED and ADVERTISED value
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+old mode 100644
+new mode 100755
+index bb9b102..aa7c182
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1727,6 +1727,8 @@ struct net_device {
+ unsigned char dma;
+
+ unsigned int mtu;
++ unsigned int min_mtu;
++ unsigned int max_mtu;
+ unsigned short type;
+ unsigned short hard_header_len;
+ unsigned short min_header_len;
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+old mode 100644
+new mode 100755
+index e25f183..e1a98fd
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -764,6 +764,7 @@ void phy_detach(struct phy_device *phydev);
+ void phy_start(struct phy_device *phydev);
+ void phy_stop(struct phy_device *phydev);
+ int phy_start_aneg(struct phy_device *phydev);
++int phy_aneg_done(struct phy_device *phydev);
+
+ int phy_stop_interrupts(struct phy_device *phydev);
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2923-amd-xgbe-clocksource-Use-a-plain-u64-instead-of.patch b/meta-v1000/recipes-kernel/linux/files/2923-amd-xgbe-clocksource-Use-a-plain-u64-instead-of.patch
new file mode 100644
index 00000000..b0ae92f3
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2923-amd-xgbe-clocksource-Use-a-plain-u64-instead-of.patch
@@ -0,0 +1,145 @@
+From 7c9fa69086b3e247680b0049847d9f7a80981789 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:27:07 +0530
+Subject: [PATCH] amd-xgbe : clocksource: Use a plain u64 instead of cycle_t.
+ a5a1d1c2914b5316924c7893eb683a5420ebd3be in mainline
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 --
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 8 +++-----
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 ++----
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 15 +--------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 2 --
+ 5 files changed, 6 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 86f1626..5068d4e 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -891,8 +891,6 @@
+ #define PCS_V1_WINDOW_SELECT 0x03fc
+ #define PCS_V2_WINDOW_DEF 0x9060
+ #define PCS_V2_WINDOW_SELECT 0x9064
+-#define PCS_V2_RV_WINDOW_DEF 0x1060
+-#define PCS_V2_RV_WINDOW_SELECT 0x1064
+
+ /* PCS register entry bit positions and sizes */
+ #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index efe3cf9..0d42ba8 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+- XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
++ XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+- XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
++ XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+ }
+@@ -3409,10 +3409,8 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+- if (ret) {
+- netdev_err(pdata->netdev, "error flushing TX queues\n");
++ if (ret)
+ return ret;
+- }
+
+ /*
+ * Initialize DMA related features
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index fe36ded..4088544 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -539,7 +539,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ }
+ }
+
+-isr_done:
+ /* If there is not a separate AN irq, handle it here */
+ if (pdata->dev_irq == pdata->an_irq)
+ pdata->phy_if.an_isr(irq, pdata);
+@@ -552,6 +551,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
+ pdata->i2c_if.i2c_isr(irq, pdata);
+
++isr_done:
+ return IRQ_HANDLED;
+ }
+
+@@ -1070,9 +1070,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_start\n");
+
+- ret = hw_if->init(pdata);
+- if (ret)
+- return ret;
++ hw_if->init(pdata);
+
+ xgbe_napi_enable(pdata, 1);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index c2730f1..e76b7f6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -265,7 +265,6 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
+- struct pci_dev *rdev;
+ unsigned int ma_lo, ma_hi;
+ unsigned int reg;
+ int bar_mask;
+@@ -327,20 +326,8 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
+- /* Set the PCS indirect addressing definition registers */
+- rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+- if (rdev &&
+- (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+- pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+- pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+- } else {
+- pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+- pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+- }
+- pci_dev_put(rdev);
+-
+ /* Configure the PCS indirect addressing support */
+- reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
++ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 0010881..f52a9bd 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -955,8 +955,6 @@ struct xgbe_prv_data {
+
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+- unsigned int xpcs_window_def_reg;
+- unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2924-amd-xgbe-Fix-IRQ-processing-when-running-in-single.patch b/meta-v1000/recipes-kernel/linux/files/2924-amd-xgbe-Fix-IRQ-processing-when-running-in-single.patch
new file mode 100644
index 00000000..d6717843
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2924-amd-xgbe-Fix-IRQ-processing-when-running-in-single.patch
@@ -0,0 +1,42 @@
+From 54b6f914346fc36054e0b3caec2eed99397a7290 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:28:24 +0530
+Subject: [PATCH] amd-xgbe: Fix IRQ processing when running in single IRQ mode
+
+When running in single IRQ mode, the additional IRQ routines were being
+skipped because only the XGMAC interrupt status was being checked.
+Update the code so that the additional IRQ routines are checked whenever
+an interrupt is received.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 4088544..97fd4c2 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ }
+ }
+
++isr_done:
+ /* If there is not a separate AN irq, handle it here */
+ if (pdata->dev_irq == pdata->an_irq)
+ pdata->phy_if.an_isr(irq, pdata);
+@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
+ pdata->i2c_if.i2c_isr(irq, pdata);
+
+-isr_done:
+ return IRQ_HANDLED;
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2925-amd-xgbe-Update-PCI-support-to-use-new-IRQ.patch b/meta-v1000/recipes-kernel/linux/files/2925-amd-xgbe-Update-PCI-support-to-use-new-IRQ.patch
new file mode 100644
index 00000000..0d691cd8
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2925-amd-xgbe-Update-PCI-support-to-use-new-IRQ.patch
@@ -0,0 +1,246 @@
+From cda0355fcc704023666b6ad0a479ccded4b9b61a Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:29:54 +0530
+Subject: [PATCH] amd-xgbe: Update PCI support to use new IRQ functions
+
+Some of the PCI MSI/MSI-X functions have been deprecated and it is
+recommended to use the new pci_alloc_irq_vectors() function. Convert the
+code over to use the new function. Also, modify the way in which the IRQs
+are requested - try for multiple MSI-X/MSI first, then a single MSI/legacy
+interrupt.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 128 +++++++++----------------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 8 +-
+ 2 files changed, 41 insertions(+), 95 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index e76b7f6..e436902 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -122,104 +122,40 @@
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+
+-static int xgbe_config_msi(struct xgbe_prv_data *pdata)
++static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
+ {
+- unsigned int msi_count;
++ unsigned int vector_count;
+ unsigned int i, j;
+ int ret;
+
+- msi_count = XGBE_MSIX_BASE_COUNT;
+- msi_count += max(pdata->rx_ring_count,
+- pdata->tx_ring_count);
+- msi_count = roundup_pow_of_two(msi_count);
++ vector_count = XGBE_MSI_BASE_COUNT;
++ vector_count += max(pdata->rx_ring_count,
++ pdata->tx_ring_count);
+
+- ret = pci_enable_msi_exact(pdata->pcidev, msi_count);
++ ret = pci_alloc_irq_vectors(pdata->pcidev, XGBE_MSI_MIN_COUNT,
++ vector_count, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+- dev_info(pdata->dev, "MSI request for %u interrupts failed\n",
+- msi_count);
+-
+- ret = pci_enable_msi(pdata->pcidev);
+- if (ret < 0) {
+- dev_info(pdata->dev, "MSI enablement failed\n");
+- return ret;
+- }
+-
+- msi_count = 1;
+- }
+-
+- pdata->irq_count = msi_count;
+-
+- pdata->dev_irq = pdata->pcidev->irq;
+-
+- if (msi_count > 1) {
+- pdata->ecc_irq = pdata->pcidev->irq + 1;
+- pdata->i2c_irq = pdata->pcidev->irq + 2;
+- pdata->an_irq = pdata->pcidev->irq + 3;
+-
+- for (i = XGBE_MSIX_BASE_COUNT, j = 0;
+- (i < msi_count) && (j < XGBE_MAX_DMA_CHANNELS);
+- i++, j++)
+- pdata->channel_irq[j] = pdata->pcidev->irq + i;
+- pdata->channel_irq_count = j;
+-
+- pdata->per_channel_irq = 1;
+- pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+- } else {
+- pdata->ecc_irq = pdata->pcidev->irq;
+- pdata->i2c_irq = pdata->pcidev->irq;
+- pdata->an_irq = pdata->pcidev->irq;
+- }
+-
+- if (netif_msg_probe(pdata))
+- dev_dbg(pdata->dev, "MSI interrupts enabled\n");
+-
+- return 0;
+-}
+-
+-static int xgbe_config_msix(struct xgbe_prv_data *pdata)
+-{
+- unsigned int msix_count;
+- unsigned int i, j;
+- int ret;
+-
+- msix_count = XGBE_MSIX_BASE_COUNT;
+- msix_count += max(pdata->rx_ring_count,
+- pdata->tx_ring_count);
+-
+- pdata->msix_entries = devm_kcalloc(pdata->dev, msix_count,
+- sizeof(struct msix_entry),
+- GFP_KERNEL);
+- if (!pdata->msix_entries)
+- return -ENOMEM;
+-
+- for (i = 0; i < msix_count; i++)
+- pdata->msix_entries[i].entry = i;
+-
+- ret = pci_enable_msix_range(pdata->pcidev, pdata->msix_entries,
+- XGBE_MSIX_MIN_COUNT, msix_count);
+- if (ret < 0) {
+- dev_info(pdata->dev, "MSI-X enablement failed\n");
+- devm_kfree(pdata->dev, pdata->msix_entries);
+- pdata->msix_entries = NULL;
++ dev_info(pdata->dev, "multi MSI/MSI-X enablement failed\n");
+ return ret;
+ }
+
+ pdata->irq_count = ret;
+
+- pdata->dev_irq = pdata->msix_entries[0].vector;
+- pdata->ecc_irq = pdata->msix_entries[1].vector;
+- pdata->i2c_irq = pdata->msix_entries[2].vector;
+- pdata->an_irq = pdata->msix_entries[3].vector;
++ pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
++ pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 1);
++ pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 2);
++ pdata->an_irq = pci_irq_vector(pdata->pcidev, 3);
+
+- for (i = XGBE_MSIX_BASE_COUNT, j = 0; i < ret; i++, j++)
+- pdata->channel_irq[j] = pdata->msix_entries[i].vector;
++ for (i = XGBE_MSI_BASE_COUNT, j = 0; i < ret; i++, j++)
++ pdata->channel_irq[j] = pci_irq_vector(pdata->pcidev, i);
+ pdata->channel_irq_count = j;
+
+ pdata->per_channel_irq = 1;
+ pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL;
+
+ if (netif_msg_probe(pdata))
+- dev_dbg(pdata->dev, "MSI-X interrupts enabled\n");
++ dev_dbg(pdata->dev, "multi %s interrupts enabled\n",
++ pdata->pcidev->msix_enabled ? "MSI-X" : "MSI");
+
+ return 0;
+ }
+@@ -228,21 +164,28 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
+ {
+ int ret;
+
+- ret = xgbe_config_msix(pdata);
++ ret = xgbe_config_multi_msi(pdata);
+ if (!ret)
+ goto out;
+
+- ret = xgbe_config_msi(pdata);
+- if (!ret)
+- goto out;
++ ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
++ PCI_IRQ_LEGACY | PCI_IRQ_MSI);
++ if (ret < 0) {
++ dev_info(pdata->dev, "single IRQ enablement failed\n");
++ return ret;
++ }
+
+ pdata->irq_count = 1;
+- pdata->irq_shared = 1;
++ pdata->channel_irq_count = 1;
++
++ pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
++ pdata->ecc_irq = pci_irq_vector(pdata->pcidev, 0);
++ pdata->i2c_irq = pci_irq_vector(pdata->pcidev, 0);
++ pdata->an_irq = pci_irq_vector(pdata->pcidev, 0);
+
+- pdata->dev_irq = pdata->pcidev->irq;
+- pdata->ecc_irq = pdata->pcidev->irq;
+- pdata->i2c_irq = pdata->pcidev->irq;
+- pdata->an_irq = pdata->pcidev->irq;
++ if (netif_msg_probe(pdata))
++ dev_dbg(pdata->dev, "single %s interrupt enabled\n",
++ pdata->pcidev->msi_enabled ? "MSI" : "legacy");
+
+ out:
+ if (netif_msg_probe(pdata)) {
+@@ -412,12 +355,15 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ /* Configure the netdev resource */
+ ret = xgbe_config_netdev(pdata);
+ if (ret)
+- goto err_pci_enable;
++ goto err_irq_vectors;
+
+ netdev_notice(pdata->netdev, "net device enabled\n");
+
+ return 0;
+
++err_irq_vectors:
++ pci_free_irq_vectors(pdata->pcidev);
++
+ err_pci_enable:
+ xgbe_free_pdata(pdata);
+
+@@ -433,6 +379,8 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
+
+ xgbe_deconfig_netdev(pdata);
+
++ pci_free_irq_vectors(pdata->pcidev);
++
+ xgbe_free_pdata(pdata);
+ }
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index f52a9bd..99f1c87 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -211,9 +211,9 @@
+ #define XGBE_MAC_PROP_OFFSET 0x1d000
+ #define XGBE_I2C_CTRL_OFFSET 0x1e000
+
+-/* PCI MSIx support */
+-#define XGBE_MSIX_BASE_COUNT 4
+-#define XGBE_MSIX_MIN_COUNT (XGBE_MSIX_BASE_COUNT + 1)
++/* PCI MSI/MSIx support */
++#define XGBE_MSI_BASE_COUNT 4
++#define XGBE_MSI_MIN_COUNT (XGBE_MSI_BASE_COUNT + 1)
+
+ /* PCI clock frequencies */
+ #define XGBE_V2_DMA_CLOCK_FREQ 500000000 /* 500 MHz */
+@@ -980,14 +980,12 @@ struct xgbe_prv_data {
+ unsigned int desc_ded_count;
+ unsigned int desc_sec_count;
+
+- struct msix_entry *msix_entries;
+ int dev_irq;
+ int ecc_irq;
+ int i2c_irq;
+ int channel_irq[XGBE_MAX_DMA_CHANNELS];
+
+ unsigned int per_channel_irq;
+- unsigned int irq_shared;
+ unsigned int irq_count;
+ unsigned int channel_irq_count;
+ unsigned int channel_irq_mode;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2926-amd-xgbe-Add-a-hardware-quirk-for-register.patch b/meta-v1000/recipes-kernel/linux/files/2926-amd-xgbe-Add-a-hardware-quirk-for-register.patch
new file mode 100644
index 00000000..75060fd5
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2926-amd-xgbe-Add-a-hardware-quirk-for-register.patch
@@ -0,0 +1,107 @@
+From e469aadd0277c011c183cdc733e23d672421951e Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:31:12 +0530
+Subject: [PATCH] amd-xgbe: Add a hardware quirk for register definitions
+
+A newer version of the hardware is using the same PCI ids for the network
+device but has altered register definitions for determining the window
+settings for the indirect PCS access. Add support to check for this
+hardware and if found use the new register values.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 ++
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 ++--
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 15 ++++++++++++++-
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 2 ++
+ 4 files changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 5068d4e..86f1626 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -891,6 +891,8 @@
+ #define PCS_V1_WINDOW_SELECT 0x03fc
+ #define PCS_V2_WINDOW_DEF 0x9060
+ #define PCS_V2_WINDOW_SELECT 0x9064
++#define PCS_V2_RV_WINDOW_DEF 0x1060
++#define PCS_V2_RV_WINDOW_SELECT 0x1064
+
+ /* PCS register entry bit positions and sizes */
+ #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 0d42ba8..3190dcf 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
++ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+
+@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ spin_lock_irqsave(&pdata->xpcs_lock, flags);
+- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
++ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+ spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index e436902..38392a5 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -208,6 +208,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ struct xgbe_prv_data *pdata;
+ struct device *dev = &pdev->dev;
+ void __iomem * const *iomap_table;
++ struct pci_dev *rdev;
+ unsigned int ma_lo, ma_hi;
+ unsigned int reg;
+ int bar_mask;
+@@ -269,8 +270,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (netif_msg_probe(pdata))
+ dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+
++ /* Set the PCS indirect addressing definition registers */
++ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
++ if (rdev &&
++ (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
++ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
++ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
++ } else {
++ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
++ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
++ }
++ pci_dev_put(rdev);
++
+ /* Configure the PCS indirect addressing support */
+- reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
++ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 99f1c87..f9a2463 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -955,6 +955,8 @@ struct xgbe_prv_data {
+
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
++ unsigned int xpcs_window_def_reg;
++ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2927-amd-xgbe-Check-xgbe_init-return-code.patch b/meta-v1000/recipes-kernel/linux/files/2927-amd-xgbe-Check-xgbe_init-return-code.patch
new file mode 100644
index 00000000..0f2fc8a6
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2927-amd-xgbe-Check-xgbe_init-return-code.patch
@@ -0,0 +1,54 @@
+From bcf43cc4e737a180a8eec7d983ba24926c2959eb Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Wed, 6 Dec 2017 19:37:03 +0530
+Subject: [PATCH] amd-xgbe: Check xgbe_init() return code
+
+The xgbe_init() routine returns a return code indicating success or
+failure, but the return code is not checked. Add code to xgbe_init()
+to issue a message when failures are seen and add code to check the
+xgbe_init() return code.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 +++-
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 +++-
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 3190dcf..efe3cf9 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -3409,8 +3409,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+- if (ret)
++ if (ret) {
++ netdev_err(pdata->netdev, "error flushing TX queues\n");
+ return ret;
++ }
+
+ /*
+ * Initialize DMA related features
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 97fd4c2..fe36ded 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_start\n");
+
+- hw_if->init(pdata);
++ ret = hw_if->init(pdata);
++ if (ret)
++ return ret;
+
+ xgbe_napi_enable(pdata, 1);
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2928-amd-xgbe-Stop-the-PHY-before-releasing-interrupts.patch b/meta-v1000/recipes-kernel/linux/files/2928-amd-xgbe-Stop-the-PHY-before-releasing-interrupts.patch
new file mode 100644
index 00000000..041d4221
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2928-amd-xgbe-Stop-the-PHY-before-releasing-interrupts.patch
@@ -0,0 +1,43 @@
+From 8b15f83bbd111d2c04c16ecd20e6087b2c27a179 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 08:53:39 +0530
+Subject: [PATCH] amd-xgbe: Stop the PHY before releasing interrupts
+
+Some configurations require the use of the hardware's MDIO support to
+communicate with external PHYs. The MDIO commands indicate completion
+through the device interrupt. When bringing down the device the interrupts
+were released before stopping the external PHY, resulting in MDIO command
+timeouts. Move the stopping of the PHY to before the releasing of the
+interrupts.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index fe36ded..0532b6b 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1131,12 +1131,12 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
++ phy_if->phy_stop(pdata);
++
+ xgbe_free_irqs(pdata);
+
+ xgbe_napi_disable(pdata, 1);
+
+- phy_if->phy_stop(pdata);
+-
+ hw_if->exit(pdata);
+
+ channel = pdata->channel;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2929-amd-xgbe-Be-sure-to-set-MDIO-modes-on-device.patch b/meta-v1000/recipes-kernel/linux/files/2929-amd-xgbe-Be-sure-to-set-MDIO-modes-on-device.patch
new file mode 100644
index 00000000..0bf4050b
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2929-amd-xgbe-Be-sure-to-set-MDIO-modes-on-device.patch
@@ -0,0 +1,76 @@
+From 98a1a074ce4d70738257cdfdcb735785a9f901d7 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 08:55:12 +0530
+Subject: [PATCH] amd-xgbe: Be sure to set MDIO modes on device (re)start
+
+The MDIO register mode is set when the device is probed. But when the
+device is brought down and then back up, the MDIO register mode has been
+reset. Be sure to reset the mode during device startup and only change
+the mode of the address specified.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 22 ++++++++++++++++++++++
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index efe3cf9..24a687c 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1323,7 +1323,7 @@ static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr,
+ static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
+ enum xgbe_mdio_mode mode)
+ {
+- unsigned int reg_val = 0;
++ unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);
+
+ switch (mode) {
+ case XGBE_MDIO_MODE_CL22:
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 9d8c9530..04804cb 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -875,6 +875,16 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
+ !phy_data->sfp_phy_avail)
+ return 0;
+
++ /* Set the proper MDIO mode for the PHY */
++ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
++ phy_data->phydev_mode);
++ if (ret) {
++ netdev_err(pdata->netdev,
++ "mdio port/clause not compatible (%u/%u)\n",
++ phy_data->mdio_addr, phy_data->phydev_mode);
++ return ret;
++ }
++
+ /* Create and connect to the PHY device */
+ phydev = get_phy_device(phy_data->mii, phy_data->mdio_addr,
+ (phy_data->phydev_mode == XGBE_MDIO_MODE_CL45));
+@@ -2722,6 +2732,18 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+ if (ret)
+ return ret;
+
++ /* Set the proper MDIO mode for the re-driver */
++ if (phy_data->redrv && !phy_data->redrv_if) {
++ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
++ XGBE_MDIO_MODE_CL22);
++ if (ret) {
++ netdev_err(pdata->netdev,
++ "redriver mdio port not compatible (%u)\n",
++ phy_data->redrv_addr);
++ return ret;
++ }
++ }
++
+ /* Start in highest supported mode */
+ xgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2930-amd-xgbe-Don-t-overwrite-SFP-PHY-mod_absent.patch b/meta-v1000/recipes-kernel/linux/files/2930-amd-xgbe-Don-t-overwrite-SFP-PHY-mod_absent.patch
new file mode 100644
index 00000000..fb6d43b8
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2930-amd-xgbe-Don-t-overwrite-SFP-PHY-mod_absent.patch
@@ -0,0 +1,34 @@
+From 3f24e79e7ad1efe61c390c94b3dc5980d2a58c28 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 08:57:40 +0530
+Subject: [PATCH] amd-xgbe: Don't overwrite SFP PHY mod_absent settings
+
+If an SFP module is not present, xgbe_phy_sfp_phy_settings() should
+return after applying the default settings. Currently there is no return
+statement and the default settings are overwritten.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 04804cb..e707c49 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -716,6 +716,8 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising = pdata->phy.supported;
++
++ return;
+ }
+
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2931-net-busy-poll-allow-preemption-in-sk_busy_loop.patch b/meta-v1000/recipes-kernel/linux/files/2931-net-busy-poll-allow-preemption-in-sk_busy_loop.patch
new file mode 100644
index 00000000..6556d62a
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2931-net-busy-poll-allow-preemption-in-sk_busy_loop.patch
@@ -0,0 +1,268 @@
+From d5d1e77f5858f7c9603151076d656862dde8ee4e Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 08:58:37 +0530
+Subject: [PATCH] net: busy-poll: allow preemption in sk_busy_loop()
+
+After commit 4cd13c21b207 ("softirq: Let ksoftirqd do its job"),
+sk_busy_loop() needs a bit of care :
+softirqs might be delayed since we do not allow preemption yet.
+
+This patch adds preemption points in sk_busy_loop(),
+and makes sure no unnecessary cache line dirtying
+or atomic operations are done while looping.
+
+A new flag is added into napi->state : NAPI_STATE_IN_BUSY_POLL
+
+This prevents napi_complete_done() from clearing NAPIF_STATE_SCHED,
+so that sk_busy_loop() does not have to grab it again.
+
+Similarly, netpoll_poll_lock() is done one time.
+
+This gives about 10 to 20 % improvement in various busy polling
+tests, especially when many threads are busy polling in
+configurations with large number of NIC queues.
+
+This should allow experimenting with bigger delays without
+hurting overall latencies.
+
+Tested:
+ On a 40Gb mlx4 NIC, 32 RX/TX queues.
+
+ echo 70 >/proc/sys/net/core/busy_read
+ for i in `seq 1 40`; do echo -n $i: ; ./super_netperf $i -H lpaa24 -t UDP_RR -- -N -n; done
+
+ Before: After:
+ 1: 90072 92819
+ 2: 157289 184007
+ 3: 235772 213504
+ 4: 344074 357513
+ 5: 394755 458267
+ 6: 461151 487819
+ 7: 549116 625963
+ 8: 544423 716219
+ 9: 720460 738446
+10: 794686 837612
+11: 915998 923960
+12: 937507 925107
+13: 1019677 971506
+14: 1046831 1113650
+15: 1114154 1148902
+16: 1105221 1179263
+17: 1266552 1299585
+18: 1258454 1383817
+19: 1341453 1312194
+20: 1363557 1488487
+21: 1387979 1501004
+22: 1417552 1601683
+23: 1550049 1642002
+24: 1568876 1601915
+25: 1560239 1683607
+26: 1640207 1745211
+27: 1706540 1723574
+28: 1638518 1722036
+29: 1734309 1757447
+30: 1782007 1855436
+31: 1724806 1888539
+32: 1717716 1944297
+33: 1778716 1869118
+34: 1805738 1983466
+35: 1815694 2020758
+36: 1893059 2035632
+37: 1843406 2034653
+38: 1888830 2086580
+39: 1972827 2143567
+40: 1877729 2181851
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Cc: Adam Belay <abelay@google.com>
+Cc: Tariq Toukan <tariqt@mellanox.com>
+Cc: Yuval Mintz <Yuval.Mintz@cavium.com>
+Cc: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ include/linux/netdevice.h | 10 +++++
+ net/core/dev.c | 102 +++++++++++++++++++++++++++++++++++++---------
+ 2 files changed, 92 insertions(+), 20 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index aa7c182..6b5548c 100755
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -334,6 +334,16 @@ enum {
+ NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
+ NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
+ NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
++ NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
++};
++
++enum {
++ NAPIF_STATE_SCHED = (1UL << NAPI_STATE_SCHED),
++ NAPIF_STATE_DISABLE = (1UL << NAPI_STATE_DISABLE),
++ NAPIF_STATE_NPSVC = (1UL << NAPI_STATE_NPSVC),
++ NAPIF_STATE_HASHED = (1UL << NAPI_STATE_HASHED),
++ NAPIF_STATE_NO_BUSY_POLL = (1UL << NAPI_STATE_NO_BUSY_POLL),
++ NAPIF_STATE_IN_BUSY_POLL = (1UL << NAPI_STATE_IN_BUSY_POLL),
+ };
+
+ enum gro_result {
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2e04fd1..084fe5c 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4940,6 +4940,12 @@ void __napi_complete(struct napi_struct *n)
+ {
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+
++ /* Some drivers call us directly, instead of calling
++ * napi_complete_done().
++ */
++ if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
++ return;
++
+ list_del_init(&n->poll_list);
+ smp_mb__before_atomic();
+ clear_bit(NAPI_STATE_SCHED, &n->state);
+@@ -4951,10 +4957,13 @@ void napi_complete_done(struct napi_struct *n, int work_done)
+ unsigned long flags;
+
+ /*
+- * don't let napi dequeue from the cpu poll list
+- * just in case its running on a different cpu
++ * 1) Don't let napi dequeue from the cpu poll list
++ * just in case its running on a different cpu.
++ * 2) If we are busy polling, do nothing here, we have
++ * the guarantee we will be called later.
+ */
+- if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
++ if (unlikely(n->state & (NAPIF_STATE_NPSVC |
++ NAPIF_STATE_IN_BUSY_POLL)))
+ return;
+
+ if (n->gro_list) {
+@@ -4994,13 +5003,41 @@ static struct napi_struct *napi_by_id(unsigned int napi_id)
+ }
+
+ #if defined(CONFIG_NET_RX_BUSY_POLL)
++
+ #define BUSY_POLL_BUDGET 8
++
++static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
++{
++ int rc;
++
++ clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state);
++
++ local_bh_disable();
++
++ /* All we really want here is to re-enable device interrupts.
++ * Ideally, a new ndo_busy_poll_stop() could avoid another round.
++ */
++ rc = napi->poll(napi, BUSY_POLL_BUDGET);
++ netpoll_poll_unlock(have_poll_lock);
++ if (rc == BUSY_POLL_BUDGET)
++ __napi_schedule(napi);
++ local_bh_enable();
++ if (local_softirq_pending())
++ do_softirq();
++}
++
+ bool sk_busy_loop(struct sock *sk, int nonblock)
+ {
+ unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
++ int (*napi_poll)(struct napi_struct *napi, int budget);
+ int (*busy_poll)(struct napi_struct *dev);
++ void *have_poll_lock = NULL;
+ struct napi_struct *napi;
+- int rc = false;
++ int rc;
++
++restart:
++ rc = false;
++ napi_poll = NULL;
+
+ rcu_read_lock();
+
+@@ -5011,24 +5048,33 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
+ /* Note: ndo_busy_poll method is optional in linux-4.5 */
+ busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
+
+- do {
++ preempt_disable();
++ for (;;) {
+ rc = 0;
+ local_bh_disable();
+ if (busy_poll) {
+ rc = busy_poll(napi);
+- } else if (napi_schedule_prep(napi)) {
+- void *have = netpoll_poll_lock(napi);
+-
+- if (test_bit(NAPI_STATE_SCHED, &napi->state)) {
+- rc = napi->poll(napi, BUSY_POLL_BUDGET);
+- trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
+- if (rc == BUSY_POLL_BUDGET) {
+- napi_complete_done(napi, rc);
+- napi_schedule(napi);
+- }
+- }
+- netpoll_poll_unlock(have);
++ goto count;
+ }
++ if (!napi_poll) {
++ unsigned long val = READ_ONCE(napi->state);
++
++ /* If multiple threads are competing for this napi,
++ * we avoid dirtying napi->state as much as we can.
++ */
++ if (val & (NAPIF_STATE_DISABLE | NAPIF_STATE_SCHED |
++ NAPIF_STATE_IN_BUSY_POLL))
++ goto count;
++ if (cmpxchg(&napi->state, val,
++ val | NAPIF_STATE_IN_BUSY_POLL |
++ NAPIF_STATE_SCHED) != val)
++ goto count;
++ have_poll_lock = netpoll_poll_lock(napi);
++ napi_poll = napi->poll;
++ }
++ rc = napi_poll(napi, BUSY_POLL_BUDGET);
++ trace_napi_poll(napi, rc, BUSY_POLL_BUDGET);
++count:
+ if (rc > 0)
+ __NET_ADD_STATS(sock_net(sk),
+ LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+@@ -5037,10 +5083,26 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
+ if (rc == LL_FLUSH_FAILED)
+ break; /* permanent failure */
+
+- cpu_relax();
+- } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
+- !need_resched() && !busy_loop_timeout(end_time));
++ if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
++ busy_loop_timeout(end_time))
++ break;
+
++ if (unlikely(need_resched())) {
++ if (napi_poll)
++ busy_poll_stop(napi, have_poll_lock);
++ preempt_enable();
++ rcu_read_unlock();
++ cond_resched();
++ rc = !skb_queue_empty(&sk->sk_receive_queue);
++ if (rc || busy_loop_timeout(end_time))
++ return rc;
++ goto restart;
++ }
++ cpu_relax_lowlatency();
++ }
++ if (napi_poll)
++ busy_poll_stop(napi, have_poll_lock);
++ preempt_enable();
+ rc = !skb_queue_empty(&sk->sk_receive_queue);
+ out:
+ rcu_read_unlock();
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2932-net-busy-poll-return-busypolling-status-to.patch b/meta-v1000/recipes-kernel/linux/files/2932-net-busy-poll-return-busypolling-status-to.patch
new file mode 100644
index 00000000..e312e534
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2932-net-busy-poll-return-busypolling-status-to.patch
@@ -0,0 +1,109 @@
+From 85f7cf7f30ad9cf65af674958d47c30e4b2e9e0a Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:00:50 +0530
+Subject: [PATCH] net: busy-poll: return busypolling status to drivers
+
+NAPI drivers use napi_complete_done() or napi_complete() when
+they drained RX ring and right before re-enabling device interrupts.
+
+In busy polling, we can avoid interrupts being delivered since
+we are polling RX ring in a controlled loop.
+
+Drivers can chose to use napi_complete_done() return value
+to reduce interrupts overhead while busy polling is active.
+
+This is optional, legacy drivers should work fine even
+if not updated.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Cc: Adam Belay <abelay@google.com>
+Cc: Tariq Toukan <tariqt@mellanox.com>
+Cc: Yuval Mintz <Yuval.Mintz@cavium.com>
+Cc: Ariel Elior <ariel.elior@cavium.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ include/linux/netdevice.h | 7 ++++---
+ net/core/dev.c | 10 ++++++----
+ 2 files changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 6b5548c..234e6ea 100755
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -463,16 +463,17 @@ static inline bool napi_reschedule(struct napi_struct *napi)
+ return false;
+ }
+
+-void __napi_complete(struct napi_struct *n);
+-void napi_complete_done(struct napi_struct *n, int work_done);
++bool __napi_complete(struct napi_struct *n);
++bool napi_complete_done(struct napi_struct *n, int work_done);
+ /**
+ * napi_complete - NAPI processing complete
+ * @n: NAPI context
+ *
+ * Mark NAPI processing as complete.
+ * Consider using napi_complete_done() instead.
++ * Return false if device should avoid rearming interrupts.
+ */
+-static inline void napi_complete(struct napi_struct *n)
++static inline bool napi_complete(struct napi_struct *n)
+ {
+ return napi_complete_done(n, 0);
+ }
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 084fe5c..5e8891b 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4936,7 +4936,7 @@ void __napi_schedule_irqoff(struct napi_struct *n)
+ }
+ EXPORT_SYMBOL(__napi_schedule_irqoff);
+
+-void __napi_complete(struct napi_struct *n)
++bool __napi_complete(struct napi_struct *n)
+ {
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+
+@@ -4944,15 +4944,16 @@ void __napi_complete(struct napi_struct *n)
+ * napi_complete_done().
+ */
+ if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
+- return;
++ return false;
+
+ list_del_init(&n->poll_list);
+ smp_mb__before_atomic();
+ clear_bit(NAPI_STATE_SCHED, &n->state);
++ return true;
+ }
+ EXPORT_SYMBOL(__napi_complete);
+
+-void napi_complete_done(struct napi_struct *n, int work_done)
++bool napi_complete_done(struct napi_struct *n, int work_done)
+ {
+ unsigned long flags;
+
+@@ -4964,7 +4965,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
+ */
+ if (unlikely(n->state & (NAPIF_STATE_NPSVC |
+ NAPIF_STATE_IN_BUSY_POLL)))
+- return;
++ return false;
+
+ if (n->gro_list) {
+ unsigned long timeout = 0;
+@@ -4986,6 +4987,7 @@ void napi_complete_done(struct napi_struct *n, int work_done)
+ __napi_complete(n);
+ local_irq_restore(flags);
+ }
++ return true;
+ }
+ EXPORT_SYMBOL(napi_complete_done);
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2933-net-remove-__napi_complete-All-__napi_complete.patch b/meta-v1000/recipes-kernel/linux/files/2933-net-remove-__napi_complete-All-__napi_complete.patch
new file mode 100644
index 00000000..82341234
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2933-net-remove-__napi_complete-All-__napi_complete.patch
@@ -0,0 +1,47 @@
+From 2bb730c76b5b82969b5f4c396bef0a790bc1a755 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:01:53 +0530
+Subject: [PATCH] net: remove __napi_complete() All __napi_complete() callers
+ have been converted to use the more standard napi_complete_done(), we can
+ now remove this NAPI method for good.
+
+Modified From 02c1602ee7b3e3d062c3eacd374d6a6e3a2ebb73
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ net/core/dev.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+ mode change 100644 => 100755 net/core/dev.c
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+old mode 100644
+new mode 100755
+index 5e8891b..dd1ca30
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4979,14 +4979,13 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
+ else
+ napi_gro_flush(n, false);
+ }
+- if (likely(list_empty(&n->poll_list))) {
+- WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
+- } else {
++ if (unlikely(!list_empty(&n->poll_list))) {
+ /* If n->poll_list is not empty, we need to mask irqs */
+ local_irq_save(flags);
+- __napi_complete(n);
++ list_del_init(&n->poll_list);
+ local_irq_restore(flags);
+ }
++ WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
+ return true;
+ }
+ EXPORT_SYMBOL(napi_complete_done);
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2934-amd-xgbe-Enable-IRQs-only-if-napi_complete_done.patch b/meta-v1000/recipes-kernel/linux/files/2934-amd-xgbe-Enable-IRQs-only-if-napi_complete_done.patch
new file mode 100644
index 00000000..40b9e2b9
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2934-amd-xgbe-Enable-IRQs-only-if-napi_complete_done.patch
@@ -0,0 +1,54 @@
+From c40f08fdbd4f7f05419adf0c601cdf45c9f3fc6b Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:02:48 +0530
+Subject: [PATCH] amd-xgbe: Enable IRQs only if napi_complete_done() is true
+
+Depending on the hardware, the amd-xgbe driver may use disable_irq_nosync()
+and enable_irq() when an interrupt is received to process Rx packets. If
+the napi_complete_done() return value isn't checked an unbalanced enable
+for the IRQ could result, generating a warning stack trace.
+
+Update the driver to only enable interrupts if napi_complete_done() returns
+true.
+
+Reported-by: Jeremy Linton <jeremy.linton@arm.com>
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ++--------
+ 1 file changed, 2 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 0532b6b..36fd1a1 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -2298,10 +2298,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
+ processed = xgbe_rx_poll(channel, budget);
+
+ /* If we processed everything, we are done */
+- if (processed < budget) {
+- /* Turn off polling */
+- napi_complete_done(napi, processed);
+-
++ if ((processed < budget) && napi_complete_done(napi, processed)) {
+ /* Enable Tx and Rx interrupts */
+ if (pdata->channel_irq_mode)
+ xgbe_enable_rx_tx_int(pdata, channel);
+@@ -2343,10 +2340,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
+ } while ((processed < budget) && (processed != last_processed));
+
+ /* If we processed everything, we are done */
+- if (processed < budget) {
+- /* Turn off polling */
+- napi_complete_done(napi, processed);
+-
++ if ((processed < budget) && napi_complete_done(napi, processed)) {
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2935-amd-xgbe-Fix-the-ECC-related-bit-position.patch b/meta-v1000/recipes-kernel/linux/files/2935-amd-xgbe-Fix-the-ECC-related-bit-position.patch
new file mode 100644
index 00000000..7907d4c5
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2935-amd-xgbe-Fix-the-ECC-related-bit-position.patch
@@ -0,0 +1,69 @@
+From 0e25ad8489b78111548b762eb6810b989516e57b Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:16:22 +0530
+Subject: [PATCH] amd-xgbe: Fix the ECC-related bit position definitions
+
+The ECC bit positions that describe whether the ECC interrupt is for
+Tx, Rx or descriptor memory and whether it is a single correctable
+or double detected error were defined incorrectly (reversed order).
+Fix the bit position definitions for these settings so that the proper
+ECC handling is performed.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 86f1626..127adbe 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -984,29 +984,29 @@
+ #define XP_ECC_CNT1_DESC_DED_WIDTH 8
+ #define XP_ECC_CNT1_DESC_SEC_INDEX 0
+ #define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+-#define XP_ECC_IER_DESC_DED_INDEX 0
++#define XP_ECC_IER_DESC_DED_INDEX 5
+ #define XP_ECC_IER_DESC_DED_WIDTH 1
+-#define XP_ECC_IER_DESC_SEC_INDEX 1
++#define XP_ECC_IER_DESC_SEC_INDEX 4
+ #define XP_ECC_IER_DESC_SEC_WIDTH 1
+-#define XP_ECC_IER_RX_DED_INDEX 2
++#define XP_ECC_IER_RX_DED_INDEX 3
+ #define XP_ECC_IER_RX_DED_WIDTH 1
+-#define XP_ECC_IER_RX_SEC_INDEX 3
++#define XP_ECC_IER_RX_SEC_INDEX 2
+ #define XP_ECC_IER_RX_SEC_WIDTH 1
+-#define XP_ECC_IER_TX_DED_INDEX 4
++#define XP_ECC_IER_TX_DED_INDEX 1
+ #define XP_ECC_IER_TX_DED_WIDTH 1
+-#define XP_ECC_IER_TX_SEC_INDEX 5
++#define XP_ECC_IER_TX_SEC_INDEX 0
+ #define XP_ECC_IER_TX_SEC_WIDTH 1
+-#define XP_ECC_ISR_DESC_DED_INDEX 0
++#define XP_ECC_ISR_DESC_DED_INDEX 5
+ #define XP_ECC_ISR_DESC_DED_WIDTH 1
+-#define XP_ECC_ISR_DESC_SEC_INDEX 1
++#define XP_ECC_ISR_DESC_SEC_INDEX 4
+ #define XP_ECC_ISR_DESC_SEC_WIDTH 1
+-#define XP_ECC_ISR_RX_DED_INDEX 2
++#define XP_ECC_ISR_RX_DED_INDEX 3
+ #define XP_ECC_ISR_RX_DED_WIDTH 1
+-#define XP_ECC_ISR_RX_SEC_INDEX 3
++#define XP_ECC_ISR_RX_SEC_INDEX 2
+ #define XP_ECC_ISR_RX_SEC_WIDTH 1
+-#define XP_ECC_ISR_TX_DED_INDEX 4
++#define XP_ECC_ISR_TX_DED_INDEX 1
+ #define XP_ECC_ISR_TX_DED_WIDTH 1
+-#define XP_ECC_ISR_TX_SEC_INDEX 5
++#define XP_ECC_ISR_TX_SEC_INDEX 0
+ #define XP_ECC_ISR_TX_SEC_WIDTH 1
+ #define XP_I2C_MUTEX_BUSY_INDEX 31
+ #define XP_I2C_MUTEX_BUSY_WIDTH 1
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2936-net-ethernet-update-drivers-to-make-both-SW-and.patch b/meta-v1000/recipes-kernel/linux/files/2936-net-ethernet-update-drivers-to-make-both-SW-and.patch
new file mode 100644
index 00000000..4ea4f4d2
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2936-net-ethernet-update-drivers-to-make-both-SW-and.patch
@@ -0,0 +1,40 @@
+From a25ace96a5218a49e94fb97814fbf60d9ed9791a Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:25:29 +0530
+Subject: [PATCH] net: ethernet: update drivers to make both SW and HW TX
+ timestamps
+
+Some drivers were calling the skb_tx_timestamp() function only when
+a hardware timestamp was not requested. Now that applications can use
+the SOF_TIMESTAMPING_OPT_TX_SWHW option to request both software and
+hardware timestamps, the drivers need to be modified to unconditionally
+call skb_tx_timestamp().
+
+CC: Richard Cochran <richardcochran@gmail.com>
+CC: Willem de Bruijn <willemb@google.com>
+Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 36fd1a1..824fcec 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1389,8 +1389,7 @@ static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+- skb_tx_timestamp(skb);
++ skb_tx_timestamp(skb);
+ }
+
+ static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2937-amd-xgbe-use-PAGE_ALLOC_COSTLY_ORDER-in.patch b/meta-v1000/recipes-kernel/linux/files/2937-amd-xgbe-use-PAGE_ALLOC_COSTLY_ORDER-in.patch
new file mode 100644
index 00000000..d29bbf57
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2937-amd-xgbe-use-PAGE_ALLOC_COSTLY_ORDER-in.patch
@@ -0,0 +1,51 @@
+From 252fbb764df24617006b48ef7d99a6d5f3e9fc89 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:27:03 +0530
+Subject: [PATCH] amd-xgbe: use PAGE_ALLOC_COSTLY_ORDER in xgbe_map_rx_buffer
+
+xgbe_map_rx_buffer is rather confused about what PAGE_ALLOC_COSTLY_ORDER
+means. It uses PAGE_ALLOC_COSTLY_ORDER-1 assuming that
+PAGE_ALLOC_COSTLY_ORDER is the first costly order which is not the case
+actually because orders larger than that are costly. And even that
+applies only to sleeping allocations which is not the case here. We
+simply do not perform any costly operations like reclaim or compaction
+for those. Simplify the code by dropping the order calculation and use
+PAGE_ALLOC_COSTLY_ORDER directly.
+
+Signed-off-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index b3bc87f..0a98c36 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring,
+ struct xgbe_ring_data *rdata)
+ {
+- int order, ret;
++ int ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
+@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+- order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
+ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+- order);
++ PAGE_ALLOC_COSTLY_ORDER);
+ if (ret)
+ return ret;
+ }
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2938-amd-xgbe-Simplify-mailbox-interface-rate-change.patch b/meta-v1000/recipes-kernel/linux/files/2938-amd-xgbe-Simplify-mailbox-interface-rate-change.patch
new file mode 100644
index 00000000..863a3fc4
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2938-amd-xgbe-Simplify-mailbox-interface-rate-change.patch
@@ -0,0 +1,289 @@
+From af1067bddc263735b33f65dc787d1757f710f0af Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:28:01 +0530
+Subject: [PATCH] amd-xgbe: Simplify mailbox interface rate change code
+
+Simplify and centralize the mailbox command rate change interface by
+having a single function perform the writes to the mailbox registers
+to issue the request.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 155 ++++++----------------------
+ 1 file changed, 29 insertions(+), 126 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index e707c49..0429840 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1694,19 +1694,25 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
+ xgbe_phy_put_comm_ownership(pdata);
+ }
+
+-static void xgbe_phy_start_ratechange(struct xgbe_prv_data *pdata)
++static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
++ unsigned int cmd, unsigned int sub_cmd)
+ {
+- if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+- return;
++ unsigned int s0 = 0;
++ unsigned int wait;
+
+ /* Log if a previous command did not complete */
+- netif_dbg(pdata, link, pdata->netdev,
+- "firmware mailbox not ready for command\n");
+-}
++ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ netif_dbg(pdata, link, pdata->netdev,
++ "firmware mailbox not ready for command\n");
+
+-static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
+-{
+- unsigned int wait;
++ /* Construct the command */
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
++ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd);
++
++ /* Issue the command */
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
++ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
++ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ /* Wait for command to complete */
+ wait = XGBE_RATECHANGE_COUNT;
+@@ -1723,21 +1729,8 @@ static void xgbe_phy_complete_ratechange(struct xgbe_prv_data *pdata)
+
+ static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
+ {
+- unsigned int s0;
+-
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* Receiver Reset Cycle */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ xgbe_phy_perform_ratechange(pdata, 5, 0);
+
+ netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n");
+ }
+@@ -1746,14 +1739,8 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+- xgbe_phy_start_ratechange(pdata);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ /* Power off */
++ xgbe_phy_perform_ratechange(pdata, 0, 0);
+
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+@@ -1763,33 +1750,21 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* 10G/SFI */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
+ if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) {
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
++ xgbe_phy_perform_ratechange(pdata, 3, 0);
+ } else {
+ if (phy_data->sfp_cable_len <= 1)
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
++ xgbe_phy_perform_ratechange(pdata, 3, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
++ xgbe_phy_perform_ratechange(pdata, 3, 2);
+ else
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
++ xgbe_phy_perform_ratechange(pdata, 3, 3);
+ }
+
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
+-
+ phy_data->cur_mode = XGBE_MODE_SFI;
+
+ netif_dbg(pdata, link, pdata->netdev, "10GbE SFI mode set\n");
+@@ -1798,23 +1773,11 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* 1G/X */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ xgbe_phy_perform_ratechange(pdata, 1, 3);
+
+ phy_data->cur_mode = XGBE_MODE_X;
+
+@@ -1824,23 +1787,11 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* 1G/SGMII */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ xgbe_phy_perform_ratechange(pdata, 1, 2);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_1000;
+
+@@ -1850,23 +1801,11 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+- /* 1G/SGMII */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ /* 100M/SGMII */
++ xgbe_phy_perform_ratechange(pdata, 1, 1);
+
+ phy_data->cur_mode = XGBE_MODE_SGMII_100;
+
+@@ -1876,23 +1815,11 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* 10G/KR */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ xgbe_phy_perform_ratechange(pdata, 4, 0);
+
+ phy_data->cur_mode = XGBE_MODE_KR;
+
+@@ -1902,23 +1829,11 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* 2.5G/KX */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 2);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ xgbe_phy_perform_ratechange(pdata, 2, 0);
+
+ phy_data->cur_mode = XGBE_MODE_KX_2500;
+
+@@ -1928,23 +1843,11 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int s0;
+
+ xgbe_phy_set_redrv_mode(pdata);
+
+- xgbe_phy_start_ratechange(pdata);
+-
+ /* 1G/KX */
+- s0 = 0;
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 1);
+- XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+-
+- /* Call FW to make the change */
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+- XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+- XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+-
+- xgbe_phy_complete_ratechange(pdata);
++ xgbe_phy_perform_ratechange(pdata, 1, 3);
+
+ phy_data->cur_mode = XGBE_MODE_KX_1000;
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2939-amd-xgbe-Fix-SFP-PHY-supported-advertised-settings.patch b/meta-v1000/recipes-kernel/linux/files/2939-amd-xgbe-Fix-SFP-PHY-supported-advertised-settings.patch
new file mode 100644
index 00000000..acd00d16
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2939-amd-xgbe-Fix-SFP-PHY-supported-advertised-settings.patch
@@ -0,0 +1,170 @@
+From fcd07e29f1434a114feb35581b3ce0264965bbbc Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:28:59 +0530
+Subject: [PATCH] amd-xgbe: Fix SFP PHY supported/advertised settings
+
+When using SFPs, the supported and advertised settings should be initially
+based on the SFP that has been detected. The code currently indicates the
+overall support of the device as opposed to what the SFP is capable of.
+Update the code to change the supported link modes, auto-negotiation, etc.
+to be based on the installed SFP.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 69 ++++++++++++++++++++---------
+ 1 file changed, 47 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 0429840..756e116 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -711,23 +711,39 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
++ if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
++ return;
++
++ pdata->phy.supported &= ~SUPPORTED_Autoneg;
++ pdata->phy.supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
++ pdata->phy.supported &= ~SUPPORTED_TP;
++ pdata->phy.supported &= ~SUPPORTED_FIBRE;
++ pdata->phy.supported &= ~SUPPORTED_100baseT_Full;
++ pdata->phy.supported &= ~SUPPORTED_1000baseT_Full;
++ pdata->phy.supported &= ~SUPPORTED_10000baseT_Full;
++
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
++ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
++
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ pdata->phy.supported |= SUPPORTED_TP;
++ pdata->phy.supported |= SUPPORTED_FIBRE;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++
+ pdata->phy.advertising = pdata->phy.supported;
+
+ return;
+ }
+
+- pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+- pdata->phy.advertising &= ~ADVERTISED_TP;
+- pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+- pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+- pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+- pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+- pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+-
+ switch (phy_data->sfp_base) {
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_SX:
+@@ -736,17 +752,25 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+- pdata->phy.advertising |= ADVERTISED_Autoneg;
++ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
++ pdata->phy.supported |= SUPPORTED_Autoneg;
++ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+ case XGBE_SFP_BASE_10000_LRM:
+ case XGBE_SFP_BASE_10000_ER:
+ case XGBE_SFP_BASE_10000_CR:
+- default:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
++ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
++ break;
++ default:
++ pdata->phy.speed = SPEED_UNKNOWN;
++ pdata->phy.duplex = DUPLEX_UNKNOWN;
++ pdata->phy.autoneg = AUTONEG_DISABLE;
++ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+@@ -754,36 +778,38 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_CX:
+ case XGBE_SFP_BASE_10000_CR:
+- pdata->phy.advertising |= ADVERTISED_TP;
++ pdata->phy.supported |= SUPPORTED_TP;
+ break;
+ default:
+- pdata->phy.advertising |= ADVERTISED_FIBRE;
++ pdata->phy.supported |= SUPPORTED_FIBRE;
+ }
+
+ switch (phy_data->sfp_speed) {
+ case XGBE_SFP_SPEED_100_1000:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+- pdata->phy.advertising |= ADVERTISED_100baseT_Full;
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ break;
+ case XGBE_SFP_SPEED_1000:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ break;
+ case XGBE_SFP_SPEED_10000:
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+- pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ break;
+ default:
+ /* Choose the fastest supported speed */
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+- pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
++ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
++ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+- pdata->phy.advertising |= ADVERTISED_100baseT_Full;
++ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ }
++
++ pdata->phy.advertising = pdata->phy.supported;
+ }
+
+ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+@@ -2113,6 +2139,8 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case XGBE_MODE_SFI:
++ if (phy_data->sfp_mod_absent)
++ return true;
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+@@ -2916,9 +2944,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = XGBE_MODE_SFI;
+- if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+- pdata->phy.supported |=
+- SUPPORTED_10000baseR_FEC;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2940-amd-xgbe-Use-the-proper-register-during-PTP.patch b/meta-v1000/recipes-kernel/linux/files/2940-amd-xgbe-Use-the-proper-register-during-PTP.patch
new file mode 100644
index 00000000..e5cb744a
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2940-amd-xgbe-Use-the-proper-register-during-PTP.patch
@@ -0,0 +1,36 @@
+From 600f18132fb5958d3762c3df8a91156ba072ed66 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:30:01 +0530
+Subject: [PATCH] amd-xgbe: Use the proper register during PTP initialization
+
+During PTP initialization, the Timestamp Control register should be
+cleared and not the Tx Configuration register. While this typo causes
+the wrong register to be cleared, the default value of each register and
+and the fact that the Tx Configuration register is programmed afterwards
+doesn't result in a bug, hence only fixing in net-next.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-ptp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+index a533a6c..d06d260 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
+@@ -267,7 +267,7 @@ void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+ ktime_to_ns(ktime_get_real()));
+
+ /* Disable all timestamping to start */
+- XGMAC_IOWRITE(pdata, MAC_TCR, 0);
++ XGMAC_IOWRITE(pdata, MAC_TSCR, 0);
+ pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2941-amd-xgbe-Add-a-check-for-an-skb-in-the-timestamp.patch b/meta-v1000/recipes-kernel/linux/files/2941-amd-xgbe-Add-a-check-for-an-skb-in-the-timestamp.patch
new file mode 100644
index 00000000..1d9c3dbb
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2941-amd-xgbe-Add-a-check-for-an-skb-in-the-timestamp.patch
@@ -0,0 +1,47 @@
+From b911ee90cb9d593f340643c88e97c1a592fc908e Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:31:19 +0530
+Subject: [PATCH] amd-xgbe: Add a check for an skb in the timestamp path
+
+Spurious Tx timestamp interrupts can cause an oops in the Tx timestamp
+processing function if a Tx timestamp skb is NULL. Add a check to insure
+a Tx timestamp skb is present before attempting to use it.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 824fcec..78064ad 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1211,6 +1211,10 @@ static void xgbe_tx_tstamp(struct work_struct *work)
+ u64 nsec;
+ unsigned long flags;
+
++ spin_lock_irqsave(&pdata->tstamp_lock, flags);
++ if (!pdata->tx_tstamp_skb)
++ goto unlock;
++
+ if (pdata->tx_tstamp) {
+ nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+ pdata->tx_tstamp);
+@@ -1222,8 +1226,9 @@ static void xgbe_tx_tstamp(struct work_struct *work)
+
+ dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+- spin_lock_irqsave(&pdata->tstamp_lock, flags);
+ pdata->tx_tstamp_skb = NULL;
++
++unlock:
+ spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2942-amd-xgbe-Prevent-looping-forever-if-timestamp.patch b/meta-v1000/recipes-kernel/linux/files/2942-amd-xgbe-Prevent-looping-forever-if-timestamp.patch
new file mode 100644
index 00000000..62b4dc82
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2942-amd-xgbe-Prevent-looping-forever-if-timestamp.patch
@@ -0,0 +1,65 @@
+From 21f25131ff1200b7bac486e00150e6a988208b11 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:32:39 +0530
+Subject: [PATCH] amd-xgbe: Prevent looping forever if timestamp update fails
+
+Just to be on the safe side, should the update of the timestamp registers
+not complete, issue a warning rather than looping forever waiting for the
+update to complete.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 24a687c..3ad4036 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1497,26 +1497,37 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+ static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+ unsigned int addend)
+ {
++ unsigned int count = 10000;
++
+ /* Set the addend register value and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+ /* Wait for addend update to complete */
+- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
++ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+ udelay(5);
++
++ if (!count)
++ netdev_err(pdata->netdev,
++ "timed out updating timestamp addend register\n");
+ }
+
+ static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+ unsigned int nsec)
+ {
++ unsigned int count = 10000;
++
+ /* Set the time values and tell the device */
+ XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+ XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+ XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+ /* Wait for time update to complete */
+- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
++ while (--count && XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+ udelay(5);
++
++ if (!count)
++ netdev_err(pdata->netdev, "timed out initializing timestamp\n");
+ }
+
+ static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2943-amd-xgbe-Handle-return-code-from-software-reset.patch b/meta-v1000/recipes-kernel/linux/files/2943-amd-xgbe-Handle-return-code-from-software-reset.patch
new file mode 100644
index 00000000..87111865
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2943-amd-xgbe-Handle-return-code-from-software-reset.patch
@@ -0,0 +1,38 @@
+From cab7a2e151798a469c545a8e9364821d851eb0c5 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:33:33 +0530
+Subject: [PATCH] amd-xgbe: Handle return code from software reset function
+
+Currently the function that performs a software reset of the hardware
+provides a return code. During driver probe check this return code and
+exit with an error if the software reset fails.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 17ac8f9..982368b 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -277,7 +277,11 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ pdata->desc_ded_period = jiffies;
+
+ /* Issue software reset to device */
+- pdata->hw_if.exit(pdata);
++ ret = pdata->hw_if.exit(pdata);
++ if (ret) {
++ dev_err(dev, "software reset failed\n");
++ return ret;
++ }
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2944-amd-xgbe-Fixes-for-working-with-PHYs-that-support.patch b/meta-v1000/recipes-kernel/linux/files/2944-amd-xgbe-Fixes-for-working-with-PHYs-that-support.patch
new file mode 100644
index 00000000..c589558f
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2944-amd-xgbe-Fixes-for-working-with-PHYs-that-support.patch
@@ -0,0 +1,52 @@
+From d7be05f09ff9d64dea057f51a0da1035ad4538dc Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:34:32 +0530
+Subject: [PATCH] amd-xgbe: Fixes for working with PHYs that support 2.5GbE
+
+The driver has some missing functionality when operating in the mode that
+supports 2.5GbE. Fix the driver to fully recognize and support this speed.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 756e116..b8be62e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1966,6 +1966,8 @@ static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data,
+ return XGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return XGBE_MODE_SGMII_1000;
++ case SPEED_2500:
++ return XGBE_MODE_KX_2500;
+ case SPEED_10000:
+ return XGBE_MODE_KR;
+ default:
+@@ -2109,6 +2111,9 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
+ case XGBE_MODE_SGMII_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
++ case XGBE_MODE_KX_2500:
++ return xgbe_phy_check_mode(pdata, mode,
++ ADVERTISED_2500baseX_Full);
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+@@ -2218,6 +2223,8 @@ static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data,
+ case SPEED_100:
+ case SPEED_1000:
+ return true;
++ case SPEED_2500:
++ return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T);
+ case SPEED_10000:
+ return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T);
+ default:
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2945-amd-xgbe-Limit-the-I2C-error-messages-that-are.patch b/meta-v1000/recipes-kernel/linux/files/2945-amd-xgbe-Limit-the-I2C-error-messages-that-are.patch
new file mode 100644
index 00000000..8cefa0d1
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2945-amd-xgbe-Limit-the-I2C-error-messages-that-are.patch
@@ -0,0 +1,55 @@
+From 75d8b8c3686700c1e22534b9c65472cf5539683c Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:35:51 +0530
+Subject: [PATCH] amd-xgbe: Limit the I2C error messages that are output
+
+When I2C communication fails, it tends to always fail. Rather than
+continuously issue an error message (once per second in most cases),
+change the message to be issued just once.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index b8be62e..04b5c14 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1121,7 +1121,8 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
+
+ ret = xgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+- netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
++ dev_err_once(pdata->dev, "%s: I2C error setting SFP MUX\n",
++ netdev_name(pdata->netdev));
+ return ret;
+ }
+
+@@ -1131,7 +1132,8 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+- netdev_err(pdata->netdev, "I2C error reading SFP EEPROM\n");
++ dev_err_once(pdata->dev, "%s: I2C error reading SFP EEPROM\n",
++ netdev_name(pdata->netdev));
+ goto put;
+ }
+
+@@ -1190,7 +1192,8 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata)
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+- netdev_err(pdata->netdev, "I2C error reading SFP GPIOs\n");
++ dev_err_once(pdata->dev, "%s: I2C error reading SFP GPIOs\n",
++ netdev_name(pdata->netdev));
+ return;
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2946-amd-xgbe-Re-issue-interrupt-if-interrupt-status.patch b/meta-v1000/recipes-kernel/linux/files/2946-amd-xgbe-Re-issue-interrupt-if-interrupt-status.patch
new file mode 100644
index 00000000..9d6deb85
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2946-amd-xgbe-Re-issue-interrupt-if-interrupt-status.patch
@@ -0,0 +1,368 @@
+From 86e1b3ca7c8081261eb18f7b40804da2d1a45f57 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:36:50 +0530
+Subject: [PATCH] amd-xgbe: Re-issue interrupt if interrupt status not cleared
+
+Some of the device interrupts should function as level interrupts. For
+some hardware configurations this requires setting some control bits
+so that if the interrupt status has not been cleared the interrupt
+should be reissued.
+
+Additionally, when using MSI or MSI-X interrupts, run the interrupt
+service routine as a tasklet so that the re-issuance of the interrupt
+is handled properly.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1 +
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 53 +++++++++++++++++++++++++----
+ drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 30 +++++++++++++---
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 33 ++++++++++++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 4 +++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 11 ++++--
+ 6 files changed, 115 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 127adbe..e7b6804 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -959,6 +959,7 @@
+ #define XP_DRIVER_INT_RO 0x0064
+ #define XP_DRIVER_SCRATCH_0 0x0068
+ #define XP_DRIVER_SCRATCH_1 0x006c
++#define XP_INT_REISSUE_EN 0x0074
+ #define XP_INT_EN 0x0078
+ #define XP_I2C_MUTEX 0x0080
+ #define XP_MDIO_MUTEX 0x0084
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 78064ad..0c0d6ed 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -381,9 +381,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
+ return false;
+ }
+
+-static irqreturn_t xgbe_ecc_isr(int irq, void *data)
++static void xgbe_ecc_isr_task(unsigned long data)
+ {
+- struct xgbe_prv_data *pdata = data;
++ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ unsigned int ecc_isr;
+ bool stop = false;
+
+@@ -434,12 +434,26 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
+ /* Clear all ECC interrupts */
+ XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
+
+- return IRQ_HANDLED;
++ /* Reissue interrupt if status is not clear */
++ if (pdata->vdata->irq_reissue_support)
++ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
+ }
+
+-static irqreturn_t xgbe_isr(int irq, void *data)
++static irqreturn_t xgbe_ecc_isr(int irq, void *data)
+ {
+ struct xgbe_prv_data *pdata = data;
++
++ if (pdata->isr_as_tasklet)
++ tasklet_schedule(&pdata->tasklet_ecc);
++ else
++ xgbe_ecc_isr_task((unsigned long)pdata);
++
++ return IRQ_HANDLED;
++}
++
++static void xgbe_isr_task(unsigned long data)
++{
++ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+@@ -542,15 +556,36 @@ static irqreturn_t xgbe_isr(int irq, void *data)
+ isr_done:
+ /* If there is not a separate AN irq, handle it here */
+ if (pdata->dev_irq == pdata->an_irq)
+- pdata->phy_if.an_isr(irq, pdata);
++ pdata->phy_if.an_isr(pdata);
+
+ /* If there is not a separate ECC irq, handle it here */
+ if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
+- xgbe_ecc_isr(irq, pdata);
++ xgbe_ecc_isr_task((unsigned long)pdata);
+
+ /* If there is not a separate I2C irq, handle it here */
+ if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
+- pdata->i2c_if.i2c_isr(irq, pdata);
++ pdata->i2c_if.i2c_isr(pdata);
++
++ /* Reissue interrupt if status is not clear */
++ if (pdata->vdata->irq_reissue_support) {
++ unsigned int reissue_mask;
++
++ reissue_mask = 1 << 0;
++ if (!pdata->per_channel_irq)
++			reissue_mask |= 0xffff << 4;
++
++ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
++ }
++}
++
++static irqreturn_t xgbe_isr(int irq, void *data)
++{
++ struct xgbe_prv_data *pdata = data;
++
++ if (pdata->isr_as_tasklet)
++ tasklet_schedule(&pdata->tasklet_dev);
++ else
++ xgbe_isr_task((unsigned long)pdata);
+
+ return IRQ_HANDLED;
+ }
+@@ -825,6 +860,10 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ unsigned int i;
+ int ret;
+
++ tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
++ tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
++ (unsigned long)pdata);
++
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+index 0c7088a..dc74341 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+@@ -273,13 +273,16 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+ }
+
+-static irqreturn_t xgbe_i2c_isr(int irq, void *data)
++static void xgbe_i2c_isr_task(unsigned long data)
+ {
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
++ if (!isr)
++ goto reissue_check;
++
+ netif_dbg(pdata, intr, pdata->netdev,
+ "I2C interrupt received: status=%#010x\n", isr);
+
+@@ -307,6 +310,21 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ complete(&pdata->i2c_complete);
+
++reissue_check:
++ /* Reissue interrupt if status is not clear */
++ if (pdata->vdata->irq_reissue_support)
++ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2);
++}
++
++static irqreturn_t xgbe_i2c_isr(int irq, void *data)
++{
++ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
++
++ if (pdata->isr_as_tasklet)
++ tasklet_schedule(&pdata->tasklet_i2c);
++ else
++ xgbe_i2c_isr_task((unsigned long)pdata);
++
+ return IRQ_HANDLED;
+ }
+
+@@ -348,12 +366,11 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+ }
+
+-static irqreturn_t xgbe_i2c_combined_isr(int irq, struct xgbe_prv_data *pdata)
++static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
+ {
+- if (!XI2C_IOREAD(pdata, IC_RAW_INTR_STAT))
+- return IRQ_HANDLED;
++ xgbe_i2c_isr_task((unsigned long)pdata);
+
+- return xgbe_i2c_isr(irq, pdata);
++ return IRQ_HANDLED;
+ }
+
+ static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op)
+@@ -444,6 +461,9 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
+
+ /* If we have a separate I2C irq, enable it */
+ if (pdata->dev_irq != pdata->i2c_irq) {
++ tasklet_init(&pdata->tasklet_i2c, xgbe_i2c_isr_task,
++ (unsigned long)pdata);
++
+ ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
+ xgbe_i2c_isr, 0, pdata->i2c_name,
+ pdata);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 4c5b90e..d0b9950 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -664,6 +664,10 @@ static void xgbe_an37_isr(struct xgbe_prv_data *pdata)
+ } else {
+ /* Enable AN interrupts */
+ xgbe_an37_enable_interrupts(pdata);
++
++ /* Reissue interrupt if status is not clear */
++ if (pdata->vdata->irq_reissue_support)
++ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+ }
+ }
+
+@@ -683,10 +687,14 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
+ } else {
+ /* Enable AN interrupts */
+ xgbe_an73_enable_interrupts(pdata);
++
++ /* Reissue interrupt if status is not clear */
++ if (pdata->vdata->irq_reissue_support)
++ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
+ }
+ }
+
+-static irqreturn_t xgbe_an_isr(int irq, void *data)
++static void xgbe_an_isr_task(unsigned long data)
+ {
+ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+
+@@ -704,13 +712,25 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
+ default:
+ break;
+ }
++}
++
++static irqreturn_t xgbe_an_isr(int irq, void *data)
++{
++ struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
++
++ if (pdata->isr_as_tasklet)
++ tasklet_schedule(&pdata->tasklet_an);
++ else
++ xgbe_an_isr_task((unsigned long)pdata);
+
+ return IRQ_HANDLED;
+ }
+
+-static irqreturn_t xgbe_an_combined_isr(int irq, struct xgbe_prv_data *pdata)
++static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
+ {
+- return xgbe_an_isr(irq, pdata);
++ xgbe_an_isr_task((unsigned long)pdata);
++
++ return IRQ_HANDLED;
+ }
+
+ static void xgbe_an_irq_work(struct work_struct *work)
+@@ -914,6 +934,10 @@ static void xgbe_an_state_machine(struct work_struct *work)
+ break;
+ }
+
++ /* Reissue interrupt if status is not clear */
++ if (pdata->vdata->irq_reissue_support)
++ XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3);
++
+ mutex_unlock(&pdata->an_mutex);
+ }
+
+@@ -1378,6 +1402,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
+
+ /* If we have a separate AN irq, enable it */
+ if (pdata->dev_irq != pdata->an_irq) {
++ tasklet_init(&pdata->tasklet_an, xgbe_an_isr_task,
++ (unsigned long)pdata);
++
+ ret = devm_request_irq(pdata->dev, pdata->an_irq,
+ xgbe_an_isr, 0, pdata->an_name,
+ pdata);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index 38392a5..f0c2e88 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -139,6 +139,7 @@ static int xgbe_config_multi_msi(struct xgbe_prv_data *pdata)
+ return ret;
+ }
+
++ pdata->isr_as_tasklet = 1;
+ pdata->irq_count = ret;
+
+ pdata->dev_irq = pci_irq_vector(pdata->pcidev, 0);
+@@ -175,6 +176,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
+ return ret;
+ }
+
++ pdata->isr_as_tasklet = pdata->pcidev->msi_enabled ? 1 : 0;
+ pdata->irq_count = 1;
+ pdata->channel_irq_count = 1;
+
+@@ -445,6 +447,7 @@ static const struct xgbe_version_data xgbe_v2a = {
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
++ .irq_reissue_support = 1,
+ };
+
+ static const struct xgbe_version_data xgbe_v2b = {
+@@ -456,6 +459,7 @@ static const struct xgbe_version_data xgbe_v2b = {
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
++ .irq_reissue_support = 1,
+ };
+
+ static const struct pci_device_id xgbe_pci_table[] = {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index f9a2463..2834961 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -837,7 +837,7 @@ struct xgbe_phy_if {
+ bool (*phy_valid_speed)(struct xgbe_prv_data *, int);
+
+ /* For single interrupt support */
+- irqreturn_t (*an_isr)(int, struct xgbe_prv_data *);
++ irqreturn_t (*an_isr)(struct xgbe_prv_data *);
+
+ /* PHY implementation specific services */
+ struct xgbe_phy_impl_if phy_impl;
+@@ -855,7 +855,7 @@ struct xgbe_i2c_if {
+ int (*i2c_xfer)(struct xgbe_prv_data *, struct xgbe_i2c_op *);
+
+ /* For single interrupt support */
+- irqreturn_t (*i2c_isr)(int, struct xgbe_prv_data *);
++ irqreturn_t (*i2c_isr)(struct xgbe_prv_data *);
+ };
+
+ struct xgbe_desc_if {
+@@ -924,6 +924,7 @@ struct xgbe_version_data {
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
++ unsigned int irq_reissue_support;
+ };
+
+ struct xgbe_prv_data {
+@@ -1159,6 +1160,12 @@ struct xgbe_prv_data {
+
+ unsigned int lpm_ctrl; /* CTRL1 for resume */
+
++ unsigned int isr_as_tasklet;
++ struct tasklet_struct tasklet_dev;
++ struct tasklet_struct tasklet_ecc;
++ struct tasklet_struct tasklet_i2c;
++ struct tasklet_struct tasklet_an;
++
+ #ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2947-amd-xgbe-Add-NUMA-affinity-support-for-memory.patch b/meta-v1000/recipes-kernel/linux/files/2947-amd-xgbe-Add-NUMA-affinity-support-for-memory.patch
new file mode 100644
index 00000000..cef4874b
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2947-amd-xgbe-Add-NUMA-affinity-support-for-memory.patch
@@ -0,0 +1,933 @@
+From 4a1b9b51cd2a34e8e4a9fdc3aa9b36e2a7e3117b Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:37:52 +0530
+Subject: [PATCH] amd-xgbe: Add NUMA affinity support for memory allocations
+
+Add support to perform memory allocations on the node of the device. The
+original allocation or the ring structure and Tx/Rx queues allocated all
+of the memory at once and then carved it up for each channel and queue.
+To best ensure that we get as much memory from the NUMA node as we can,
+break the channel and ring allocations into individual allocations.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 94 +++++++++++-----
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 135 +++++++++--------------
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 177 ++++++++++++++++--------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +-
+ 4 files changed, 217 insertions(+), 194 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index 0a98c36..45d9230 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -176,8 +176,8 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+@@ -185,34 +185,60 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+ DBGPR("<--xgbe_free_ring_resources\n");
+ }
+
++static void *xgbe_alloc_node(size_t size, int node)
++{
++ void *mem;
++
++ mem = kzalloc_node(size, GFP_KERNEL, node);
++ if (!mem)
++ mem = kzalloc(size, GFP_KERNEL);
++
++ return mem;
++}
++
++static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
++ dma_addr_t *dma, int node)
++{
++ void *mem;
++ int cur_node = dev_to_node(dev);
++
++ set_dev_node(dev, node);
++ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
++ set_dev_node(dev, cur_node);
++
++ if (!mem)
++ mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
++
++ return mem;
++}
++
+ static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+ {
+- DBGPR("-->xgbe_init_ring\n");
++ size_t size;
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
++ size = rdesc_count * sizeof(struct xgbe_ring_desc);
++
+ ring->rdesc_count = rdesc_count;
+- ring->rdesc = dma_alloc_coherent(pdata->dev,
+- (sizeof(struct xgbe_ring_desc) *
+- rdesc_count), &ring->rdesc_dma,
+- GFP_KERNEL);
++ ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
++ ring->node);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+- ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
+- GFP_KERNEL);
++ size = rdesc_count * sizeof(struct xgbe_ring_data);
++
++ ring->rdata = xgbe_alloc_node(size, ring->node);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ netif_dbg(pdata, drv, pdata->netdev,
+- "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
+- ring->rdesc, &ring->rdesc_dma, ring->rdata);
+-
+- DBGPR("<--xgbe_init_ring\n");
++ "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
++ ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
+
+ return 0;
+ }
+@@ -223,10 +249,8 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+ unsigned int i;
+ int ret;
+
+- DBGPR("-->xgbe_alloc_ring_resources\n");
+-
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
+ channel->name);
+
+@@ -250,8 +274,6 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+ }
+ }
+
+- DBGPR("<--xgbe_alloc_ring_resources\n");
+-
+ return 0;
+
+ err_ring:
+@@ -261,21 +283,33 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+ }
+
+ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+- struct xgbe_page_alloc *pa, gfp_t gfp, int order)
++ struct xgbe_page_alloc *pa, int alloc_order,
++ int node)
+ {
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+- int ret;
++ gfp_t gfp;
++ int order, ret;
++
++again:
++ order = alloc_order;
+
+ /* Try to obtain pages, decreasing order if necessary */
+- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
++ gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+- pages = alloc_pages(gfp, order);
++ pages = alloc_pages_node(node, gfp, order);
+ if (pages)
+ break;
+
+ order--;
+ }
++
++ /* If we couldn't get local pages, try getting from anywhere */
++ if (!pages && (node != NUMA_NO_NODE)) {
++ node = NUMA_NO_NODE;
++ goto again;
++ }
++
+ if (!pages)
+ return -ENOMEM;
+
+@@ -327,14 +361,14 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
+ int ret;
+
+ if (!ring->rx_hdr_pa.pages) {
+- ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
++ ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
+ if (ret)
+ return ret;
+ }
+
+ if (!ring->rx_buf_pa.pages) {
+- ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
+- PAGE_ALLOC_COSTLY_ORDER);
++ ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
++ PAGE_ALLOC_COSTLY_ORDER, ring->node);
+ if (ret)
+ return ret;
+ }
+@@ -362,8 +396,8 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+@@ -403,8 +437,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 3ad4036..b05393f 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -176,12 +176,10 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+
+ static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
++ for (i = 0; i < pdata->channel_count; i++)
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+
+ return 0;
+@@ -189,20 +187,18 @@ static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+
+ static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+ {
+- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
++ return XGMAC_DMA_IOREAD_BITS(pdata->channel[0], DMA_CH_TCR, PBL);
+ }
+
+ static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+@@ -211,20 +207,18 @@ static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+
+ static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+ {
+- return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
++ return XGMAC_DMA_IOREAD_BITS(pdata->channel[0], DMA_CH_RCR, PBL);
+ }
+
+ static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+@@ -233,15 +227,13 @@ static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+
+ static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+@@ -292,15 +284,13 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+
+ static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+@@ -314,44 +304,38 @@ static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+
+ static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+ }
+
+ static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
+ }
+ }
+
+ static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
+ }
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
+@@ -651,8 +635,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
+ pdata->channel_irq_mode);
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
++
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+@@ -3213,16 +3198,14 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
+
+ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+@@ -3236,7 +3219,6 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+
+ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+@@ -3251,12 +3233,11 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
+ }
+ }
+
+@@ -3288,16 +3269,14 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
+
+ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
+@@ -3315,7 +3294,6 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+
+ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+@@ -3332,27 +3310,24 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
+ }
+ }
+
+ static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+@@ -3361,7 +3336,6 @@ static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+
+ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Prepare for Tx DMA channel stop */
+@@ -3372,42 +3346,37 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->tx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->tx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
+ }
+ }
+
+ static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
+ }
+ }
+
+ static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- if (!channel->rx_ring)
++ for (i = 0; i < pdata->channel_count; i++) {
++ if (!pdata->channel[i]->rx_ring)
+ break;
+
+- XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 0c0d6ed..73e513f 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -157,81 +157,100 @@ static int xgbe_one_poll(struct napi_struct *, int);
+ static int xgbe_all_poll(struct napi_struct *, int);
+ static void xgbe_stop(struct xgbe_prv_data *);
+
+-static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
++static void *xgbe_alloc_node(size_t size, int node)
+ {
+- struct xgbe_channel *channel_mem, *channel;
+- struct xgbe_ring *tx_ring, *rx_ring;
+- unsigned int count, i;
+- int ret = -ENOMEM;
++ void *mem;
+
+- count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
++ mem = kzalloc_node(size, GFP_KERNEL, node);
++ if (!mem)
++ mem = kzalloc(size, GFP_KERNEL);
++
++ return mem;
++}
++
++static void xgbe_free_channels(struct xgbe_prv_data *pdata)
++{
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
++ if (!pdata->channel[i])
++ continue;
++
++ kfree(pdata->channel[i]->rx_ring);
++ kfree(pdata->channel[i]->tx_ring);
++ kfree(pdata->channel[i]);
++
++ pdata->channel[i] = NULL;
++ }
+
+- channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
+- if (!channel_mem)
+- goto err_channel;
++ pdata->channel_count = 0;
++}
++
++static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_channel *channel;
++ struct xgbe_ring *ring;
++ unsigned int count, i;
++ int node;
+
+- tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
+- GFP_KERNEL);
+- if (!tx_ring)
+- goto err_tx_ring;
++ node = dev_to_node(pdata->dev);
+
+- rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
+- GFP_KERNEL);
+- if (!rx_ring)
+- goto err_rx_ring;
++ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
++ for (i = 0; i < count; i++) {
++ channel = xgbe_alloc_node(sizeof(*channel), node);
++ if (!channel)
++ goto err_mem;
++ pdata->channel[i] = channel;
+
+- for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
++ channel->node = node;
+
+ if (pdata->per_channel_irq)
+ channel->dma_irq = pdata->channel_irq[i];
+
+ if (i < pdata->tx_ring_count) {
+- spin_lock_init(&tx_ring->lock);
+- channel->tx_ring = tx_ring++;
++ ring = xgbe_alloc_node(sizeof(*ring), node);
++ if (!ring)
++ goto err_mem;
++
++ spin_lock_init(&ring->lock);
++ ring->node = node;
++
++ channel->tx_ring = ring;
+ }
+
+ if (i < pdata->rx_ring_count) {
+- spin_lock_init(&rx_ring->lock);
+- channel->rx_ring = rx_ring++;
++ ring = xgbe_alloc_node(sizeof(*ring), node);
++ if (!ring)
++ goto err_mem;
++
++ spin_lock_init(&ring->lock);
++ ring->node = node;
++
++ channel->rx_ring = ring;
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev,
++ "%s: node=%d\n", channel->name, node);
++
++ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+ channel->name, channel->dma_regs, channel->dma_irq,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+- pdata->channel = channel_mem;
+ pdata->channel_count = count;
+
+ return 0;
+
+-err_rx_ring:
+- kfree(tx_ring);
+-
+-err_tx_ring:
+- kfree(channel_mem);
+-
+-err_channel:
+- return ret;
+-}
+-
+-static void xgbe_free_channels(struct xgbe_prv_data *pdata)
+-{
+- if (!pdata->channel)
+- return;
+-
+- kfree(pdata->channel->rx_ring);
+- kfree(pdata->channel->tx_ring);
+- kfree(pdata->channel);
++err_mem:
++ xgbe_free_channels(pdata);
+
+- pdata->channel = NULL;
+- pdata->channel_count = 0;
++ return -ENOMEM;
+ }
+
+ static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+@@ -300,12 +319,10 @@ static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
+
+ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
+- xgbe_enable_rx_tx_int(pdata, channel);
++ for (i = 0; i < pdata->channel_count; i++)
++ xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
+ }
+
+ static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
+@@ -328,12 +345,10 @@ static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
+
+ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_channel *channel;
+ unsigned int i;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
+- xgbe_disable_rx_tx_int(pdata, channel);
++ for (i = 0; i < pdata->channel_count; i++)
++ xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
+ }
+
+ static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
+@@ -474,7 +489,7 @@ static void xgbe_isr_task(unsigned long data)
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+- channel = pdata->channel + i;
++ channel = pdata->channel[i];
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
+@@ -674,8 +689,8 @@ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+ setup_timer(&pdata->service_timer, xgbe_service_timer,
+ (unsigned long)pdata);
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ break;
+
+@@ -696,8 +711,8 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
+
+ del_timer_sync(&pdata->service_timer);
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ break;
+
+@@ -815,8 +830,8 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ if (add)
+ netif_napi_add(pdata->netdev, &channel->napi,
+ xgbe_one_poll, NAPI_POLL_WEIGHT);
+@@ -838,8 +853,8 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
+ unsigned int i;
+
+ if (pdata->per_channel_irq) {
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ napi_disable(&channel->napi);
+
+ if (del)
+@@ -885,8 +900,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ if (!pdata->per_channel_irq)
+ return 0;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ snprintf(channel->dma_irq_name,
+ sizeof(channel->dma_irq_name) - 1,
+ "%s-TxRx-%u", netdev_name(netdev),
+@@ -906,8 +921,11 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+
+ err_dma_irq:
+ /* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+- for (i--, channel--; i < pdata->channel_count; i--, channel--)
++ for (i--; i < pdata->channel_count; i--) {
++ channel = pdata->channel[i];
++
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
++ }
+
+ if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
+ devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
+@@ -931,9 +949,10 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+ if (!pdata->per_channel_irq)
+ return;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
++ }
+ }
+
+ void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+@@ -968,16 +987,14 @@ void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+- struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_data\n");
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- ring = channel->tx_ring;
++ for (i = 0; i < pdata->channel_count; i++) {
++ ring = pdata->channel[i]->tx_ring;
+ if (!ring)
+ break;
+
+@@ -993,16 +1010,14 @@ static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
+ static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+- struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_data\n");
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
+- ring = channel->rx_ring;
++ for (i = 0; i < pdata->channel_count; i++) {
++ ring = pdata->channel[i]->rx_ring;
+ if (!ring)
+ break;
+
+@@ -1178,8 +1193,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+
+ hw_if->exit(pdata);
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ continue;
+
+@@ -1665,7 +1680,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+- channel = pdata->channel + skb->queue_mapping;
++ channel = pdata->channel[skb->queue_mapping];
+ txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+@@ -1877,9 +1892,10 @@ static void xgbe_poll_controller(struct net_device *netdev)
+ DBGPR("-->xgbe_poll_controller\n");
+
+ if (pdata->per_channel_irq) {
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++)
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
+ xgbe_dma_isr(channel->dma_irq, channel);
++ }
+ } else {
+ disable_irq(pdata->dev_irq);
+ xgbe_isr(pdata->dev_irq, pdata);
+@@ -2370,8 +2386,9 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
+ do {
+ last_processed = processed;
+
+- channel = pdata->channel;
+- for (i = 0; i < pdata->channel_count; i++, channel++) {
++ for (i = 0; i < pdata->channel_count; i++) {
++ channel = pdata->channel[i];
++
+ /* Cleanup Tx ring first */
+ xgbe_tx_poll(channel);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 2834961..ac3b558 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -412,6 +412,7 @@ struct xgbe_ring {
+ /* Page allocation for RX buffers */
+ struct xgbe_page_alloc rx_hdr_pa;
+ struct xgbe_page_alloc rx_buf_pa;
++ int node;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+@@ -462,6 +463,8 @@ struct xgbe_channel {
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
++
++ int node;
+ } ____cacheline_aligned;
+
+ enum xgbe_state {
+@@ -1012,7 +1015,7 @@ struct xgbe_prv_data {
+ struct timer_list service_timer;
+
+ /* Rings for Tx/Rx on a DMA channel */
+- struct xgbe_channel *channel;
++ struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS];
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2948-amd-xgbe-Add-NUMA-affinity-support-for-IRQ-hints.patch b/meta-v1000/recipes-kernel/linux/files/2948-amd-xgbe-Add-NUMA-affinity-support-for-IRQ-hints.patch
new file mode 100644
index 00000000..7a48c51a
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2948-amd-xgbe-Add-NUMA-affinity-support-for-IRQ-hints.patch
@@ -0,0 +1,109 @@
+From 6756f7a57caefba382d2e364ef1718a13e92c6f4 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:38:45 +0530
+Subject: [PATCH] amd-xgbe: Add NUMA affinity support for IRQ hints
+
+For IRQ affinity, set the affinity hints for the IRQs to be (initially) on
+the processors corresponding to the NUMA node of the device.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 18 +++++++++++++++---
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 2 ++
+ 2 files changed, 17 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 73e513f..f4e1860 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -191,12 +191,17 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ unsigned int count, i;
++ unsigned int cpu;
+ int node;
+
+- node = dev_to_node(pdata->dev);
+-
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+ for (i = 0; i < count; i++) {
++ /* Attempt to use a CPU on the node the device is on */
++ cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
++
++ /* Set the allocation node based on the returned CPU */
++ node = cpu_to_node(cpu);
++
+ channel = xgbe_alloc_node(sizeof(*channel), node);
+ if (!channel)
+ goto err_mem;
+@@ -208,6 +213,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+ channel->node = node;
++ cpumask_set_cpu(cpu, &channel->affinity_mask);
+
+ if (pdata->per_channel_irq)
+ channel->dma_irq = pdata->channel_irq[i];
+@@ -235,7 +241,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
+ }
+
+ netif_dbg(pdata, drv, pdata->netdev,
+- "%s: node=%d\n", channel->name, node);
++ "%s: cpu=%u, node=%d\n", channel->name, cpu, node);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
+@@ -915,6 +921,9 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ channel->dma_irq);
+ goto err_dma_irq;
+ }
++
++ irq_set_affinity_hint(channel->dma_irq,
++ &channel->affinity_mask);
+ }
+
+ return 0;
+@@ -924,6 +933,7 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ for (i--; i < pdata->channel_count; i--) {
+ channel = pdata->channel[i];
+
++ irq_set_affinity_hint(channel->dma_irq, NULL);
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+
+@@ -951,6 +961,8 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
++
++ irq_set_affinity_hint(channel->dma_irq, NULL);
+ devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ }
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index ac3b558..7b50469 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -128,6 +128,7 @@
+ #include <linux/net_tstamp.h>
+ #include <net/dcbnl.h>
+ #include <linux/completion.h>
++#include <linux/cpumask.h>
+
+ #define XGBE_DRV_NAME "amd-xgbe"
+ #define XGBE_DRV_VERSION "1.0.3"
+@@ -465,6 +466,7 @@ struct xgbe_channel {
+ struct xgbe_ring *rx_ring;
+
+ int node;
++ cpumask_t affinity_mask;
+ } ____cacheline_aligned;
+
+ enum xgbe_state {
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2949-amd-xgbe-Prepare-for-more-fine-grained-cache.patch b/meta-v1000/recipes-kernel/linux/files/2949-amd-xgbe-Prepare-for-more-fine-grained-cache.patch
new file mode 100644
index 00000000..3ac44e0b
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2949-amd-xgbe-Prepare-for-more-fine-grained-cache.patch
@@ -0,0 +1,172 @@
+From 00e22a0f2799e5543344719a3d94dd9054c9b1e6 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:39:40 +0530
+Subject: [PATCH] amd-xgbe: Prepare for more fine grained cache coherency
+ controls
+
+In prep for setting fine grained read and write DMA cache coherency
+controls, allow specific values to be used to set the cache coherency
+registers.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 28 ---------------------------
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 23 ++--------------------
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 5 ++---
+ drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 10 ++++------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 15 ++++++--------
+ 5 files changed, 14 insertions(+), 67 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index e7b6804..dc09883 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -127,34 +127,6 @@
+ #define DMA_DSR1 0x3024
+
+ /* DMA register entry bit positions and sizes */
+-#define DMA_AXIARCR_DRC_INDEX 0
+-#define DMA_AXIARCR_DRC_WIDTH 4
+-#define DMA_AXIARCR_DRD_INDEX 4
+-#define DMA_AXIARCR_DRD_WIDTH 2
+-#define DMA_AXIARCR_TEC_INDEX 8
+-#define DMA_AXIARCR_TEC_WIDTH 4
+-#define DMA_AXIARCR_TED_INDEX 12
+-#define DMA_AXIARCR_TED_WIDTH 2
+-#define DMA_AXIARCR_THC_INDEX 16
+-#define DMA_AXIARCR_THC_WIDTH 4
+-#define DMA_AXIARCR_THD_INDEX 20
+-#define DMA_AXIARCR_THD_WIDTH 2
+-#define DMA_AXIAWCR_DWC_INDEX 0
+-#define DMA_AXIAWCR_DWC_WIDTH 4
+-#define DMA_AXIAWCR_DWD_INDEX 4
+-#define DMA_AXIAWCR_DWD_WIDTH 2
+-#define DMA_AXIAWCR_RPC_INDEX 8
+-#define DMA_AXIAWCR_RPC_WIDTH 4
+-#define DMA_AXIAWCR_RPD_INDEX 12
+-#define DMA_AXIAWCR_RPD_WIDTH 2
+-#define DMA_AXIAWCR_RHC_INDEX 16
+-#define DMA_AXIAWCR_RHC_WIDTH 4
+-#define DMA_AXIAWCR_RHD_INDEX 20
+-#define DMA_AXIAWCR_RHD_WIDTH 2
+-#define DMA_AXIAWCR_TDC_INDEX 24
+-#define DMA_AXIAWCR_TDC_WIDTH 4
+-#define DMA_AXIAWCR_TDD_INDEX 28
+-#define DMA_AXIAWCR_TDD_WIDTH 2
+ #define DMA_ISR_MACIS_INDEX 17
+ #define DMA_ISR_MACIS_WIDTH 1
+ #define DMA_ISR_MTLIS_INDEX 16
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index b05393f..98da249 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -2146,27 +2146,8 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+
+ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+ {
+- unsigned int arcache, awcache;
+-
+- arcache = 0;
+- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+- XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+- XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+- XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
+- XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+-
+- awcache = 0;
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+- XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
+- XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
++ XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
++ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
+ }
+
+ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index f0c2e88..1e73768 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -327,9 +327,8 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+- pdata->arcache = XGBE_DMA_OS_ARCACHE;
+- pdata->awcache = XGBE_DMA_OS_AWCACHE;
++ pdata->arcr = XGBE_DMA_OS_ARCR;
++ pdata->awcr = XGBE_DMA_OS_AWCR;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+index 84d4c51..d0f3dfb 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+@@ -448,13 +448,11 @@ static int xgbe_platform_probe(struct platform_device *pdev)
+ }
+ pdata->coherent = (attr == DEV_DMA_COHERENT);
+ if (pdata->coherent) {
+- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+- pdata->arcache = XGBE_DMA_OS_ARCACHE;
+- pdata->awcache = XGBE_DMA_OS_AWCACHE;
++ pdata->arcr = XGBE_DMA_OS_ARCR;
++ pdata->awcr = XGBE_DMA_OS_AWCR;
+ } else {
+- pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+- pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+- pdata->awcache = XGBE_DMA_SYS_AWCACHE;
++ pdata->arcr = XGBE_DMA_SYS_ARCR;
++ pdata->awcr = XGBE_DMA_SYS_AWCR;
+ }
+
+ /* Set the maximum fifo amounts */
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 7b50469..46780aa 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -164,14 +164,12 @@
+ #define XGBE_DMA_STOP_TIMEOUT 1
+
+ /* DMA cache settings - Outer sharable, write-back, write-allocate */
+-#define XGBE_DMA_OS_AXDOMAIN 0x2
+-#define XGBE_DMA_OS_ARCACHE 0xb
+-#define XGBE_DMA_OS_AWCACHE 0xf
++#define XGBE_DMA_OS_ARCR 0x002b2b2b
++#define XGBE_DMA_OS_AWCR 0x2f2f2f2f
+
+ /* DMA cache settings - System, no caches used */
+-#define XGBE_DMA_SYS_AXDOMAIN 0x3
+-#define XGBE_DMA_SYS_ARCACHE 0x0
+-#define XGBE_DMA_SYS_AWCACHE 0x0
++#define XGBE_DMA_SYS_ARCR 0x00303030
++#define XGBE_DMA_SYS_AWCR 0x30303030
+
+ /* DMA channel interrupt modes */
+ #define XGBE_IRQ_MODE_EDGE 0
+@@ -1007,9 +1005,8 @@ struct xgbe_prv_data {
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+- unsigned int axdomain;
+- unsigned int arcache;
+- unsigned int awcache;
++ unsigned int arcr;
++ unsigned int awcr;
+
+ /* Service routine support */
+ struct workqueue_struct *dev_workqueue;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2950-amd-xgbe-Simplify-the-burst-length-settings.patch b/meta-v1000/recipes-kernel/linux/files/2950-amd-xgbe-Simplify-the-burst-length-settings.patch
new file mode 100644
index 00000000..f94f72f7
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2950-amd-xgbe-Simplify-the-burst-length-settings.patch
@@ -0,0 +1,223 @@
+From 39d45bab52d136408e202c0eb14512f451ea8e76 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:42:47 +0530
+Subject: [PATCH] amd-xgbe: Simplify the burst length settings
+
+Currently the driver hardcodes the PBLx8 setting. Remove the need for
+specifying the PBLx8 setting and automatically calculate based on the
+specified PBL value. Since the PBLx8 setting applies to both Tx and Rx
+use the same PBL value for both of them.
+
+Also, the driver currently uses a bit field to set the AXI master burst
+len setting. Change to the full bit field range and set the burst length
+based on the specified value.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 11 ++++-
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 67 ++++++++---------------------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 5 +--
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 12 +-----
+ 4 files changed, 31 insertions(+), 64 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index dc09883..6b5c72d 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -137,12 +137,19 @@
+ #define DMA_MR_SWR_WIDTH 1
+ #define DMA_SBMR_EAME_INDEX 11
+ #define DMA_SBMR_EAME_WIDTH 1
+-#define DMA_SBMR_BLEN_256_INDEX 7
+-#define DMA_SBMR_BLEN_256_WIDTH 1
++#define DMA_SBMR_BLEN_INDEX 1
++#define DMA_SBMR_BLEN_WIDTH 7
+ #define DMA_SBMR_UNDEF_INDEX 0
+ #define DMA_SBMR_UNDEF_WIDTH 1
+
+ /* DMA register values */
++#define DMA_SBMR_BLEN_256 256
++#define DMA_SBMR_BLEN_128 128
++#define DMA_SBMR_BLEN_64 64
++#define DMA_SBMR_BLEN_32 32
++#define DMA_SBMR_BLEN_16 16
++#define DMA_SBMR_BLEN_8 8
++#define DMA_SBMR_BLEN_4 4
+ #define DMA_DSR_RPS_WIDTH 4
+ #define DMA_DSR_TPS_WIDTH 4
+ #define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 98da249..a51ece5 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -174,52 +174,30 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ return ret;
+ }
+
+-static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
++static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
+ {
++ unsigned int pblx8, pbl;
+ unsigned int i;
+
+- for (i = 0; i < pdata->channel_count; i++)
+- XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
+- pdata->pblx8);
+-
+- return 0;
+-}
+-
+-static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+-{
+- return XGMAC_DMA_IOREAD_BITS(pdata->channel[0], DMA_CH_TCR, PBL);
+-}
+-
+-static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+-{
+- unsigned int i;
+-
+- for (i = 0; i < pdata->channel_count; i++) {
+- if (!pdata->channel[i]->tx_ring)
+- break;
++ pblx8 = DMA_PBL_X8_DISABLE;
++ pbl = pdata->pbl;
+
+- XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, PBL,
+- pdata->tx_pbl);
++ if (pdata->pbl > 32) {
++ pblx8 = DMA_PBL_X8_ENABLE;
++ pbl >>= 3;
+ }
+
+- return 0;
+-}
+-
+-static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+-{
+- return XGMAC_DMA_IOREAD_BITS(pdata->channel[0], DMA_CH_RCR, PBL);
+-}
+-
+-static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+-{
+- unsigned int i;
+-
+ for (i = 0; i < pdata->channel_count; i++) {
+- if (!pdata->channel[i]->rx_ring)
+- break;
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
++ pblx8);
++
++ if (pdata->channel[i]->tx_ring)
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
++ PBL, pbl);
+
+- XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, PBL,
+- pdata->rx_pbl);
++ if (pdata->channel[i]->rx_ring)
++ XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
++ PBL, pbl);
+ }
+
+ return 0;
+@@ -2141,7 +2119,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+
+ /* Set the System Bus mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
++ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN, pdata->blen >> 2);
+ }
+
+ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+@@ -3381,9 +3359,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+- xgbe_config_pblx8(pdata);
+- xgbe_config_tx_pbl_val(pdata);
+- xgbe_config_rx_pbl_val(pdata);
++ xgbe_config_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+@@ -3511,13 +3487,6 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+- /* For RX and TX PBL config */
+- hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+- hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+- hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+- hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+- hw_if->config_pblx8 = xgbe_config_pblx8;
+-
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 982368b..8eec9f5 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -140,14 +140,13 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
+ {
+ DBGPR("-->xgbe_default_config\n");
+
+- pdata->pblx8 = DMA_PBL_X8_ENABLE;
++ pdata->blen = DMA_SBMR_BLEN_256;
++ pdata->pbl = DMA_PBL_128;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+- pdata->tx_pbl = DMA_PBL_16;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+- pdata->rx_pbl = DMA_PBL_16;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 46780aa..4bf82eb 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -737,13 +737,6 @@ struct xgbe_hw_if {
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+- /* For RX and TX PBL config */
+- int (*config_rx_pbl_val)(struct xgbe_prv_data *);
+- int (*get_rx_pbl_val)(struct xgbe_prv_data *);
+- int (*config_tx_pbl_val)(struct xgbe_prv_data *);
+- int (*get_tx_pbl_val)(struct xgbe_prv_data *);
+- int (*config_pblx8)(struct xgbe_prv_data *);
+-
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+@@ -1029,19 +1022,18 @@ struct xgbe_prv_data {
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+- unsigned int pblx8;
++ unsigned int blen;
++ unsigned int pbl;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+- unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+- unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
+
+ /* Tx coalescing settings */
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2951-amd-xgbe-Adjust-register-settings-to-improve.patch b/meta-v1000/recipes-kernel/linux/files/2951-amd-xgbe-Adjust-register-settings-to-improve.patch
new file mode 100644
index 00000000..c84cec60
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2951-amd-xgbe-Adjust-register-settings-to-improve.patch
@@ -0,0 +1,212 @@
+From f20c2419adafb44013ae66e49a52a00a97f5562d Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:43:47 +0530
+Subject: [PATCH] amd-xgbe: Adjust register settings to improve performance
+
+Add support to change some general performance settings and to provide
+some performance settings based on the device that is probed.
+
+This includes:
+
+- Setting the maximum read/write outstanding request limit
+- Reducing the AXI interface burst length size
+- Selectively setting the Tx and Rx descriptor pre-fetch threshold
+- Selectively setting additional cache coherency controls
+
+Tested and verified on all versions of the hardware.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 13 +++++++++++++
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 26 +++++++++++++++++++++++---
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 5 ++++-
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 9 +++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 11 +++++++++++
+ 5 files changed, 58 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 6b5c72d..9795419 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -123,8 +123,11 @@
+ #define DMA_ISR 0x3008
+ #define DMA_AXIARCR 0x3010
+ #define DMA_AXIAWCR 0x3018
++#define DMA_AXIAWARCR 0x301c
+ #define DMA_DSR0 0x3020
+ #define DMA_DSR1 0x3024
++#define DMA_TXEDMACR 0x3040
++#define DMA_RXEDMACR 0x3044
+
+ /* DMA register entry bit positions and sizes */
+ #define DMA_ISR_MACIS_INDEX 17
+@@ -135,12 +138,22 @@
+ #define DMA_MR_INTM_WIDTH 2
+ #define DMA_MR_SWR_INDEX 0
+ #define DMA_MR_SWR_WIDTH 1
++#define DMA_RXEDMACR_RDPS_INDEX 0
++#define DMA_RXEDMACR_RDPS_WIDTH 3
++#define DMA_SBMR_AAL_INDEX 12
++#define DMA_SBMR_AAL_WIDTH 1
+ #define DMA_SBMR_EAME_INDEX 11
+ #define DMA_SBMR_EAME_WIDTH 1
+ #define DMA_SBMR_BLEN_INDEX 1
+ #define DMA_SBMR_BLEN_WIDTH 7
++#define DMA_SBMR_RD_OSR_LMT_INDEX 16
++#define DMA_SBMR_RD_OSR_LMT_WIDTH 6
+ #define DMA_SBMR_UNDEF_INDEX 0
+ #define DMA_SBMR_UNDEF_WIDTH 1
++#define DMA_SBMR_WR_OSR_LMT_INDEX 24
++#define DMA_SBMR_WR_OSR_LMT_WIDTH 6
++#define DMA_TXEDMACR_TDPS_INDEX 0
++#define DMA_TXEDMACR_TDPS_WIDTH 3
+
+ /* DMA register values */
+ #define DMA_SBMR_BLEN_256 256
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index a51ece5..06f953e 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -2114,18 +2114,38 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+
+ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+ {
++ unsigned int sbmr;
++
++ sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
++
+ /* Set enhanced addressing mode */
+- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
++ XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+- XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN, pdata->blen >> 2);
++ XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
++ XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
++ XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
++ XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
++ XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
++
++ XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
++
++ /* Set descriptor fetching threshold */
++ if (pdata->vdata->tx_desc_prefetch)
++ XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
++ pdata->vdata->tx_desc_prefetch);
++
++ if (pdata->vdata->rx_desc_prefetch)
++ XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
++ pdata->vdata->rx_desc_prefetch);
+ }
+
+ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+ {
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
++ if (pdata->awarcr)
++ XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
+ }
+
+ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 8eec9f5..500147d 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -140,8 +140,11 @@ static void xgbe_default_config(struct xgbe_prv_data *pdata)
+ {
+ DBGPR("-->xgbe_default_config\n");
+
+- pdata->blen = DMA_SBMR_BLEN_256;
++ pdata->blen = DMA_SBMR_BLEN_64;
+ pdata->pbl = DMA_PBL_128;
++ pdata->aal = 1;
++ pdata->rd_osr_limit = 8;
++ pdata->wr_osr_limit = 8;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index 1e73768..1e56ad7 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -327,8 +327,9 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+- pdata->arcr = XGBE_DMA_OS_ARCR;
+- pdata->awcr = XGBE_DMA_OS_AWCR;
++ pdata->arcr = XGBE_DMA_PCI_ARCR;
++ pdata->awcr = XGBE_DMA_PCI_AWCR;
++ pdata->awarcr = XGBE_DMA_PCI_AWARCR;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+@@ -447,6 +448,8 @@ static const struct xgbe_version_data xgbe_v2a = {
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
++ .tx_desc_prefetch = 5,
++ .rx_desc_prefetch = 5,
+ };
+
+ static const struct xgbe_version_data xgbe_v2b = {
+@@ -459,6 +462,8 @@ static const struct xgbe_version_data xgbe_v2b = {
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .irq_reissue_support = 1,
++ .tx_desc_prefetch = 5,
++ .rx_desc_prefetch = 5,
+ };
+
+ static const struct pci_device_id xgbe_pci_table[] = {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 4bf82eb..0938294 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -171,6 +171,11 @@
+ #define XGBE_DMA_SYS_ARCR 0x00303030
+ #define XGBE_DMA_SYS_AWCR 0x30303030
+
++/* DMA cache settings - PCI device */
++#define XGBE_DMA_PCI_ARCR 0x00000003
++#define XGBE_DMA_PCI_AWCR 0x13131313
++#define XGBE_DMA_PCI_AWARCR 0x00000313
++
+ /* DMA channel interrupt modes */
+ #define XGBE_IRQ_MODE_EDGE 0
+ #define XGBE_IRQ_MODE_LEVEL 1
+@@ -921,6 +926,8 @@ struct xgbe_version_data {
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int irq_reissue_support;
++ unsigned int tx_desc_prefetch;
++ unsigned int rx_desc_prefetch;
+ };
+
+ struct xgbe_prv_data {
+@@ -1000,6 +1007,7 @@ struct xgbe_prv_data {
+ unsigned int coherent;
+ unsigned int arcr;
+ unsigned int awcr;
++ unsigned int awarcr;
+
+ /* Service routine support */
+ struct workqueue_struct *dev_workqueue;
+@@ -1024,6 +1032,9 @@ struct xgbe_prv_data {
+ /* Tx/Rx common settings */
+ unsigned int blen;
+ unsigned int pbl;
++ unsigned int aal;
++ unsigned int rd_osr_limit;
++ unsigned int wr_osr_limit;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2952-amd-xgbe-fix-spelling-mistake-avialable.patch b/meta-v1000/recipes-kernel/linux/files/2952-amd-xgbe-fix-spelling-mistake-avialable.patch
new file mode 100644
index 00000000..ef95b78a
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2952-amd-xgbe-fix-spelling-mistake-avialable.patch
@@ -0,0 +1,32 @@
+From e654c3ff5b52282eee52c78d45b8b6b40b90e641 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:44:35 +0530
+Subject: [PATCH] amd-xgbe: fix spelling mistake: "avialable" -> "available"
+
+Trivial fix to spelling mistake in netdev_err message
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 920566a..67a2e52 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -247,7 +247,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
+
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+ netdev_err(netdev,
+- "autoneg disabled, pause autoneg not avialable\n");
++ "autoneg disabled, pause autoneg not available\n");
+ return -EINVAL;
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2953-drivers-net-add-missing-interrupt.h-include.patch b/meta-v1000/recipes-kernel/linux/files/2953-drivers-net-add-missing-interrupt.h-include.patch
new file mode 100644
index 00000000..da623f5c
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2953-drivers-net-add-missing-interrupt.h-include.patch
@@ -0,0 +1,33 @@
+From 8ff0596db8aee10c99a810cd63a9fe8317bdc76f Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:46:19 +0530
+Subject: [PATCH] drivers: net: add missing interrupt.h include
+
+these drivers use tasklets or irq apis, but don't include interrupt.h.
+Once flow cache is removed the implicit interrupt.h inclusion goes away
+which will break the build.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 0938294..e9282c9 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -129,6 +129,7 @@
+ #include <net/dcbnl.h>
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
++#include <linux/interrupt.h>
+
+ #define XGBE_DRV_NAME "amd-xgbe"
+ #define XGBE_DRV_VERSION "1.0.3"
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2954-amd-xgbe-Set-the-MDIO-mode-for-10000Base-T.patch b/meta-v1000/recipes-kernel/linux/files/2954-amd-xgbe-Set-the-MDIO-mode-for-10000Base-T.patch
new file mode 100644
index 00000000..c97b67a2
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2954-amd-xgbe-Set-the-MDIO-mode-for-10000Base-T.patch
@@ -0,0 +1,34 @@
+From eced6434e3271f50d32660d9dcbe14a5a26845da Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:47:11 +0530
+Subject: [PATCH] amd-xgbe: Set the MDIO mode for 10000Base-T configuration
+
+Currently the MDIO mode is set to none for the 10000Base-T, which is
+incorrect. The MDIO mode should for this configuration should be
+clause 45.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 04b5c14..81c45fa 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -2921,7 +2921,7 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+- phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
++ phy_data->phydev_mode = XGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-R support */
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2955-amd-xgbe-Set-the-MII-control-width-for-the-MAC.patch b/meta-v1000/recipes-kernel/linux/files/2955-amd-xgbe-Set-the-MII-control-width-for-the-MAC.patch
new file mode 100644
index 00000000..f3bac883
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2955-amd-xgbe-Set-the-MII-control-width-for-the-MAC.patch
@@ -0,0 +1,47 @@
+From 2a0825d13a4c9016008abb96952e2f5704ba0516 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:47:59 +0530
+Subject: [PATCH] amd-xgbe: Set the MII control width for the MAC interface
+
+When running in SGMII mode at speeds below 1000Mbps, the auto-negotition
+control register must set the MII control width for the MAC interface
+to be 8-bits wide. By default the width is 4-bits.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1 +
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 2 ++
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 9795419..d07edf9 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1339,6 +1339,7 @@
+ #define XGBE_AN_CL37_PCS_MODE_BASEX 0x00
+ #define XGBE_AN_CL37_PCS_MODE_SGMII 0x04
+ #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08
++#define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100
+
+ /* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index d0b9950..4528838 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -981,6 +981,8 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
+ break;
+ }
+
++ reg |= XGBE_AN_CL37_MII_CTRL_8BIT;
++
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n",
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2956-amd-xgbe-Be-sure-driver-shuts-down-cleanly-on.patch b/meta-v1000/recipes-kernel/linux/files/2956-amd-xgbe-Be-sure-driver-shuts-down-cleanly-on.patch
new file mode 100644
index 00000000..bbd0a1ac
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2956-amd-xgbe-Be-sure-driver-shuts-down-cleanly-on.patch
@@ -0,0 +1,61 @@
+From 7ebe1ed5e5f7b1378cfcce7acf63bb8eb491f1f9 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:48:42 +0530
+Subject: [PATCH] amd-xgbe: Be sure driver shuts down cleanly on module
+ removal
+
+Sometimes when the driver is being unloaded while the devices are still
+up the driver can issue errors. This is based on timing and the double
+invocation of some routines. The phy_exit() call needs to be run after
+the network device has been closed and unregistered from the system.
+Also, the phy_exit() does not need to invoke phy_stop() since that will
+be called as part of the device closing, so remove that call.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 4 ++--
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 2 --
+ 2 files changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 500147d..53a425c 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -458,6 +458,8 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_unregister(pdata);
+
++ unregister_netdev(netdev);
++
+ pdata->phy_if.phy_exit(pdata);
+
+ flush_workqueue(pdata->an_workqueue);
+@@ -465,8 +467,6 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
+
+ flush_workqueue(pdata->dev_workqueue);
+ destroy_workqueue(pdata->dev_workqueue);
+-
+- unregister_netdev(netdev);
+ }
+
+ static int __init xgbe_mod_init(void)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 4528838..7953586 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1532,8 +1532,6 @@ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
+
+ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+ {
+- xgbe_phy_stop(pdata);
+-
+ pdata->phy_if.phy_impl.exit(pdata);
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2957-amd-xgbe-Update-TSO-packet-statistics-accuracy.patch b/meta-v1000/recipes-kernel/linux/files/2957-amd-xgbe-Update-TSO-packet-statistics-accuracy.patch
new file mode 100644
index 00000000..b97ea0c4
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2957-amd-xgbe-Update-TSO-packet-statistics-accuracy.patch
@@ -0,0 +1,35 @@
+From 5ff39c7ccd05cd18107b7d8b8d9769acd60d8b84 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:49:34 +0530
+Subject: [PATCH] amd-xgbe: Update TSO packet statistics accuracy
+
+When transmitting a TSO packet, the driver only increments the TSO packet
+statistic by one rather than the number of total packets that were sent.
+Update the driver to record the total number of packets that resulted from
+TSO transmit.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 06f953e..bb60507 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1740,7 +1740,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+
+- pdata->ext_stats.tx_tso_packets++;
++ pdata->ext_stats.tx_tso_packets += packet->tx_packets;
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2958-amd-xgbe-Add-support-to-handle-device-renaming.patch b/meta-v1000/recipes-kernel/linux/files/2958-amd-xgbe-Add-support-to-handle-device-renaming.patch
new file mode 100644
index 00000000..c216b619
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2958-amd-xgbe-Add-support-to-handle-device-renaming.patch
@@ -0,0 +1,305 @@
+From f0223c48a36c23b2ed4d21e060aec739a6c4b03d Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:50:40 +0530
+Subject: [PATCH] amd-xgbe: Add support to handle device renaming
+
+Many of the names used by the driver are based upon the name of the device
+found during device probe. Move the formatting of the names into the
+device open function so that any renaming that occurs before the device is
+brought up will be accounted for. This also means moving the creation of
+some named workqueues into the device open path.
+
+Add support to register for net events so that if a device is renamed
+the corresponding debugfs directory can be renamed.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 25 ++++++++++
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 44 +++++++++++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 72 ++++++++++++----------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +-
+ 4 files changed, 100 insertions(+), 46 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+index 7546b66..7d128be 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
+@@ -527,3 +527,28 @@ void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+ }
++
++void xgbe_debugfs_rename(struct xgbe_prv_data *pdata)
++{
++ struct dentry *pfile;
++ char *buf;
++
++ if (!pdata->xgbe_debugfs)
++ return;
++
++ buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
++ if (!buf)
++ return;
++
++ if (!strcmp(pdata->xgbe_debugfs->d_name.name, buf))
++ goto out;
++
++ pfile = debugfs_rename(pdata->xgbe_debugfs->d_parent,
++ pdata->xgbe_debugfs,
++ pdata->xgbe_debugfs->d_parent, buf);
++ if (!pfile)
++ netdev_err(pdata->netdev, "debugfs_rename failed\n");
++
++out:
++ kfree(buf);
++}
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index f4e1860..82bb058b 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -886,7 +886,7 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+ (unsigned long)pdata);
+
+ ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+- netdev->name, pdata);
++ netdev_name(netdev), pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->dev_irq);
+@@ -1587,16 +1587,42 @@ static int xgbe_open(struct net_device *netdev)
+
+ DBGPR("-->xgbe_open\n");
+
++ /* Create the various names based on netdev name */
++ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
++ netdev_name(netdev));
++
++ snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
++ netdev_name(netdev));
++
++ snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
++ netdev_name(netdev));
++
++ /* Create workqueues */
++ pdata->dev_workqueue =
++ create_singlethread_workqueue(netdev_name(netdev));
++ if (!pdata->dev_workqueue) {
++ netdev_err(netdev, "device workqueue creation failed\n");
++ return -ENOMEM;
++ }
++
++ pdata->an_workqueue =
++ create_singlethread_workqueue(pdata->an_name);
++ if (!pdata->an_workqueue) {
++ netdev_err(netdev, "phy workqueue creation failed\n");
++ ret = -ENOMEM;
++ goto err_dev_wq;
++ }
++
+ /* Reset the phy settings */
+ ret = xgbe_phy_reset(pdata);
+ if (ret)
+- return ret;
++ goto err_an_wq;
+
+ /* Enable the clocks */
+ ret = clk_prepare_enable(pdata->sysclk);
+ if (ret) {
+ netdev_alert(netdev, "dma clk_prepare_enable failed\n");
+- return ret;
++ goto err_an_wq;
+ }
+
+ ret = clk_prepare_enable(pdata->ptpclk);
+@@ -1649,6 +1675,12 @@ static int xgbe_open(struct net_device *netdev)
+ err_sysclk:
+ clk_disable_unprepare(pdata->sysclk);
+
++err_an_wq:
++ destroy_workqueue(pdata->an_workqueue);
++
++err_dev_wq:
++ destroy_workqueue(pdata->dev_workqueue);
++
+ return ret;
+ }
+
+@@ -1672,6 +1704,12 @@ static int xgbe_close(struct net_device *netdev)
+ clk_disable_unprepare(pdata->ptpclk);
+ clk_disable_unprepare(pdata->sysclk);
+
++ flush_workqueue(pdata->an_workqueue);
++ destroy_workqueue(pdata->an_workqueue);
++
++ flush_workqueue(pdata->dev_workqueue);
++ destroy_workqueue(pdata->dev_workqueue);
++
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+
+ DBGPR("<--xgbe_close\n");
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 53a425c..c5ff385 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -120,6 +120,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/io.h>
++#include <linux/notifier.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -399,35 +400,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ return ret;
+ }
+
+- /* Create the PHY/ANEG name based on netdev name */
+- snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+- netdev_name(netdev));
+-
+- /* Create the ECC name based on netdev name */
+- snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
+- netdev_name(netdev));
+-
+- /* Create the I2C name based on netdev name */
+- snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
+- netdev_name(netdev));
+-
+- /* Create workqueues */
+- pdata->dev_workqueue =
+- create_singlethread_workqueue(netdev_name(netdev));
+- if (!pdata->dev_workqueue) {
+- netdev_err(netdev, "device workqueue creation failed\n");
+- ret = -ENOMEM;
+- goto err_netdev;
+- }
+-
+- pdata->an_workqueue =
+- create_singlethread_workqueue(pdata->an_name);
+- if (!pdata->an_workqueue) {
+- netdev_err(netdev, "phy workqueue creation failed\n");
+- ret = -ENOMEM;
+- goto err_wq;
+- }
+-
+ if (IS_REACHABLE(CONFIG_PTP_1588_CLOCK))
+ xgbe_ptp_register(pdata);
+
+@@ -439,14 +411,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ pdata->rx_ring_count);
+
+ return 0;
+-
+-err_wq:
+- destroy_workqueue(pdata->dev_workqueue);
+-
+-err_netdev:
+- unregister_netdev(netdev);
+-
+- return ret;
+ }
+
+ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
+@@ -461,18 +425,42 @@ void xgbe_deconfig_netdev(struct xgbe_prv_data *pdata)
+ unregister_netdev(netdev);
+
+ pdata->phy_if.phy_exit(pdata);
++}
+
+- flush_workqueue(pdata->an_workqueue);
+- destroy_workqueue(pdata->an_workqueue);
++static int xgbe_netdev_event(struct notifier_block *nb, unsigned long event,
++ void *data)
++{
++ struct net_device *netdev = netdev_notifier_info_to_dev(data);
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+- flush_workqueue(pdata->dev_workqueue);
+- destroy_workqueue(pdata->dev_workqueue);
++ if (netdev->netdev_ops != xgbe_get_netdev_ops())
++ goto out;
++
++ switch (event) {
++ case NETDEV_CHANGENAME:
++ xgbe_debugfs_rename(pdata);
++ break;
++
++ default:
++ break;
++ }
++
++out:
++ return NOTIFY_DONE;
+ }
+
++static struct notifier_block xgbe_netdev_notifier = {
++ .notifier_call = xgbe_netdev_event,
++};
++
+ static int __init xgbe_mod_init(void)
+ {
+ int ret;
+
++ ret = register_netdevice_notifier(&xgbe_netdev_notifier);
++ if (ret)
++ return ret;
++
+ ret = xgbe_platform_init();
+ if (ret)
+ return ret;
+@@ -489,6 +477,8 @@ static void __exit xgbe_mod_exit(void)
+ xgbe_pci_exit();
+
+ xgbe_platform_exit();
++
++ unregister_netdevice_notifier(&xgbe_netdev_notifier);
+ }
+
+ module_init(xgbe_mod_init);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index e9282c9..9a80f20 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -130,6 +130,7 @@
+ #include <linux/completion.h>
+ #include <linux/cpumask.h>
+ #include <linux/interrupt.h>
++#include <linux/dcache.h>
+
+ #define XGBE_DRV_NAME "amd-xgbe"
+ #define XGBE_DRV_VERSION "1.0.3"
+@@ -1172,7 +1173,6 @@ struct xgbe_prv_data {
+ struct tasklet_struct tasklet_i2c;
+ struct tasklet_struct tasklet_an;
+
+-#ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+@@ -1183,7 +1183,6 @@ struct xgbe_prv_data {
+ unsigned int debugfs_xprop_reg;
+
+ unsigned int debugfs_xi2c_reg;
+-#endif
+ };
+
+ /* Function prototypes*/
+@@ -1232,9 +1231,11 @@ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+ #ifdef CONFIG_DEBUG_FS
+ void xgbe_debugfs_init(struct xgbe_prv_data *);
+ void xgbe_debugfs_exit(struct xgbe_prv_data *);
++void xgbe_debugfs_rename(struct xgbe_prv_data *pdata);
+ #else
+ static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
+ static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
++static inline void xgbe_debugfs_rename(struct xgbe_prv_data *pdata) {}
+ #endif /* CONFIG_DEBUG_FS */
+
+ /* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2959-amd-xgbe-Add-additional-dynamic-debug-messages.patch b/meta-v1000/recipes-kernel/linux/files/2959-amd-xgbe-Add-additional-dynamic-debug-messages.patch
new file mode 100644
index 00000000..37bc9974
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2959-amd-xgbe-Add-additional-dynamic-debug-messages.patch
@@ -0,0 +1,35 @@
+From 7a33d03a4743ac01e8681fc53f0edec2a4b76beb Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:51:09 +0530
+Subject: [PATCH] amd-xgbe: Add additional dynamic debug messages
+
+Add some additional dynamic debug messages to the driver. The new messages
+will provide additional information about the PCS window calculation.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index 1e56ad7..3e5833c 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -292,6 +292,10 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ if (netif_msg_probe(pdata)) {
++ dev_dbg(dev, "xpcs window def = %#010x\n",
++ pdata->xpcs_window_def_reg);
++ dev_dbg(dev, "xpcs window sel = %#010x\n",
++ pdata->xpcs_window_sel_reg);
+ dev_dbg(dev, "xpcs window = %#010x\n",
+ pdata->xpcs_window);
+ dev_dbg(dev, "xpcs window size = %#010x\n",
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2960-amd-xgbe-Optimize-DMA-channel-interrupt-enablement.patch b/meta-v1000/recipes-kernel/linux/files/2960-amd-xgbe-Optimize-DMA-channel-interrupt-enablement.patch
new file mode 100644
index 00000000..194a3342
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2960-amd-xgbe-Optimize-DMA-channel-interrupt-enablement.patch
@@ -0,0 +1,230 @@
+From 7a89724ed3feefad4b8f6adae2dd66a60659db1c Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:51:59 +0530
+Subject: [PATCH] amd-xgbe: Optimize DMA channel interrupt enablement
+
+Currently whenever the driver needs to enable or disable interrupts for
+a DMA channel it reads the interrupt enable register (IER), updates the
+value and then writes the new value back to the IER. Since the hardware
+does not change the IER, software can track this value and elimiate the
+need to read it each time.
+
+Add the IER value to the channel related data structure and use that as
+the base for enabling and disabling interrupts, thus removing the need
+for the MMIO read.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 77 +++++++++++++++-----------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 4 +-
+ 2 files changed, 37 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index bb60507..75a479c 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -605,7 +605,6 @@ static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_channel *channel;
+- unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ /* Set the interrupt mode if supported */
+@@ -617,20 +616,20 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ channel = pdata->channel[i];
+
+ /* Clear all the interrupts which are set */
+- dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+- XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
++ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
++ XGMAC_DMA_IOREAD(channel, DMA_CH_SR));
+
+ /* Clear all interrupt enable bits */
+- dma_ch_ier = 0;
++ channel->curr_ier = 0;
+
+ /* Enable following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+@@ -639,7 +638,8 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ * mode)
+ */
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier,
++ DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+ /* Enable following Rx interrupts
+@@ -648,12 +648,13 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ * per channel interrupts in edge triggered
+ * mode)
+ */
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
+ if (!pdata->per_channel_irq || pdata->channel_irq_mode)
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier,
++ DMA_CH_IER, RIE, 1);
+ }
+
+- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
++ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+ }
+ }
+
+@@ -1964,44 +1965,40 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+ static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+ {
+- unsigned int dma_ch_ier;
+-
+- dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+-
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+- dma_ch_ier |= channel->saved_ier;
++ channel->curr_ier |= channel->saved_ier;
+ break;
+ default:
+ return -1;
+ }
+
+- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
++ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+
+ return 0;
+ }
+@@ -2009,45 +2006,41 @@ static int xgbe_enable_int(struct xgbe_channel *channel,
+ static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+ {
+- unsigned int dma_ch_ier;
+-
+- dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+-
+ switch (int_id) {
+ case XGMAC_INT_DMA_CH_SR_TI:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI_RI:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+- XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
+- channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
+- dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
++ channel->saved_ier = channel->curr_ier;
++ channel->curr_ier = 0;
+ break;
+ default:
+ return -1;
+ }
+
+- XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
++ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 9a80f20..58bb455 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -182,8 +182,6 @@
+ #define XGBE_IRQ_MODE_EDGE 0
+ #define XGBE_IRQ_MODE_LEVEL 1
+
+-#define XGBE_DMA_INTERRUPT_MASK 0x31c7
+-
+ #define XGMAC_MIN_PACKET 60
+ #define XGMAC_STD_PACKET_MTU 1500
+ #define XGMAC_MAX_STD_PACKET 1518
+@@ -462,6 +460,8 @@ struct xgbe_channel {
+ /* Netdev related settings */
+ struct napi_struct napi;
+
++ /* Per channel interrupt enablement tracker */
++ unsigned int curr_ier;
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2961-amd-xgbe-Add-hardware-features-debug-output.patch b/meta-v1000/recipes-kernel/linux/files/2961-amd-xgbe-Add-hardware-features-debug-output.patch
new file mode 100644
index 00000000..98882b40
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2961-amd-xgbe-Add-hardware-features-debug-output.patch
@@ -0,0 +1,116 @@
+From d4c988aa7b04ddfcd1b0d56360b511539383444f Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:52:52 +0530
+Subject: [PATCH] amd-xgbe: Add hardware features debug output
+
+Use the dynamic debug support to output information about the hardware
+features reported by the device.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 78 ++++++++++++++++++++++++++++++--
+ 1 file changed, 75 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 82bb058b..392ea8a 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -731,8 +731,6 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+- DBGPR("-->xgbe_get_all_hw_features\n");
+-
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+@@ -827,7 +825,81 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+
+- DBGPR("<--xgbe_get_all_hw_features\n");
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(pdata->dev, "Hardware features:\n");
++
++ /* Hardware feature register 0 */
++ dev_dbg(pdata->dev, " 1GbE support : %s\n",
++ hw_feat->gmii ? "yes" : "no");
++ dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
++ hw_feat->vlhash ? "yes" : "no");
++ dev_dbg(pdata->dev, " MDIO interface : %s\n",
++ hw_feat->sma ? "yes" : "no");
++ dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
++ hw_feat->rwk ? "yes" : "no");
++ dev_dbg(pdata->dev, " Magic packet support : %s\n",
++ hw_feat->mgk ? "yes" : "no");
++ dev_dbg(pdata->dev, " Management counters : %s\n",
++ hw_feat->mmc ? "yes" : "no");
++ dev_dbg(pdata->dev, " ARP offload : %s\n",
++ hw_feat->aoe ? "yes" : "no");
++ dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
++ hw_feat->ts ? "yes" : "no");
++ dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
++ hw_feat->eee ? "yes" : "no");
++ dev_dbg(pdata->dev, " TX checksum offload : %s\n",
++ hw_feat->tx_coe ? "yes" : "no");
++ dev_dbg(pdata->dev, " RX checksum offload : %s\n",
++ hw_feat->rx_coe ? "yes" : "no");
++ dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
++ hw_feat->addn_mac);
++ dev_dbg(pdata->dev, " Timestamp source : %s\n",
++ (hw_feat->ts_src == 1) ? "internal" :
++ (hw_feat->ts_src == 2) ? "external" :
++ (hw_feat->ts_src == 3) ? "internal/external" : "n/a");
++ dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
++ hw_feat->sa_vlan_ins ? "yes" : "no");
++
++ /* Hardware feature register 1 */
++ dev_dbg(pdata->dev, " RX fifo size : %u\n",
++ hw_feat->rx_fifo_size);
++ dev_dbg(pdata->dev, " TX fifo size : %u\n",
++ hw_feat->tx_fifo_size);
++ dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
++ hw_feat->adv_ts_hi ? "yes" : "no");
++ dev_dbg(pdata->dev, " DMA width : %u\n",
++ hw_feat->dma_width);
++ dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
++ hw_feat->dcb ? "yes" : "no");
++ dev_dbg(pdata->dev, " Split header : %s\n",
++ hw_feat->sph ? "yes" : "no");
++ dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
++ hw_feat->tso ? "yes" : "no");
++ dev_dbg(pdata->dev, " Debug memory interface : %s\n",
++ hw_feat->dma_debug ? "yes" : "no");
++ dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
++ hw_feat->rss ? "yes" : "no");
++ dev_dbg(pdata->dev, " Traffic Class count : %u\n",
++ hw_feat->tc_cnt);
++ dev_dbg(pdata->dev, " Hash table size : %u\n",
++ hw_feat->hash_table_size);
++ dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
++ hw_feat->l3l4_filter_num);
++
++ /* Hardware feature register 2 */
++ dev_dbg(pdata->dev, " RX queue count : %u\n",
++ hw_feat->rx_q_cnt);
++ dev_dbg(pdata->dev, " TX queue count : %u\n",
++ hw_feat->tx_q_cnt);
++ dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
++ hw_feat->rx_ch_cnt);
++ dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
++		dev_dbg(pdata->dev, "  TX DMA channel count      : %u\n",
++ dev_dbg(pdata->dev, " PPS outputs : %u\n",
++ hw_feat->pps_out_num);
++ dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
++ hw_feat->aux_snap_num);
++ }
+ }
+
+ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2962-amd-xgbe-Add-per-queue-Tx-and-Rx-statistics.patch b/meta-v1000/recipes-kernel/linux/files/2962-amd-xgbe-Add-per-queue-Tx-and-Rx-statistics.patch
new file mode 100644
index 00000000..b07e2d1c
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2962-amd-xgbe-Add-per-queue-Tx-and-Rx-statistics.patch
@@ -0,0 +1,168 @@
+From d7892979d0339721c8b3767ef9e1b15de8a3431d Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:53:32 +0530
+Subject: [PATCH] amd-xgbe: Add per queue Tx and Rx statistics
+
+Add per queue Tx and Rx packet and byte counts.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 23 ++++++++++++++++-------
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 26 +++++++++++++++++++++++++-
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +++++
+ 3 files changed, 46 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 75a479c..a978408 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1609,6 +1609,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
++ unsigned int tx_packets, tx_bytes;
+ unsigned int csum, tso, vlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_set_ic;
+@@ -1618,6 +1619,9 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+
+ DBGPR("-->xgbe_dev_xmit\n");
+
++ tx_packets = packet->tx_packets;
++ tx_bytes = packet->tx_bytes;
++
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+@@ -1645,13 +1649,12 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ * - Addition of Tx frame count to the frame count since the
+ * last interrupt was set does not exceed the frame count setting
+ */
+- ring->coalesce_count += packet->tx_packets;
++ ring->coalesce_count += tx_packets;
+ if (!pdata->tx_frames)
+ tx_set_ic = 0;
+- else if (packet->tx_packets > pdata->tx_frames)
++ else if (tx_packets > pdata->tx_frames)
+ tx_set_ic = 1;
+- else if ((ring->coalesce_count % pdata->tx_frames) <
+- packet->tx_packets)
++ else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
+ tx_set_ic = 1;
+ else
+ tx_set_ic = 0;
+@@ -1741,7 +1744,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+
+- pdata->ext_stats.tx_tso_packets += packet->tx_packets;
++ pdata->ext_stats.tx_tso_packets += tx_packets;
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+@@ -1789,8 +1792,11 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+
+ /* Save the Tx info to report back during cleanup */
+- rdata->tx.packets = packet->tx_packets;
+- rdata->tx.bytes = packet->tx_bytes;
++ rdata->tx.packets = tx_packets;
++ rdata->tx.bytes = tx_bytes;
++
++ pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
++ pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+@@ -1944,6 +1950,9 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
+ FRAME, 1);
+ }
+
++ pdata->ext_stats.rxq_packets[channel->queue_index]++;
++ pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
++
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 67a2e52..f80b186 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -186,6 +186,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
+
+ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+ {
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int i;
+
+ switch (stringset) {
+@@ -195,6 +196,18 @@ static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
++ for (i = 0; i < pdata->tx_ring_count; i++) {
++ sprintf(data, "txq_%u_packets", i);
++ data += ETH_GSTRING_LEN;
++ sprintf(data, "txq_%u_bytes", i);
++ data += ETH_GSTRING_LEN;
++ }
++ for (i = 0; i < pdata->rx_ring_count; i++) {
++ sprintf(data, "rxq_%u_packets", i);
++ data += ETH_GSTRING_LEN;
++ sprintf(data, "rxq_%u_bytes", i);
++ data += ETH_GSTRING_LEN;
++ }
+ break;
+ }
+ }
+@@ -211,15 +224,26 @@ static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
++ for (i = 0; i < pdata->tx_ring_count; i++) {
++ *data++ = pdata->ext_stats.txq_packets[i];
++ *data++ = pdata->ext_stats.txq_bytes[i];
++ }
++ for (i = 0; i < pdata->rx_ring_count; i++) {
++ *data++ = pdata->ext_stats.rxq_packets[i];
++ *data++ = pdata->ext_stats.rxq_bytes[i];
++ }
+ }
+
+ static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+ {
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+- ret = XGBE_STATS_COUNT;
++ ret = XGBE_STATS_COUNT +
++ (pdata->tx_ring_count * 2) +
++ (pdata->rx_ring_count * 2);
+ break;
+
+ default:
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 58bb455..0e93155 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -668,6 +668,11 @@ struct xgbe_ext_stats {
+ u64 tx_tso_packets;
+ u64 rx_split_header_packets;
+ u64 rx_buffer_unavailable;
++
++ u64 txq_packets[XGBE_MAX_DMA_CHANNELS];
++ u64 txq_bytes[XGBE_MAX_DMA_CHANNELS];
++ u64 rxq_packets[XGBE_MAX_DMA_CHANNELS];
++ u64 rxq_bytes[XGBE_MAX_DMA_CHANNELS];
+ };
+
+ struct xgbe_hw_if {
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2963-amd-xgbe-Convert-to-using-the-new-link-mode.patch b/meta-v1000/recipes-kernel/linux/files/2963-amd-xgbe-Convert-to-using-the-new-link-mode.patch
new file mode 100644
index 00000000..17508276
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2963-amd-xgbe-Convert-to-using-the-new-link-mode.patch
@@ -0,0 +1,1349 @@
+From 5fb6030d804bfb1176d2d35ab9b9fae2a3cc5fe4 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:54:25 +0530
+Subject: [PATCH] amd-xgbe: Convert to using the new link mode settings
+
+Convert from using the old u32 supported, advertising, etc. link settings
+to the new link mode settings that support bit positions / settings
+greater than 32 bits.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 56 +++--
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 77 +++---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c | 54 +++--
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 350 +++++++++++++++------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 50 +++-
+ 5 files changed, 345 insertions(+), 242 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index f80b186..cea25ac 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -267,6 +267,7 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ int ret = 0;
+
+ if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
+@@ -279,16 +280,21 @@ static int xgbe_set_pauseparam(struct net_device *netdev,
+ pdata->phy.tx_pause = pause->tx_pause;
+ pdata->phy.rx_pause = pause->rx_pause;
+
+- pdata->phy.advertising &= ~ADVERTISED_Pause;
+- pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
++ XGBE_CLR_ADV(lks, Pause);
++ XGBE_CLR_ADV(lks, Asym_Pause);
+
+ if (pause->rx_pause) {
+- pdata->phy.advertising |= ADVERTISED_Pause;
+- pdata->phy.advertising |= ADVERTISED_Asym_Pause;
++ XGBE_SET_ADV(lks, Pause);
++ XGBE_SET_ADV(lks, Asym_Pause);
+ }
+
+- if (pause->tx_pause)
+- pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
++ if (pause->tx_pause) {
++ /* Equivalent to XOR of Asym_Pause */
++ if (XGBE_ADV(lks, Asym_Pause))
++ XGBE_CLR_ADV(lks, Asym_Pause);
++ else
++ XGBE_SET_ADV(lks, Asym_Pause);
++ }
+
+ if (netif_running(netdev))
+ ret = pdata->phy_if.phy_config_aneg(pdata);
+@@ -300,22 +306,20 @@ static int xgbe_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+
+ cmd->base.phy_address = pdata->phy.address;
+
+- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+- pdata->phy.supported);
+- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+- pdata->phy.advertising);
+- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+- pdata->phy.lp_advertising);
+-
+ cmd->base.autoneg = pdata->phy.autoneg;
+ cmd->base.speed = pdata->phy.speed;
+ cmd->base.duplex = pdata->phy.duplex;
+
+ cmd->base.port = PORT_NONE;
+
++ XGBE_LM_COPY(cmd, supported, lks, supported);
++ XGBE_LM_COPY(cmd, advertising, lks, advertising);
++ XGBE_LM_COPY(cmd, lp_advertising, lks, lp_advertising);
++
+ return 0;
+ }
+
+@@ -323,7 +327,8 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- u32 advertising;
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u32 speed;
+ int ret;
+
+@@ -355,15 +360,17 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
+ }
+ }
+
+- ethtool_convert_link_mode_to_legacy_u32(&advertising,
+- cmd->link_modes.advertising);
+-
+ netif_dbg(pdata, link, netdev,
+- "requested advertisement %#x, phy supported %#x\n",
+- advertising, pdata->phy.supported);
++ "requested advertisement 0x%*pb, phy supported 0x%*pb\n",
++ __ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
++ __ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);
++
++ bitmap_and(advertising,
++ cmd->link_modes.advertising, lks->link_modes.supported,
++ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+- advertising &= pdata->phy.supported;
+- if ((cmd->base.autoneg == AUTONEG_ENABLE) && !advertising) {
++ if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
++ bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ netdev_err(netdev,
+ "unsupported requested advertisement\n");
+ return -EINVAL;
+@@ -373,12 +380,13 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
+ pdata->phy.autoneg = cmd->base.autoneg;
+ pdata->phy.speed = speed;
+ pdata->phy.duplex = cmd->base.duplex;
+- pdata->phy.advertising = advertising;
++ bitmap_copy(lks->link_modes.advertising, advertising,
++ __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE)
+- pdata->phy.advertising |= ADVERTISED_Autoneg;
++ XGBE_SET_ADV(lks, Autoneg);
+ else
+- pdata->phy.advertising &= ~ADVERTISED_Autoneg;
++ XGBE_CLR_ADV(lks, Autoneg);
+
+ if (netif_running(netdev))
+ ret = pdata->phy_if.phy_config_aneg(pdata);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 7953586..0893531 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -614,12 +614,14 @@ static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata)
+
+ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
+ /* Be sure we aren't looping trying to negotiate */
+ if (xgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = XGBE_RX_ERROR;
+
+- if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+- !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
++ if (!XGBE_ADV(lks, 1000baseKX_Full) &&
++ !XGBE_ADV(lks, 2500baseX_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != XGBE_RX_BPA)
+@@ -627,7 +629,7 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
+ } else {
+ pdata->kx_state = XGBE_RX_ERROR;
+
+- if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
++ if (!XGBE_ADV(lks, 10000baseKR_Full))
+ return XGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != XGBE_RX_BPA)
+@@ -943,18 +945,19 @@ static void xgbe_an_state_machine(struct work_struct *work)
+
+ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
+ {
+- unsigned int advertising, reg;
++ struct ethtool_link_ksettings lks;
++ unsigned int reg;
+
+- advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
++ pdata->phy_if.phy_impl.an_advertising(pdata, &lks);
+
+ /* Set up Advertisement register */
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+- if (advertising & ADVERTISED_Pause)
++ if (XGBE_ADV(&lks, Pause))
+ reg |= 0x100;
+ else
+ reg &= ~0x100;
+
+- if (advertising & ADVERTISED_Asym_Pause)
++ if (XGBE_ADV(&lks, Asym_Pause))
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+@@ -991,13 +994,14 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata)
+
+ static void xgbe_an73_init(struct xgbe_prv_data *pdata)
+ {
+- unsigned int advertising, reg;
++ struct ethtool_link_ksettings lks;
++ unsigned int reg;
+
+- advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
++ pdata->phy_if.phy_impl.an_advertising(pdata, &lks);
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+- if (advertising & ADVERTISED_10000baseR_FEC)
++ if (XGBE_ADV(&lks, 10000baseR_FEC))
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+@@ -1006,13 +1010,13 @@ static void xgbe_an73_init(struct xgbe_prv_data *pdata)
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+- if (advertising & ADVERTISED_10000baseKR_Full)
++ if (XGBE_ADV(&lks, 10000baseKR_Full))
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+- if ((advertising & ADVERTISED_1000baseKX_Full) ||
+- (advertising & ADVERTISED_2500baseX_Full))
++ if (XGBE_ADV(&lks, 1000baseKX_Full) ||
++ XGBE_ADV(&lks, 2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+@@ -1021,12 +1025,12 @@ static void xgbe_an73_init(struct xgbe_prv_data *pdata)
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+- if (advertising & ADVERTISED_Pause)
++ if (XGBE_ADV(&lks, Pause))
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+- if (advertising & ADVERTISED_Asym_Pause)
++ if (XGBE_ADV(&lks, Asym_Pause))
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+@@ -1282,9 +1286,10 @@ static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata)
+
+ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+
+- pdata->phy.lp_advertising = 0;
++ XGBE_ZERO_LP_ADV(lks);
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ mode = xgbe_cur_mode(pdata);
+@@ -1514,17 +1519,21 @@ static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+
+ static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data *pdata)
+ {
+- if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
++ if (XGBE_ADV(lks, 10000baseKR_Full))
+ return SPEED_10000;
+- else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
++ else if (XGBE_ADV(lks, 10000baseT_Full))
+ return SPEED_10000;
+- else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
++ else if (XGBE_ADV(lks, 2500baseX_Full))
+ return SPEED_2500;
+- else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
++ else if (XGBE_ADV(lks, 2500baseT_Full))
++ return SPEED_2500;
++ else if (XGBE_ADV(lks, 1000baseKX_Full))
+ return SPEED_1000;
+- else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
++ else if (XGBE_ADV(lks, 1000baseT_Full))
+ return SPEED_1000;
+- else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
++ else if (XGBE_ADV(lks, 100baseT_Full))
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+@@ -1537,6 +1546,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+
+ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ int ret;
+
+ mutex_init(&pdata->an_mutex);
+@@ -1554,11 +1564,13 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
+- pdata->phy.advertising = pdata->phy.supported;
++
++ /* Copy supported link modes to advertising link modes */
++ XGBE_LM_COPY(lks, advertising, lks, supported);
+
+ pdata->phy.address = 0;
+
+- if (pdata->phy.advertising & ADVERTISED_Autoneg) {
++ if (XGBE_ADV(lks, Autoneg)) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+@@ -1575,16 +1587,21 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+- pdata->phy.advertising &= ~ADVERTISED_Pause;
+- pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
++ XGBE_CLR_ADV(lks, Pause);
++ XGBE_CLR_ADV(lks, Asym_Pause);
+
+ if (pdata->rx_pause) {
+- pdata->phy.advertising |= ADVERTISED_Pause;
+- pdata->phy.advertising |= ADVERTISED_Asym_Pause;
++ XGBE_SET_ADV(lks, Pause);
++ XGBE_SET_ADV(lks, Asym_Pause);
+ }
+
+- if (pdata->tx_pause)
+- pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
++ if (pdata->tx_pause) {
++ /* Equivalent to XOR of Asym_Pause */
++ if (XGBE_ADV(lks, Asym_Pause))
++ XGBE_CLR_ADV(lks, Asym_Pause);
++ else
++ XGBE_SET_ADV(lks, Asym_Pause);
++ }
+
+ if (netif_msg_drv(pdata))
+ xgbe_dump_phy_registers(pdata);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+index c75edca..d16eae4 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c
+@@ -231,20 +231,21 @@ static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata)
+
+ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+- pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++ XGBE_SET_LP_ADV(lks, Autoneg);
++ XGBE_SET_LP_ADV(lks, Backplane);
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+- pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ XGBE_SET_LP_ADV(lks, Pause);
+ if (lp_reg & 0x800)
+- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++ XGBE_SET_LP_ADV(lks, Asym_Pause);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+@@ -266,12 +267,12 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
+ if (lp_reg & 0x20) {
+ if (phy_data->speed_set == XGBE_SPEEDSET_2500_10000)
+- pdata->phy.lp_advertising |= ADVERTISED_2500baseX_Full;
++ XGBE_SET_LP_ADV(lks, 2500baseX_Full);
+ else
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
+ }
+
+ ad_reg &= lp_reg;
+@@ -290,14 +291,17 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
+
+ return mode;
+ }
+
+-static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
++static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
++ struct ethtool_link_ksettings *dlks)
+ {
+- return pdata->phy.advertising;
++ struct ethtool_link_ksettings *slks = &pdata->phy.lks;
++
++ XGBE_LM_COPY(dlks, advertising, slks, advertising);
+ }
+
+ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+@@ -565,11 +569,10 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+ }
+
+ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+- enum xgbe_mode mode, u32 advert)
++ enum xgbe_mode mode, bool advert)
+ {
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+- if (pdata->phy.advertising & advert)
+- return true;
++ return advert;
+ } else {
+ enum xgbe_mode cur_mode;
+
+@@ -583,16 +586,18 @@ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+
+ static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_1000baseKX_Full);
++ XGBE_ADV(lks, 1000baseKX_Full));
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_2500baseX_Full);
++ XGBE_ADV(lks, 2500baseX_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_10000baseKR_Full);
++ XGBE_ADV(lks, 10000baseKR_Full));
+ default:
+ return false;
+ }
+@@ -672,6 +677,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+
+ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data;
+ int ret;
+
+@@ -790,21 +796,23 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ }
+
+ /* Initialize supported features */
+- pdata->phy.supported = SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_Backplane;
+- pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
++ XGBE_ZERO_SUP(lks);
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, Backplane);
++ XGBE_SET_SUP(lks, 10000baseKR_Full);
+ switch (phy_data->speed_set) {
+ case XGBE_SPEEDSET_1000_10000:
+- pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
++ XGBE_SET_SUP(lks, 1000baseKX_Full);
+ break;
+ case XGBE_SPEEDSET_2500_10000:
+- pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ XGBE_SET_SUP(lks, 2500baseX_Full);
+ break;
+ }
+
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+- pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
++ XGBE_SET_SUP(lks, 10000baseR_FEC);
+
+ pdata->phy_data = phy_data;
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 81c45fa..3304a29 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -709,18 +709,13 @@ static int xgbe_phy_mii_read(struct mii_bus *mii, int addr, int reg)
+
+ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!phy_data->sfp_mod_absent && !phy_data->sfp_changed)
+ return;
+
+- pdata->phy.supported &= ~SUPPORTED_Autoneg;
+- pdata->phy.supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+- pdata->phy.supported &= ~SUPPORTED_TP;
+- pdata->phy.supported &= ~SUPPORTED_FIBRE;
+- pdata->phy.supported &= ~SUPPORTED_100baseT_Full;
+- pdata->phy.supported &= ~SUPPORTED_1000baseT_Full;
+- pdata->phy.supported &= ~SUPPORTED_10000baseT_Full;
++ XGBE_ZERO_SUP(lks);
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+@@ -728,18 +723,13 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_TP;
+- pdata->phy.supported |= SUPPORTED_FIBRE;
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+- pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, TP);
++ XGBE_SET_SUP(lks, FIBRE);
+
+- pdata->phy.advertising = pdata->phy.supported;
++ XGBE_LM_COPY(lks, advertising, lks, supported);
+
+ return;
+ }
+@@ -753,8 +743,18 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.pause_autoneg = AUTONEG_ENABLE;
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) {
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
++ XGBE_SET_SUP(lks, 100baseT_Full);
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ XGBE_SET_SUP(lks, 1000baseT_Full);
++ } else {
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
++ XGBE_SET_SUP(lks, 1000baseX_Full);
++ }
+ break;
+ case XGBE_SFP_BASE_10000_SR:
+ case XGBE_SFP_BASE_10000_LR:
+@@ -765,6 +765,27 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.pause_autoneg = AUTONEG_DISABLE;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
++ switch (phy_data->sfp_base) {
++ case XGBE_SFP_BASE_10000_SR:
++ XGBE_SET_SUP(lks, 10000baseSR_Full);
++ break;
++ case XGBE_SFP_BASE_10000_LR:
++ XGBE_SET_SUP(lks, 10000baseLR_Full);
++ break;
++ case XGBE_SFP_BASE_10000_LRM:
++ XGBE_SET_SUP(lks, 10000baseLRM_Full);
++ break;
++ case XGBE_SFP_BASE_10000_ER:
++ XGBE_SET_SUP(lks, 10000baseER_Full);
++ break;
++ case XGBE_SFP_BASE_10000_CR:
++ XGBE_SET_SUP(lks, 10000baseCR_Full);
++ break;
++ default:
++ break;
++ }
++ }
+ break;
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+@@ -778,38 +799,14 @@ static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata)
+ case XGBE_SFP_BASE_1000_T:
+ case XGBE_SFP_BASE_1000_CX:
+ case XGBE_SFP_BASE_10000_CR:
+- pdata->phy.supported |= SUPPORTED_TP;
++ XGBE_SET_SUP(lks, TP);
+ break;
+ default:
+- pdata->phy.supported |= SUPPORTED_FIBRE;
+- }
+-
+- switch (phy_data->sfp_speed) {
+- case XGBE_SFP_SPEED_100_1000:
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+- break;
+- case XGBE_SFP_SPEED_1000:
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ XGBE_SET_SUP(lks, FIBRE);
+ break;
+- case XGBE_SFP_SPEED_10000:
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+- pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+- break;
+- default:
+- /* Choose the fastest supported speed */
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+- pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+- else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+- else if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ }
+
+- pdata->phy.advertising = pdata->phy.supported;
++ XGBE_LM_COPY(lks, advertising, lks, supported);
+ }
+
+ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom,
+@@ -886,8 +883,10 @@ static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
+
+ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ struct phy_device *phydev;
++ u32 advertising;
+ int ret;
+
+ /* If we already have a PHY, just return */
+@@ -943,7 +942,10 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
+ phy_data->phydev = phydev;
+
+ xgbe_phy_external_phy_quirks(pdata);
+- phydev->advertising &= pdata->phy.advertising;
++
++ ethtool_convert_link_mode_to_legacy_u32(&advertising,
++ lks->link_modes.advertising);
++ phydev->advertising &= advertising;
+
+ phy_start_aneg(phy_data->phydev);
+
+@@ -1277,6 +1279,7 @@ static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
+
+ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 fc;
+@@ -1293,11 +1296,11 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ if (phy_data->phydev->pause) {
+- pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ XGBE_SET_LP_ADV(lks, Pause);
+ rmt_adv |= LPA_PAUSE_CAP;
+ }
+ if (phy_data->phydev->asym_pause) {
+- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++ XGBE_SET_LP_ADV(lks, Asym_Pause);
+ rmt_adv |= LPA_PAUSE_ASYM;
+ }
+
+@@ -1310,10 +1313,11 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+
+ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+
+- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+- pdata->phy.lp_advertising |= ADVERTISED_TP;
++ XGBE_SET_LP_ADV(lks, Autoneg);
++ XGBE_SET_LP_ADV(lks, TP);
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+@@ -1322,21 +1326,21 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
+ switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) {
+ case XGBE_SGMII_AN_LINK_SPEED_100:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+- pdata->phy.lp_advertising |= ADVERTISED_100baseT_Full;
++ XGBE_SET_LP_ADV(lks, 100baseT_Full);
+ mode = XGBE_MODE_SGMII_100;
+ } else {
+ /* Half-duplex not supported */
+- pdata->phy.lp_advertising |= ADVERTISED_100baseT_Half;
++ XGBE_SET_LP_ADV(lks, 100baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+ case XGBE_SGMII_AN_LINK_SPEED_1000:
+ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) {
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full;
++ XGBE_SET_LP_ADV(lks, 1000baseT_Full);
+ mode = XGBE_MODE_SGMII_1000;
+ } else {
+ /* Half-duplex not supported */
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half;
++ XGBE_SET_LP_ADV(lks, 1000baseT_Half);
+ mode = XGBE_MODE_UNKNOWN;
+ }
+ break;
+@@ -1349,19 +1353,20 @@ static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata)
+
+ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+- pdata->phy.lp_advertising |= ADVERTISED_FIBRE;
++ XGBE_SET_LP_ADV(lks, Autoneg);
++ XGBE_SET_LP_ADV(lks, FIBRE);
+
+ /* Compare Advertisement and Link Partner register */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY);
+ if (lp_reg & 0x100)
+- pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ XGBE_SET_LP_ADV(lks, Pause);
+ if (lp_reg & 0x80)
+- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++ XGBE_SET_LP_ADV(lks, Asym_Pause);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+@@ -1379,10 +1384,8 @@ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
+ }
+ }
+
+- if (lp_reg & 0x40)
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Half;
+ if (lp_reg & 0x20)
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseT_Full;
++ XGBE_SET_LP_ADV(lks, 1000baseX_Full);
+
+ /* Half duplex is not supported */
+ ad_reg &= lp_reg;
+@@ -1393,12 +1396,13 @@ static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata)
+
+ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+- pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++ XGBE_SET_LP_ADV(lks, Autoneg);
++ XGBE_SET_LP_ADV(lks, Backplane);
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+@@ -1408,9 +1412,9 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
+ if (lp_reg & 0x20)
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+@@ -1463,26 +1467,27 @@ static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata)
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
+
+ return mode;
+ }
+
+ static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ enum xgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+- pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+- pdata->phy.lp_advertising |= ADVERTISED_Backplane;
++ XGBE_SET_LP_ADV(lks, Autoneg);
++ XGBE_SET_LP_ADV(lks, Backplane);
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+ if (lp_reg & 0x400)
+- pdata->phy.lp_advertising |= ADVERTISED_Pause;
++ XGBE_SET_LP_ADV(lks, Pause);
+ if (lp_reg & 0x800)
+- pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
++ XGBE_SET_LP_ADV(lks, Asym_Pause);
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+@@ -1504,9 +1509,9 @@ static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_LP_ADV(lks, 10000baseKR_Full);
+ if (lp_reg & 0x20)
+- pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_LP_ADV(lks, 1000baseKX_Full);
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+@@ -1520,7 +1525,7 @@ static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata)
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
++ XGBE_SET_LP_ADV(lks, 10000baseR_FEC);
+
+ return mode;
+ }
+@@ -1541,41 +1546,43 @@ static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata)
+ }
+ }
+
+-static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
++static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
++ struct ethtool_link_ksettings *dlks)
+ {
++ struct ethtool_link_ksettings *slks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int advertising;
++
++ XGBE_LM_COPY(dlks, advertising, slks, advertising);
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+- return pdata->phy.advertising;
++ return;
+
+ /* With the KR re-driver we need to advertise a single speed */
+- advertising = pdata->phy.advertising;
+- advertising &= ~ADVERTISED_1000baseKX_Full;
+- advertising &= ~ADVERTISED_10000baseKR_Full;
++ XGBE_CLR_ADV(dlks, 1000baseKX_Full);
++ XGBE_CLR_ADV(dlks, 10000baseKR_Full);
+
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+- advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+- advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ case XGBE_PORT_MODE_1000BASE_T:
+ case XGBE_PORT_MODE_1000BASE_X:
+ case XGBE_PORT_MODE_NBASE_T:
+- advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ case XGBE_PORT_MODE_10GBASE_T:
+ if (phy_data->phydev &&
+ (phy_data->phydev->speed == SPEED_10000))
+- advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ else
+- advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ case XGBE_PORT_MODE_10GBASE_R:
+- advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ case XGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+@@ -1583,24 +1590,24 @@ static unsigned int xgbe_phy_an_advertising(struct xgbe_prv_data *pdata)
+ case XGBE_SFP_BASE_1000_SX:
+ case XGBE_SFP_BASE_1000_LX:
+ case XGBE_SFP_BASE_1000_CX:
+- advertising |= ADVERTISED_1000baseKX_Full;
++ XGBE_SET_ADV(dlks, 1000baseKX_Full);
+ break;
+ default:
+- advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ }
+ break;
+ default:
+- advertising |= ADVERTISED_10000baseKR_Full;
++ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+ break;
+ }
+-
+- return advertising;
+ }
+
+ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ u32 advertising;
+ int ret;
+
+ ret = xgbe_phy_find_phy_device(pdata);
+@@ -1610,9 +1617,12 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata)
+ if (!phy_data->phydev)
+ return 0;
+
++ ethtool_convert_link_mode_to_legacy_u32(&advertising,
++ lks->link_modes.advertising);
++
+ phy_data->phydev->autoneg = pdata->phy.autoneg;
+ phy_data->phydev->advertising = phy_data->phydev->supported &
+- pdata->phy.advertising;
++ advertising;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ phy_data->phydev->speed = pdata->phy.speed;
+@@ -2073,11 +2083,10 @@ static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode)
+ }
+
+ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+- enum xgbe_mode mode, u32 advert)
++ enum xgbe_mode mode, bool advert)
+ {
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+- if (pdata->phy.advertising & advert)
+- return true;
++ return advert;
+ } else {
+ enum xgbe_mode cur_mode;
+
+@@ -2092,13 +2101,15 @@ static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata,
+ static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
+ switch (mode) {
+ case XGBE_MODE_X:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_1000baseT_Full);
++ XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_10000baseT_Full);
++ XGBE_ADV(lks, 10000baseKR_Full));
+ default:
+ return false;
+ }
+@@ -2107,19 +2118,21 @@ static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata,
+ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
+ switch (mode) {
+ case XGBE_MODE_SGMII_100:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_100baseT_Full);
++ XGBE_ADV(lks, 100baseT_Full));
+ case XGBE_MODE_SGMII_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_1000baseT_Full);
++ XGBE_ADV(lks, 1000baseT_Full));
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_2500baseX_Full);
++ XGBE_ADV(lks, 2500baseT_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_10000baseT_Full);
++ XGBE_ADV(lks, 10000baseT_Full));
+ default:
+ return false;
+ }
+@@ -2128,6 +2141,7 @@ static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata,
+ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+@@ -2135,22 +2149,26 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
+ if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_1000baseT_Full);
++ XGBE_ADV(lks, 1000baseX_Full));
+ case XGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_100baseT_Full);
++ XGBE_ADV(lks, 100baseT_Full));
+ case XGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T)
+ return false;
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_1000baseT_Full);
++ XGBE_ADV(lks, 1000baseT_Full));
+ case XGBE_MODE_SFI:
+ if (phy_data->sfp_mod_absent)
+ return true;
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_10000baseT_Full);
++ XGBE_ADV(lks, 10000baseSR_Full) ||
++ XGBE_ADV(lks, 10000baseLR_Full) ||
++ XGBE_ADV(lks, 10000baseLRM_Full) ||
++ XGBE_ADV(lks, 10000baseER_Full) ||
++ XGBE_ADV(lks, 10000baseCR_Full));
+ default:
+ return false;
+ }
+@@ -2159,10 +2177,12 @@ static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata,
+ static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
+ switch (mode) {
+ case XGBE_MODE_KX_2500:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_2500baseX_Full);
++ XGBE_ADV(lks, 2500baseX_Full));
+ default:
+ return false;
+ }
+@@ -2171,13 +2191,15 @@ static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata,
+ static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
++
+ switch (mode) {
+ case XGBE_MODE_KX_1000:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_1000baseKX_Full);
++ XGBE_ADV(lks, 1000baseKX_Full));
+ case XGBE_MODE_KR:
+ return xgbe_phy_check_mode(pdata, mode,
+- ADVERTISED_10000baseKR_Full);
++ XGBE_ADV(lks, 10000baseKR_Full));
+ default:
+ return false;
+ }
+@@ -2744,6 +2766,7 @@ static void xgbe_phy_exit(struct xgbe_prv_data *pdata)
+
+ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ {
++ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data;
+ struct mii_bus *mii;
+ unsigned int reg;
+@@ -2823,32 +2846,33 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ phy_data->cur_mode = XGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+- pdata->phy.supported = 0;
++ XGBE_ZERO_SUP(lks);
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case XGBE_PORT_MODE_BACKPLANE:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_Backplane;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, Backplane);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+- pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
++ XGBE_SET_SUP(lks, 1000baseKX_Full);
+ phy_data->start_mode = XGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+- pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
++ XGBE_SET_SUP(lks, 10000baseKR_Full);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+- pdata->phy.supported |=
+- SUPPORTED_10000baseR_FEC;
++ XGBE_SET_SUP(lks, 10000baseR_FEC);
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+ break;
+ case XGBE_PORT_MODE_BACKPLANE_2500:
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_Backplane;
+- pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, Backplane);
++ XGBE_SET_SUP(lks, 2500baseX_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+@@ -2856,15 +2880,16 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ /* MDIO 1GBase-T support */
+ case XGBE_PORT_MODE_1000BASE_T:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_TP;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ XGBE_SET_SUP(lks, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ XGBE_SET_SUP(lks, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+
+@@ -2873,10 +2898,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ /* MDIO Base-X support */
+ case XGBE_PORT_MODE_1000BASE_X:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_FIBRE;
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, FIBRE);
++ XGBE_SET_SUP(lks, 1000baseX_Full);
+ phy_data->start_mode = XGBE_MODE_X;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+@@ -2884,19 +2910,20 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ /* MDIO NBase-T support */
+ case XGBE_PORT_MODE_NBASE_T:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_TP;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ XGBE_SET_SUP(lks, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ XGBE_SET_SUP(lks, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) {
+- pdata->phy.supported |= SUPPORTED_2500baseX_Full;
++ XGBE_SET_SUP(lks, 2500baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KX_2500;
+ }
+
+@@ -2905,19 +2932,20 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ /* 10GBase-T support */
+ case XGBE_PORT_MODE_10GBASE_T:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_TP;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, TP);
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ XGBE_SET_SUP(lks, 100baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ XGBE_SET_SUP(lks, 1000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+- pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ XGBE_SET_SUP(lks, 10000baseT_Full);
+ phy_data->start_mode = XGBE_MODE_KR;
+ }
+
+@@ -2926,12 +2954,16 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ /* 10GBase-R support */
+ case XGBE_PORT_MODE_10GBASE_R:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_TP;
+- pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, FIBRE);
++ XGBE_SET_SUP(lks, 10000baseSR_Full);
++ XGBE_SET_SUP(lks, 10000baseLR_Full);
++ XGBE_SET_SUP(lks, 10000baseLRM_Full);
++ XGBE_SET_SUP(lks, 10000baseER_Full);
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+- pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
++ XGBE_SET_SUP(lks, 10000baseR_FEC);
+ phy_data->start_mode = XGBE_MODE_SFI;
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_NONE;
+@@ -2939,22 +2971,17 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+
+ /* SFP support */
+ case XGBE_PORT_MODE_SFP:
+- pdata->phy.supported |= SUPPORTED_Autoneg;
+- pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- pdata->phy.supported |= SUPPORTED_TP;
+- pdata->phy.supported |= SUPPORTED_FIBRE;
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) {
+- pdata->phy.supported |= SUPPORTED_100baseT_Full;
++ XGBE_SET_SUP(lks, Autoneg);
++ XGBE_SET_SUP(lks, Pause);
++ XGBE_SET_SUP(lks, Asym_Pause);
++ XGBE_SET_SUP(lks, TP);
++ XGBE_SET_SUP(lks, FIBRE);
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100)
+ phy_data->start_mode = XGBE_MODE_SGMII_100;
+- }
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) {
+- pdata->phy.supported |= SUPPORTED_1000baseT_Full;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)
+ phy_data->start_mode = XGBE_MODE_SGMII_1000;
+- }
+- if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) {
+- pdata->phy.supported |= SUPPORTED_10000baseT_Full;
++ if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)
+ phy_data->start_mode = XGBE_MODE_SFI;
+- }
+
+ phy_data->phydev_mode = XGBE_MDIO_MODE_CL22;
+
+@@ -2965,8 +2992,9 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ }
+
+ if (netif_msg_probe(pdata))
+- dev_dbg(pdata->dev, "phy supported=%#x\n",
+- pdata->phy.supported);
++ dev_dbg(pdata->dev, "phy supported=0x%*pb\n",
++ __ETHTOOL_LINK_MODE_MASK_NBITS,
++ lks->link_modes.supported);
+
+ if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) {
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 0e93155..48a46a7 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -131,6 +131,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/interrupt.h>
+ #include <linux/dcache.h>
++#include <linux/ethtool.h>
+
+ #define XGBE_DRV_NAME "amd-xgbe"
+ #define XGBE_DRV_VERSION "1.0.3"
+@@ -296,6 +297,48 @@
+ /* MDIO port types */
+ #define XGMAC_MAX_C22_PORT 3
+
++/* Link mode bit operations */
++#define XGBE_ZERO_SUP(_ls) \
++ ethtool_link_ksettings_zero_link_mode((_ls), supported)
++
++#define XGBE_SET_SUP(_ls, _mode) \
++ ethtool_link_ksettings_add_link_mode((_ls), supported, _mode)
++
++#define XGBE_CLR_SUP(_ls, _mode) \
++ ethtool_link_ksettings_del_link_mode((_ls), supported, _mode)
++
++#define XGBE_IS_SUP(_ls, _mode) \
++ ethtool_link_ksettings_test_link_mode((_ls), supported, _mode)
++
++#define XGBE_ZERO_ADV(_ls) \
++ ethtool_link_ksettings_zero_link_mode((_ls), advertising)
++
++#define XGBE_SET_ADV(_ls, _mode) \
++ ethtool_link_ksettings_add_link_mode((_ls), advertising, _mode)
++
++#define XGBE_CLR_ADV(_ls, _mode) \
++ ethtool_link_ksettings_del_link_mode((_ls), advertising, _mode)
++
++#define XGBE_ADV(_ls, _mode) \
++ ethtool_link_ksettings_test_link_mode((_ls), advertising, _mode)
++
++#define XGBE_ZERO_LP_ADV(_ls) \
++ ethtool_link_ksettings_zero_link_mode((_ls), lp_advertising)
++
++#define XGBE_SET_LP_ADV(_ls, _mode) \
++ ethtool_link_ksettings_add_link_mode((_ls), lp_advertising, _mode)
++
++#define XGBE_CLR_LP_ADV(_ls, _mode) \
++ ethtool_link_ksettings_del_link_mode((_ls), lp_advertising, _mode)
++
++#define XGBE_LP_ADV(_ls, _mode) \
++ ethtool_link_ksettings_test_link_mode((_ls), lp_advertising, _mode)
++
++#define XGBE_LM_COPY(_dst, _dname, _src, _sname) \
++ bitmap_copy((_dst)->link_modes._dname, \
++ (_src)->link_modes._sname, \
++ __ETHTOOL_LINK_MODE_MASK_NBITS)
++
+ struct xgbe_prv_data;
+
+ struct xgbe_packet_data {
+@@ -563,9 +606,7 @@ enum xgbe_mdio_mode {
+ };
+
+ struct xgbe_phy {
+- u32 supported;
+- u32 advertising;
+- u32 lp_advertising;
++ struct ethtool_link_ksettings lks;
+
+ int address;
+
+@@ -817,7 +858,8 @@ struct xgbe_phy_impl_if {
+ int (*an_config)(struct xgbe_prv_data *);
+
+ /* Set/override auto-negotiation advertisement settings */
+- unsigned int (*an_advertising)(struct xgbe_prv_data *);
++ void (*an_advertising)(struct xgbe_prv_data *,
++ struct ethtool_link_ksettings *);
+
+ /* Process results of auto-negotiation */
+ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *);
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2964-net-ethtool-Add-macro-to-clear-a-link-mode.patch b/meta-v1000/recipes-kernel/linux/files/2964-net-ethtool-Add-macro-to-clear-a-link-mode.patch
new file mode 100644
index 00000000..88a54128
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2964-net-ethtool-Add-macro-to-clear-a-link-mode.patch
@@ -0,0 +1,42 @@
+From ed6485788e0ec6f7bca598234c06fb31e0ccc7be Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:55:05 +0530
+Subject: [PATCH] net: ethtool: Add macro to clear a link mode setting
+
+There are currently macros to set and test an ETHTOOL_LINK_MODE_ setting,
+but not to clear one. Add a macro to clear an ETHTOOL_LINK_MODE_ setting.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ include/linux/ethtool.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
+index 9ded8c6..2f0909c 100644
+--- a/include/linux/ethtool.h
++++ b/include/linux/ethtool.h
+@@ -135,6 +135,17 @@ struct ethtool_link_ksettings {
+ __set_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
+
+ /**
++ * ethtool_link_ksettings_del_link_mode - clear bit in link_ksettings
++ * link mode mask
++ * @ptr : pointer to struct ethtool_link_ksettings
++ * @name : one of supported/advertising/lp_advertising
++ * @mode : one of the ETHTOOL_LINK_MODE_*_BIT
++ * (not atomic, no bound checking)
++ */
++#define ethtool_link_ksettings_del_link_mode(ptr, name, mode) \
++ __clear_bit(ETHTOOL_LINK_MODE_ ## mode ## _BIT, (ptr)->link_modes.name)
++
++/**
+ * ethtool_link_ksettings_test_link_mode - test bit in ksettings link mode mask
+ * @ptr : pointer to struct ethtool_link_ksettings
+ * @name : one of supported/advertising/lp_advertising
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2965-Fix-for-build-error.patch b/meta-v1000/recipes-kernel/linux/files/2965-Fix-for-build-error.patch
new file mode 100644
index 00000000..6b489852
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2965-Fix-for-build-error.patch
@@ -0,0 +1,57 @@
+From 5c59108b783c458fe74cf94b65f0e6f98bca2a02 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:55:58 +0530
+Subject: [PATCH] Fix for build error
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ include/linux/netdev_features.h | 2 ++
+ include/uapi/linux/ethtool.h | 5 +++++
+ 2 files changed, 7 insertions(+)
+ mode change 100644 => 100755 include/linux/netdev_features.h
+ mode change 100644 => 100755 include/uapi/linux/ethtool.h
+
+diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
+old mode 100644
+new mode 100755
+index 9c6c8ef..c553adc
+--- a/include/linux/netdev_features.h
++++ b/include/linux/netdev_features.h
+@@ -74,6 +74,7 @@ enum {
+ NETIF_F_BUSY_POLL_BIT, /* Busy poll */
+
+ NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
++ NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
+
+ /*
+ * Add your fresh new feature above and remember to update
+@@ -136,6 +137,7 @@ enum {
+ #define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
+ #define NETIF_F_BUSY_POLL __NETIF_F(BUSY_POLL)
+ #define NETIF_F_HW_TC __NETIF_F(HW_TC)
++#define NETIF_F_RX_UDP_TUNNEL_PORT __NETIF_F(RX_UDP_TUNNEL_PORT)
+
+ #define for_each_netdev_feature(mask_addr, bit) \
+ for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
+old mode 100644
+new mode 100755
+index 8e54723..03ef1b1
+--- a/include/uapi/linux/ethtool.h
++++ b/include/uapi/linux/ethtool.h
+@@ -1368,7 +1368,12 @@ enum ethtool_link_mode_bit_indices {
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
+ ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
+ ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
++ ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
++ ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
+
++ ETHTOOL_LINK_MODE_FEC_NONE_BIT = 49,
++ ETHTOOL_LINK_MODE_FEC_RS_BIT = 50,
++ ETHTOOL_LINK_MODE_FEC_BASER_BIT = 51,
+
+ /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
+ * 31. Please do NOT define any SUPPORTED_* or ADVERTISED_*
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2966-amd-xgbe-Add-support-for-VXLAN-offload.patch b/meta-v1000/recipes-kernel/linux/files/2966-amd-xgbe-Add-support-for-VXLAN-offload.patch
new file mode 100644
index 00000000..30f3be38
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2966-amd-xgbe-Add-support-for-VXLAN-offload.patch
@@ -0,0 +1,857 @@
+From 749de163049d686577354abd6dcc249dad7b8630 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:57:09 +0530
+Subject: [PATCH] amd-xgbe: Add support for VXLAN offload capabilities
+
+The hardware has the capability to perform checksum offload support
+(both Tx and Rx) and TSO support for VXLAN packets. Add the support
+required to enable this.
+
+The hardware can only support a single VXLAN port for offload. If more
+than one VXLAN port is added then the offload capabilities have to be
+disabled and can no longer be advertised.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 24 ++
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 92 ++++++-
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 365 +++++++++++++++++++++++++++-
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 23 ++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 22 ++
+ 5 files changed, 520 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index d07edf9..9431330 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -298,6 +298,7 @@
+ #define MAC_RWKPFR 0x00c4
+ #define MAC_LPICSR 0x00d0
+ #define MAC_LPITCR 0x00d4
++#define MAC_TIR 0x00e0
+ #define MAC_VR 0x0110
+ #define MAC_DR 0x0114
+ #define MAC_HWF0R 0x011c
+@@ -364,6 +365,8 @@
+ #define MAC_HWF0R_TXCOESEL_WIDTH 1
+ #define MAC_HWF0R_VLHASH_INDEX 4
+ #define MAC_HWF0R_VLHASH_WIDTH 1
++#define MAC_HWF0R_VXN_INDEX 29
++#define MAC_HWF0R_VXN_WIDTH 1
+ #define MAC_HWF1R_ADDR64_INDEX 14
+ #define MAC_HWF1R_ADDR64_WIDTH 2
+ #define MAC_HWF1R_ADVTHWORD_INDEX 13
+@@ -448,6 +451,8 @@
+ #define MAC_PFR_PR_WIDTH 1
+ #define MAC_PFR_VTFE_INDEX 16
+ #define MAC_PFR_VTFE_WIDTH 1
++#define MAC_PFR_VUCC_INDEX 22
++#define MAC_PFR_VUCC_WIDTH 1
+ #define MAC_PMTCSR_MGKPKTEN_INDEX 1
+ #define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+ #define MAC_PMTCSR_PWRDWN_INDEX 0
+@@ -510,6 +515,12 @@
+ #define MAC_TCR_SS_WIDTH 2
+ #define MAC_TCR_TE_INDEX 0
+ #define MAC_TCR_TE_WIDTH 1
++#define MAC_TCR_VNE_INDEX 24
++#define MAC_TCR_VNE_WIDTH 1
++#define MAC_TCR_VNM_INDEX 25
++#define MAC_TCR_VNM_WIDTH 1
++#define MAC_TIR_TNID_INDEX 0
++#define MAC_TIR_TNID_WIDTH 16
+ #define MAC_TSCR_AV8021ASMEN_INDEX 28
+ #define MAC_TSCR_AV8021ASMEN_WIDTH 1
+ #define MAC_TSCR_SNAPTYPSEL_INDEX 16
+@@ -1153,11 +1164,17 @@
+ #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+ #define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+ #define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_TNP_INDEX 8
++#define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1
++#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9
++#define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1
+
+ #define RX_NORMAL_DESC0_OVT_INDEX 0
+ #define RX_NORMAL_DESC0_OVT_WIDTH 16
+ #define RX_NORMAL_DESC2_HL_INDEX 0
+ #define RX_NORMAL_DESC2_HL_WIDTH 10
++#define RX_NORMAL_DESC2_TNP_INDEX 11
++#define RX_NORMAL_DESC2_TNP_WIDTH 1
+ #define RX_NORMAL_DESC3_CDA_INDEX 27
+ #define RX_NORMAL_DESC3_CDA_WIDTH 1
+ #define RX_NORMAL_DESC3_CTXT_INDEX 30
+@@ -1184,9 +1201,11 @@
+ #define RX_DESC3_L34T_IPV4_TCP 1
+ #define RX_DESC3_L34T_IPV4_UDP 2
+ #define RX_DESC3_L34T_IPV4_ICMP 3
++#define RX_DESC3_L34T_IPV4_UNKNOWN 7
+ #define RX_DESC3_L34T_IPV6_TCP 9
+ #define RX_DESC3_L34T_IPV6_UDP 10
+ #define RX_DESC3_L34T_IPV6_ICMP 11
++#define RX_DESC3_L34T_IPV6_UNKNOWN 15
+
+ #define RX_CONTEXT_DESC3_TSA_INDEX 4
+ #define RX_CONTEXT_DESC3_TSA_WIDTH 1
+@@ -1201,6 +1220,8 @@
+ #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+ #define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+ #define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
++#define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4
++#define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1
+
+ #define TX_CONTEXT_DESC2_MSS_INDEX 0
+ #define TX_CONTEXT_DESC2_MSS_WIDTH 15
+@@ -1241,8 +1262,11 @@
+ #define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+ #define TX_NORMAL_DESC3_TSE_INDEX 18
+ #define TX_NORMAL_DESC3_TSE_WIDTH 1
++#define TX_NORMAL_DESC3_VNP_INDEX 23
++#define TX_NORMAL_DESC3_VNP_WIDTH 3
+
+ #define TX_NORMAL_DESC2_VLAN_INSERT 0x2
++#define TX_NORMAL_DESC3_VXLAN_PACKET 0x3
+
+ /* MDIO undefined or vendor specific registers */
+ #ifndef MDIO_PMA_10GBR_PMD_CTRL
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index a978408..1bf671e 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -479,6 +479,50 @@ static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
+ return false;
+ }
+
++static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
++{
++ /* Program the VXLAN port */
++ XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);
++
++ netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
++ pdata->vxlan_port);
++}
++
++static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
++{
++ if (!pdata->hw_feat.vxn)
++ return;
++
++ /* Program the VXLAN port */
++ xgbe_set_vxlan_id(pdata);
++
++ /* Allow for IPv6/UDP zero-checksum VXLAN packets */
++ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);
++
++ /* Enable VXLAN tunneling mode */
++ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
++ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);
++
++ netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
++}
++
++static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
++{
++ if (!pdata->hw_feat.vxn)
++ return;
++
++ /* Disable tunneling mode */
++ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);
++
++ /* Clear IPv6/UDP zero-checksum VXLAN packets setting */
++ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);
++
++ /* Clear the VXLAN port */
++ XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);
++
++ netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
++}
++
+ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+ {
+ unsigned int max_q_count, q_count;
+@@ -1610,7 +1654,7 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int tx_packets, tx_bytes;
+- unsigned int csum, tso, vlan;
++ unsigned int csum, tso, vlan, vxlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_set_ic;
+ int start_index = ring->cur;
+@@ -1628,6 +1672,8 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
++ vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
++ VXLAN);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+@@ -1759,6 +1805,10 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ packet->length);
+ }
+
++ if (vxlan)
++ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
++ TX_NORMAL_DESC3_VXLAN_PACKET);
++
+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+@@ -1920,9 +1970,27 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ /* Set checksum done indicator as appropriate */
+- if (netdev->features & NETIF_F_RXCSUM)
++ if (netdev->features & NETIF_F_RXCSUM) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
++ TNPCSUM_DONE, 1);
++ }
++
++ /* Set the tunneled packet indicator */
++ if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
++ TNP, 1);
++
++ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
++ switch (l34t) {
++ case RX_DESC3_L34T_IPV4_UNKNOWN:
++ case RX_DESC3_L34T_IPV6_UNKNOWN:
++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
++ TNPCSUM_DONE, 0);
++ break;
++ }
++ }
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+@@ -1942,12 +2010,23 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
+ packet->vlan_ctag);
+ }
+ } else {
+- if ((etlt == 0x05) || (etlt == 0x06))
++ unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
++ RX_PACKET_ATTRIBUTES, TNP);
++
++ if ((etlt == 0x05) || (etlt == 0x06)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+- else
++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
++ TNPCSUM_DONE, 0);
++ } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
++ CSUM_DONE, 0);
++ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
++ TNPCSUM_DONE, 0);
++ } else {
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
++ }
+ }
+
+ pdata->ext_stats.rxq_packets[channel->queue_index]++;
+@@ -3536,5 +3615,10 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+ hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
+ hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
+
++ /* For VXLAN */
++ hw_if->enable_vxlan = xgbe_enable_vxlan;
++ hw_if->disable_vxlan = xgbe_disable_vxlan;
++ hw_if->set_vxlan_id = xgbe_set_vxlan_id;
++
+ DBGPR("<--xgbe_init_function_ptrs\n");
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 392ea8a..1cb532b 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -123,6 +123,7 @@
+ #include <linux/if_ether.h>
+ #include <linux/net_tstamp.h>
+ #include <linux/phy.h>
++#include <net/vxlan.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -755,6 +756,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
++ hw_feat->vxn = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VXN);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+@@ -859,6 +861,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ (hw_feat->ts_src == 3) ? "internal/external" : "n/a");
+ dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
+ hw_feat->sa_vlan_ins ? "yes" : "no");
++ dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
++ hw_feat->vxn ? "yes" : "no");
+
+ /* Hardware feature register 1 */
+ dev_dbg(pdata->dev, " RX fifo size : %u\n",
+@@ -902,6 +906,116 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+ }
+ }
+
++static void xgbe_disable_vxlan_offloads(struct xgbe_prv_data *pdata)
++{
++ struct net_device *netdev = pdata->netdev;
++
++ if (!pdata->vxlan_offloads_set)
++ return;
++
++ netdev_info(netdev, "disabling VXLAN offloads\n");
++
++ netdev->hw_enc_features &= ~(NETIF_F_SG |
++ NETIF_F_IP_CSUM |
++ NETIF_F_IPV6_CSUM |
++ NETIF_F_RXCSUM |
++ NETIF_F_TSO |
++ NETIF_F_TSO6 |
++ NETIF_F_GRO |
++ NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM);
++
++ netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM);
++
++ pdata->vxlan_offloads_set = 0;
++}
++
++static void xgbe_disable_vxlan_hw(struct xgbe_prv_data *pdata)
++{
++ if (!pdata->vxlan_port_set)
++ return;
++
++ pdata->hw_if.disable_vxlan(pdata);
++
++ pdata->vxlan_port_set = 0;
++ pdata->vxlan_port = 0;
++}
++
++static void xgbe_disable_vxlan_accel(struct xgbe_prv_data *pdata)
++{
++ xgbe_disable_vxlan_offloads(pdata);
++
++ xgbe_disable_vxlan_hw(pdata);
++}
++
++static void xgbe_enable_vxlan_offloads(struct xgbe_prv_data *pdata)
++{
++ struct net_device *netdev = pdata->netdev;
++
++ if (pdata->vxlan_offloads_set)
++ return;
++
++ netdev_info(netdev, "enabling VXLAN offloads\n");
++
++ netdev->hw_enc_features |= NETIF_F_SG |
++ NETIF_F_IP_CSUM |
++ NETIF_F_IPV6_CSUM |
++ NETIF_F_RXCSUM |
++ NETIF_F_TSO |
++ NETIF_F_TSO6 |
++ NETIF_F_GRO |
++ pdata->vxlan_features;
++
++ netdev->features |= pdata->vxlan_features;
++
++ pdata->vxlan_offloads_set = 1;
++}
++
++static void xgbe_enable_vxlan_hw(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_vxlan_data *vdata;
++
++ if (pdata->vxlan_port_set)
++ return;
++
++ if (list_empty(&pdata->vxlan_ports))
++ return;
++
++ vdata = list_first_entry(&pdata->vxlan_ports,
++ struct xgbe_vxlan_data, list);
++
++ pdata->vxlan_port_set = 1;
++ pdata->vxlan_port = be16_to_cpu(vdata->port);
++
++ pdata->hw_if.enable_vxlan(pdata);
++}
++
++static void xgbe_enable_vxlan_accel(struct xgbe_prv_data *pdata)
++{
++ /* VXLAN acceleration desired? */
++ if (!pdata->vxlan_features)
++ return;
++
++ /* VXLAN acceleration possible? */
++ if (pdata->vxlan_force_disable)
++ return;
++
++ xgbe_enable_vxlan_hw(pdata);
++
++ xgbe_enable_vxlan_offloads(pdata);
++}
++
++static void xgbe_reset_vxlan_accel(struct xgbe_prv_data *pdata)
++{
++ xgbe_disable_vxlan_hw(pdata);
++
++ if (pdata->vxlan_features)
++ xgbe_enable_vxlan_offloads(pdata);
++
++ pdata->vxlan_force_disable = 0;
++}
++
+ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+ {
+ struct xgbe_channel *channel;
+@@ -1225,6 +1339,8 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
++ udp_tunnel_get_rx_info(netdev);
++
+ netif_tx_start_all_queues(netdev);
+
+ xgbe_start_timers(pdata);
+@@ -1266,6 +1382,8 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+
++ xgbe_reset_vxlan_accel(pdata);
++
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+@@ -1553,10 +1671,18 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+ if (ret)
+ return ret;
+
+- packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+- packet->tcp_header_len = tcp_hdrlen(skb);
++ if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, VXLAN)) {
++ packet->header_len = skb_inner_transport_offset(skb) +
++ inner_tcp_hdrlen(skb);
++ packet->tcp_header_len = inner_tcp_hdrlen(skb);
++ } else {
++ packet->header_len = skb_transport_offset(skb) +
++ tcp_hdrlen(skb);
++ packet->tcp_header_len = tcp_hdrlen(skb);
++ }
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
++
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+@@ -1571,6 +1697,49 @@ static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+ return 0;
+ }
+
++static bool xgbe_is_vxlan(struct xgbe_prv_data *pdata, struct sk_buff *skb)
++{
++ struct xgbe_vxlan_data *vdata;
++
++ if (pdata->vxlan_force_disable)
++ return false;
++
++ if (!skb->encapsulation)
++ return false;
++
++ if (skb->ip_summed != CHECKSUM_PARTIAL)
++ return false;
++
++ switch (skb->protocol) {
++ case htons(ETH_P_IP):
++ if (ip_hdr(skb)->protocol != IPPROTO_UDP)
++ return false;
++ break;
++
++ case htons(ETH_P_IPV6):
++ if (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP)
++ return false;
++ break;
++
++ default:
++ return false;
++ }
++
++ /* See if we have the UDP port in our list */
++ list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
++ if ((skb->protocol == htons(ETH_P_IP)) &&
++ (vdata->sa_family == AF_INET) &&
++ (vdata->port == udp_hdr(skb)->dest))
++ return true;
++ else if ((skb->protocol == htons(ETH_P_IPV6)) &&
++ (vdata->sa_family == AF_INET6) &&
++ (vdata->port == udp_hdr(skb)->dest))
++ return true;
++ }
++
++ return false;
++}
++
+ static int xgbe_is_tso(struct sk_buff *skb)
+ {
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+@@ -1619,6 +1788,10 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+
++ if (xgbe_is_vxlan(pdata, skb))
++ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
++ VXLAN, 1);
++
+ if (skb_vlan_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
+@@ -2048,18 +2221,83 @@ static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
+ return 0;
+ }
+
++static netdev_features_t xgbe_fix_features(struct net_device *netdev,
++ netdev_features_t features)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ netdev_features_t vxlan_base, vxlan_mask;
++
++ vxlan_base = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RX_UDP_TUNNEL_PORT;
++ vxlan_mask = vxlan_base | NETIF_F_GSO_UDP_TUNNEL_CSUM;
++
++ pdata->vxlan_features = features & vxlan_mask;
++
++ /* Only fix VXLAN-related features */
++ if (!pdata->vxlan_features)
++ return features;
++
++ /* If VXLAN isn't supported then clear any features:
++ * This is needed because NETIF_F_RX_UDP_TUNNEL_PORT gets
++ * automatically set if ndo_udp_tunnel_add is set.
++ */
++ if (!pdata->hw_feat.vxn)
++ return features & ~vxlan_mask;
++
++ /* VXLAN CSUM requires VXLAN base */
++ if ((features & NETIF_F_GSO_UDP_TUNNEL_CSUM) &&
++ !(features & NETIF_F_GSO_UDP_TUNNEL)) {
++ netdev_notice(netdev,
++ "forcing tx udp tunnel support\n");
++ features |= NETIF_F_GSO_UDP_TUNNEL;
++ }
++
++ /* Can't do one without doing the other */
++ if ((features & vxlan_base) != vxlan_base) {
++ netdev_notice(netdev,
++ "forcing both tx and rx udp tunnel support\n");
++ features |= vxlan_base;
++ }
++
++ if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
++ if (!(features & NETIF_F_GSO_UDP_TUNNEL_CSUM)) {
++ netdev_notice(netdev,
++ "forcing tx udp tunnel checksumming on\n");
++ features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
++ }
++ } else {
++ if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM) {
++ netdev_notice(netdev,
++ "forcing tx udp tunnel checksumming off\n");
++ features &= ~NETIF_F_GSO_UDP_TUNNEL_CSUM;
++ }
++ }
++
++ pdata->vxlan_features = features & vxlan_mask;
++
++ /* Adjust UDP Tunnel based on current state */
++ if (pdata->vxlan_force_disable) {
++ netdev_notice(netdev,
++ "VXLAN acceleration disabled, turning off udp tunnel features\n");
++ features &= ~vxlan_mask;
++ }
++
++ return features;
++}
++
+ static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
++ netdev_features_t udp_tunnel;
+ int ret = 0;
+
+ rxhash = pdata->netdev_features & NETIF_F_RXHASH;
+ rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+ rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+ rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
++ udp_tunnel = pdata->netdev_features & NETIF_F_GSO_UDP_TUNNEL;
+
+ if ((features & NETIF_F_RXHASH) && !rxhash)
+ ret = hw_if->enable_rss(pdata);
+@@ -2083,6 +2321,11 @@ static int xgbe_set_features(struct net_device *netdev,
+ else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+ hw_if->disable_rx_vlan_filtering(pdata);
+
++ if ((features & NETIF_F_GSO_UDP_TUNNEL) && !udp_tunnel)
++ xgbe_enable_vxlan_accel(pdata);
++ else if (!(features & NETIF_F_GSO_UDP_TUNNEL) && udp_tunnel)
++ xgbe_disable_vxlan_accel(pdata);
++
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+@@ -2090,6 +2333,111 @@ static int xgbe_set_features(struct net_device *netdev,
+ return 0;
+ }
+
++static void xgbe_udp_tunnel_add(struct net_device *netdev,
++ struct udp_tunnel_info *ti)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ struct xgbe_vxlan_data *vdata;
++
++ if (!pdata->hw_feat.vxn)
++ return;
++
++ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
++ return;
++
++ pdata->vxlan_port_count++;
++
++ netif_dbg(pdata, drv, netdev,
++ "adding VXLAN tunnel, family=%hx/port=%hx\n",
++ ti->sa_family, be16_to_cpu(ti->port));
++
++ if (pdata->vxlan_force_disable)
++ return;
++
++ vdata = kzalloc(sizeof(*vdata), GFP_ATOMIC);
++ if (!vdata) {
++ /* Can no longer properly track VXLAN ports */
++ pdata->vxlan_force_disable = 1;
++ netif_dbg(pdata, drv, netdev,
++ "internal error, disabling VXLAN accelerations\n");
++
++ xgbe_disable_vxlan_accel(pdata);
++
++ return;
++ }
++ vdata->sa_family = ti->sa_family;
++ vdata->port = ti->port;
++
++ list_add_tail(&vdata->list, &pdata->vxlan_ports);
++
++ /* First port added? */
++ if (pdata->vxlan_port_count == 1) {
++ xgbe_enable_vxlan_accel(pdata);
++
++ return;
++ }
++}
++
++static void xgbe_udp_tunnel_del(struct net_device *netdev,
++ struct udp_tunnel_info *ti)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ struct xgbe_vxlan_data *vdata;
++
++ if (!pdata->hw_feat.vxn)
++ return;
++
++ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
++ return;
++
++ netif_dbg(pdata, drv, netdev,
++ "deleting VXLAN tunnel, family=%hx/port=%hx\n",
++ ti->sa_family, be16_to_cpu(ti->port));
++
++ /* Don't need safe version since loop terminates with deletion */
++ list_for_each_entry(vdata, &pdata->vxlan_ports, list) {
++ if (vdata->sa_family != ti->sa_family)
++ continue;
++
++ if (vdata->port != ti->port)
++ continue;
++
++ list_del(&vdata->list);
++ kfree(vdata);
++
++ break;
++ }
++
++ pdata->vxlan_port_count--;
++ if (!pdata->vxlan_port_count) {
++ xgbe_reset_vxlan_accel(pdata);
++
++ return;
++ }
++
++ if (pdata->vxlan_force_disable)
++ return;
++
++ /* See if VXLAN tunnel id needs to be changed */
++ vdata = list_first_entry(&pdata->vxlan_ports,
++ struct xgbe_vxlan_data, list);
++ if (pdata->vxlan_port == be16_to_cpu(vdata->port))
++ return;
++
++ pdata->vxlan_port = be16_to_cpu(vdata->port);
++ pdata->hw_if.set_vxlan_id(pdata);
++}
++
++static netdev_features_t xgbe_features_check(struct sk_buff *skb,
++ struct net_device *netdev,
++ netdev_features_t features)
++{
++ features = vlan_features_check(skb, features);
++ features = vxlan_features_check(skb, features);
++
++ return features;
++}
++
+ static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+@@ -2107,7 +2455,11 @@ static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_poll_controller = xgbe_poll_controller,
+ #endif
+ .ndo_setup_tc = xgbe_setup_tc,
++ .ndo_fix_features = xgbe_fix_features,
+ .ndo_set_features = xgbe_set_features,
++ .ndo_udp_tunnel_add = xgbe_udp_tunnel_add,
++ .ndo_udp_tunnel_del = xgbe_udp_tunnel_del,
++ .ndo_features_check = xgbe_features_check,
+ };
+
+ const struct net_device_ops *xgbe_get_netdev_ops(void)
+@@ -2420,6 +2772,15 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
++ RX_PACKET_ATTRIBUTES, TNP)) {
++ skb->encapsulation = 1;
++
++ if (XGMAC_GET_BITS(packet->attributes,
++ RX_PACKET_ATTRIBUTES, TNPCSUM_DONE))
++ skb->csum_level = 1;
++ }
++
++ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index c5ff385..d91fa59 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -193,6 +193,7 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ mutex_init(&pdata->i2c_mutex);
+ init_completion(&pdata->i2c_complete);
+ init_completion(&pdata->mdio_complete);
++ INIT_LIST_HEAD(&pdata->vxlan_ports);
+
+ pdata->msg_enable = netif_msg_init(debug, default_msg_level);
+
+@@ -374,6 +375,28 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ if (pdata->hw_feat.rss)
+ netdev->hw_features |= NETIF_F_RXHASH;
+
++ if (pdata->hw_feat.vxn) {
++ netdev->hw_enc_features = NETIF_F_SG |
++ NETIF_F_IP_CSUM |
++ NETIF_F_IPV6_CSUM |
++ NETIF_F_RXCSUM |
++ NETIF_F_TSO |
++ NETIF_F_TSO6 |
++ NETIF_F_GRO |
++ NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++ NETIF_F_RX_UDP_TUNNEL_PORT;
++
++ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++ NETIF_F_RX_UDP_TUNNEL_PORT;
++
++ pdata->vxlan_offloads_set = 1;
++ pdata->vxlan_features = NETIF_F_GSO_UDP_TUNNEL |
++ NETIF_F_GSO_UDP_TUNNEL_CSUM |
++ NETIF_F_RX_UDP_TUNNEL_PORT;
++ }
++
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 48a46a7..db155fe 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -132,6 +132,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/dcache.h>
+ #include <linux/ethtool.h>
++#include <linux/list.h>
+
+ #define XGBE_DRV_NAME "amd-xgbe"
+ #define XGBE_DRV_VERSION "1.0.3"
+@@ -817,6 +818,11 @@ struct xgbe_hw_if {
+ /* For ECC */
+ void (*disable_ecc_ded)(struct xgbe_prv_data *);
+ void (*disable_ecc_sec)(struct xgbe_prv_data *, enum xgbe_ecc_sec);
++
++ /* For VXLAN */
++ void (*enable_vxlan)(struct xgbe_prv_data *);
++ void (*disable_vxlan)(struct xgbe_prv_data *);
++ void (*set_vxlan_id)(struct xgbe_prv_data *);
+ };
+
+ /* This structure represents implementation specific routines for an
+@@ -941,6 +947,7 @@ struct xgbe_hw_features {
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
++ unsigned int vxn; /* VXLAN/NVGRE */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+@@ -979,6 +986,12 @@ struct xgbe_version_data {
+ unsigned int rx_desc_prefetch;
+ };
+
++struct xgbe_vxlan_data {
++ struct list_head list;
++ sa_family_t sa_family;
++ __be16 port;
++};
++
+ struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct pci_dev *pcidev;
+@@ -1120,6 +1133,15 @@ struct xgbe_prv_data {
+ u32 rss_table[XGBE_RSS_MAX_TABLE_SIZE];
+ u32 rss_options;
+
++ /* VXLAN settings */
++ unsigned int vxlan_port_set;
++ unsigned int vxlan_offloads_set;
++ unsigned int vxlan_force_disable;
++ unsigned int vxlan_port_count;
++ struct list_head vxlan_ports;
++ u16 vxlan_port;
++ netdev_features_t vxlan_features;
++
+ /* Netdev related settings */
+ unsigned char mac_addr[ETH_ALEN];
+ netdev_features_t netdev_features;
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2967-amd-xgbe-Add-additional-ethtool-statistics.patch b/meta-v1000/recipes-kernel/linux/files/2967-amd-xgbe-Add-additional-ethtool-statistics.patch
new file mode 100644
index 00000000..c142a67b
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2967-amd-xgbe-Add-additional-ethtool-statistics.patch
@@ -0,0 +1,108 @@
+From 39532b54a89cbbf0902ce540b34e71ea16582c14 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:58:46 +0530
+Subject: [PATCH] amd-xgbe: Add additional ethtool statistics
+
+Add some additional statistics for tracking VXLAN packets and checksum
+errors.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 8 +++++++-
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 4 ++++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +++++
+ 3 files changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 1bf671e..671203d 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -1805,10 +1805,13 @@ static void xgbe_dev_xmit(struct xgbe_channel *channel)
+ packet->length);
+ }
+
+- if (vxlan)
++ if (vxlan) {
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
+ TX_NORMAL_DESC3_VXLAN_PACKET);
+
++ pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
++ }
++
+ for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
+ cur_index++;
+ rdata = XGBE_GET_DESC_DATA(ring, cur_index);
+@@ -1981,6 +1984,7 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
+ if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNP, 1);
++ pdata->ext_stats.rx_vxlan_packets++;
+
+ l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
+ switch (l34t) {
+@@ -2018,11 +2022,13 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
+ CSUM_DONE, 0);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 0);
++ pdata->ext_stats.rx_csum_errors++;
+ } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ TNPCSUM_DONE, 0);
++ pdata->ext_stats.rx_vxlan_csum_errors++;
+ } else {
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index cea25ac..ff397bb 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -146,6 +146,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
++ XGMAC_EXT_STAT("tx_vxlan_packets", tx_vxlan_packets),
+ XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+@@ -162,6 +163,7 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
++ XGMAC_EXT_STAT("rx_vxlan_packets", rx_vxlan_packets),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+@@ -177,6 +179,8 @@ static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
++ XGMAC_EXT_STAT("rx_csum_errors", rx_csum_errors),
++ XGMAC_EXT_STAT("rx_vxlan_csum_errors", rx_vxlan_csum_errors),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+ XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
+ XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index db155fe..ad102c8 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -715,6 +715,11 @@ struct xgbe_ext_stats {
+ u64 txq_bytes[XGBE_MAX_DMA_CHANNELS];
+ u64 rxq_packets[XGBE_MAX_DMA_CHANNELS];
+ u64 rxq_bytes[XGBE_MAX_DMA_CHANNELS];
++
++ u64 tx_vxlan_packets;
++ u64 rx_vxlan_packets;
++ u64 rx_csum_errors;
++ u64 rx_vxlan_csum_errors;
+ };
+
+ struct xgbe_hw_if {
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2968-amd-xgbe-Interrupt-summary-bits-are-h-w-version.patch b/meta-v1000/recipes-kernel/linux/files/2968-amd-xgbe-Interrupt-summary-bits-are-h-w-version.patch
new file mode 100644
index 00000000..b54fc1f4
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2968-amd-xgbe-Interrupt-summary-bits-are-h-w-version.patch
@@ -0,0 +1,90 @@
+From e4aadcaff016ac811cddf37d58f6291922ecfbf6 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 09:59:44 +0530
+Subject: [PATCH] amd-xgbe: Interrupt summary bits are h/w version dependent
+
+There is a difference in the bit position of the normal interrupt summary
+enable (NIE) and abnormal interrupt summary enable (AIE) between revisions
+of the hardware. For older revisions the NIE and AIE bits are positions
+16 and 15 respectively. For newer revisions the NIE and AIE bits are
+positions 15 and 14. The effect of changing the bit position is that
+newer hardware won't receive AIE interrupts in the current version of the
+driver. Specifically, the driver uses this interrupt to collect
+statistics on when a receive buffer unavailable event occurs and to
+restart the driver/device when a fatal bus error occurs.
+
+Update the driver to set the interrupt enable bit based on the reported
+version of the hardware.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 8 ++++++--
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 13 ++++++++++---
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 9431330..7ea72ef 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -210,11 +210,15 @@
+ #define DMA_CH_CR_PBLX8_WIDTH 1
+ #define DMA_CH_CR_SPH_INDEX 24
+ #define DMA_CH_CR_SPH_WIDTH 1
+-#define DMA_CH_IER_AIE_INDEX 15
++#define DMA_CH_IER_AIE20_INDEX 15
++#define DMA_CH_IER_AIE20_WIDTH 1
++#define DMA_CH_IER_AIE_INDEX 14
+ #define DMA_CH_IER_AIE_WIDTH 1
+ #define DMA_CH_IER_FBEE_INDEX 12
+ #define DMA_CH_IER_FBEE_WIDTH 1
+-#define DMA_CH_IER_NIE_INDEX 16
++#define DMA_CH_IER_NIE20_INDEX 16
++#define DMA_CH_IER_NIE20_WIDTH 1
++#define DMA_CH_IER_NIE_INDEX 15
+ #define DMA_CH_IER_NIE_WIDTH 1
+ #define DMA_CH_IER_RBUE_INDEX 7
+ #define DMA_CH_IER_RBUE_WIDTH 1
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index 671203d..e107e18 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -649,13 +649,15 @@ static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_channel *channel;
+- unsigned int i;
++ unsigned int i, ver;
+
+ /* Set the interrupt mode if supported */
+ if (pdata->channel_irq_mode)
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
+ pdata->channel_irq_mode);
+
++ ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);
++
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+
+@@ -671,8 +673,13 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+- XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
+- XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
++ if (ver < 0x21) {
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
++ } else {
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
++ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
++ }
+ XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2969-Enabled-DMA-flags-in-eMMC-driver.patch b/meta-v1000/recipes-kernel/linux/files/2969-Enabled-DMA-flags-in-eMMC-driver.patch
new file mode 100644
index 00000000..21aef87a
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2969-Enabled-DMA-flags-in-eMMC-driver.patch
@@ -0,0 +1,30 @@
+From c67b2175cde0859f48a2134dcea8921f65df53b0 Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 10:00:33 +0530
+Subject: [PATCH] Enabled DMA flags in eMMC driver.
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/mmc/host/sdhci-acpi.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 0944a8c..232bd8e 100755
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -398,8 +398,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
+ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .chip = &sdhci_acpi_chip_amd,
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET,
+- //.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE,
+- .quirks = SDHCI_QUIRK_BROKEN_DMA | SDHCI_QUIRK_BROKEN_ADMA,
++ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
++ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+ };
+ struct sdhci_acpi_uid_slot {
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2970-drm-amd-display-amdgpu_dm-glmark2-hard-hang-fix.patch b/meta-v1000/recipes-kernel/linux/files/2970-drm-amd-display-amdgpu_dm-glmark2-hard-hang-fix.patch
new file mode 100644
index 00000000..f7cc4375
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2970-drm-amd-display-amdgpu_dm-glmark2-hard-hang-fix.patch
@@ -0,0 +1,33 @@
+From b1a75bad901c7ac5ff04fdf9a57f0e117740679e Mon Sep 17 00:00:00 2001
+From: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+Date: Fri, 8 Dec 2017 10:01:29 +0530
+Subject: [PATCH] drm/amd/display/amdgpu_dm: glmark2 hard hang fix
+
+Fixes the issue with dangling pointer cleanup.
+
+Signed-off-by: Kalyan Alle <kalyan.alle@amd.com>
+Reviewed-by: Ravi Patlegar <Ravi.Patlegar@amd.com>
+
+Signed-off-by: Pavan Kumar Ramayanam <pavan.ramayanam@amd.com>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index f0c459a..a59c341 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -3131,10 +3131,6 @@ void dm_drm_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+ {
+ struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+-
+- if (dm_plane_state->dc_state)
+- dc_plane_state_release(dm_plane_state->dc_state);
+-
+ drm_atomic_helper_plane_destroy_state(plane, state);
+ }
+
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/2971-ASoC-AMD-Enable-pci-bus-mastering-for-ACP3.x-device.patch b/meta-v1000/recipes-kernel/linux/files/2971-ASoC-AMD-Enable-pci-bus-mastering-for-ACP3.x-device.patch
new file mode 100644
index 00000000..69c6a274
--- /dev/null
+++ b/meta-v1000/recipes-kernel/linux/files/2971-ASoC-AMD-Enable-pci-bus-mastering-for-ACP3.x-device.patch
@@ -0,0 +1,33 @@
+From f48448bd4006f2e6f64736128cd5b5ba9e49c45b Mon Sep 17 00:00:00 2001
+From: Sanjay R Mehta <Sanju.Mehta@amd.com>
+Date: Mon, 18 Dec 2017 20:48:30 +0530
+Subject: [PATCH] ASoC: AMD: Enable pci bus-mastering for ACP3.x device
+
+By default, DMA bus-mastering is disabled in UEFI boot
+mode for the ACP3.x device, so the pci_set_master() API needs
+to be invoked explicitly to set the DMA bit in the
+ACP3.x PCI_COMMAND register.
+
+Signed-off-by: Sanjay R Mehta <Sanju.Mehta@amd.com>
+Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
+---
+ sound/soc/amd/raven/pci-acp3x.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
+index 5891c5b..0d39aa8 100644
+--- a/sound/soc/amd/raven/pci-acp3x.c
++++ b/sound/soc/amd/raven/pci-acp3x.c
+@@ -70,6 +70,9 @@ static int snd_acp3x_probe(struct pci_dev *pci,
+ goto release_regions;
+ }
+
++ /* set pci bus-mastering */
++ pci_set_master(pci);
++
+ pci_set_drvdata(pci, adata);
+
+ val = rv_readl(adata->acp3x_base + mmACP_I2S_PIN_CONFIG);
+--
+2.7.4
+
diff --git a/meta-v1000/recipes-kernel/linux/files/v1000-extra-config.cfg b/meta-v1000/recipes-kernel/linux/files/v1000-extra-config.cfg
index a6c1de08..92584a04 100644
--- a/meta-v1000/recipes-kernel/linux/files/v1000-extra-config.cfg
+++ b/meta-v1000/recipes-kernel/linux/files/v1000-extra-config.cfg
@@ -118,6 +118,7 @@ CONFIG_ALLOW_DEV_COREDUMP=y
# CONFIG_SXGBE_ETH is not set
# CONFIG_TI_CPSW_ALE is not set
# CONFIG_BCM7XXX_PHY is not set
+CONFIG_MARVELL_PHY=y
# CONFIG_MDIO_BCM_UNIMAC is not set
CONFIG_USB_NET_DRIVERS=y
# CONFIG_ATH9K_DYNACK is not set
diff --git a/meta-v1000/recipes-kernel/linux/files/v1000-user-config.cfg b/meta-v1000/recipes-kernel/linux/files/v1000-user-config.cfg
index 1362d7eb..6903c924 100644
--- a/meta-v1000/recipes-kernel/linux/files/v1000-user-config.cfg
+++ b/meta-v1000/recipes-kernel/linux/files/v1000-user-config.cfg
@@ -196,3 +196,7 @@ CONFIG_SND_SOC=m
CONFIG_SND_SOC_AMD_ACP3x=m
CONFIG_FB_SIMPLE=y
CONFIG_LOGO=y
+CONFIG_AMD_XGBE=y
+CONFIG_AMD_XGBE_DCB=y
+CONFIG_AMD_XGBE_HAVE_ECC=y
+CONFIG_SND_SOC_AMD_ACP3x=m
diff --git a/meta-v1000/recipes-kernel/linux/files/v1000-user-patches.scc b/meta-v1000/recipes-kernel/linux/files/v1000-user-patches.scc
index c320c05f..55037895 100755
--- a/meta-v1000/recipes-kernel/linux/files/v1000-user-patches.scc
+++ b/meta-v1000/recipes-kernel/linux/files/v1000-user-patches.scc
@@ -2908,3 +2908,63 @@ patch 2908-EDAC-amd64-Don-t-treat-ECC-disabled-as-failure.patch
patch 2909-EDAC-amd64-Add-x86cpuid-sanity-check-during-init.patch
patch 2910-EDAC-amd64-Bump-driver-version.patch
patch 2911-AMD-eMMC-fixes.patch
+patch 2912-drm-amdkfd-Reset-process-queues-if-it-VM_FAULTs.patch
+patch 2913-drm-amdkfd-Support-registering-third-pary-device-mem.patch
+patch 2914-drm-amdkfd-Address-kernel-warning.patch
+patch 2915-drm-amdkfd-Handle-MEM_VIOL-in-trap-handler.patch
+patch 2916-drm-amd-Add-mqd-as-parameter-in-kfd2kgd.hqd_destroy-.patch
+patch 2917-drm-amdkfd-Fix-a-bug-that-process-cleanup-is-not-don.patch
+patch 2918-drm-amdkfd-Fix-a-bug-that-vmid-is-released-before.patch
+patch 2919-drm-amd-Implement-parallel-memory-mapping-on-mGPUs.patch
+patch 2920-drm-amdkfd-gfx9-preempt-queues-after-VM_FAULT.patch
+patch 2921-drm-amd-pp-Read-the-maximum-clock-frequency-from.patch
+patch 2922-AMD-XGBE-support.patch
+patch 2923-amd-xgbe-clocksource-Use-a-plain-u64-instead-of.patch
+patch 2924-amd-xgbe-Fix-IRQ-processing-when-running-in-single.patch
+patch 2925-amd-xgbe-Update-PCI-support-to-use-new-IRQ.patch
+patch 2926-amd-xgbe-Add-a-hardware-quirk-for-register.patch
+patch 2927-amd-xgbe-Check-xgbe_init-return-code.patch
+patch 2928-amd-xgbe-Stop-the-PHY-before-releasing-interrupts.patch
+patch 2929-amd-xgbe-Be-sure-to-set-MDIO-modes-on-device.patch
+patch 2930-amd-xgbe-Don-t-overwrite-SFP-PHY-mod_absent.patch
+patch 2931-net-busy-poll-allow-preemption-in-sk_busy_loop.patch
+patch 2932-net-busy-poll-return-busypolling-status-to.patch
+patch 2933-net-remove-__napi_complete-All-__napi_complete.patch
+patch 2934-amd-xgbe-Enable-IRQs-only-if-napi_complete_done.patch
+patch 2935-amd-xgbe-Fix-the-ECC-related-bit-position.patch
+patch 2936-net-ethernet-update-drivers-to-make-both-SW-and.patch
+patch 2937-amd-xgbe-use-PAGE_ALLOC_COSTLY_ORDER-in.patch
+patch 2938-amd-xgbe-Simplify-mailbox-interface-rate-change.patch
+patch 2939-amd-xgbe-Fix-SFP-PHY-supported-advertised-settings.patch
+patch 2940-amd-xgbe-Use-the-proper-register-during-PTP.patch
+patch 2941-amd-xgbe-Add-a-check-for-an-skb-in-the-timestamp.patch
+patch 2942-amd-xgbe-Prevent-looping-forever-if-timestamp.patch
+patch 2943-amd-xgbe-Handle-return-code-from-software-reset.patch
+patch 2944-amd-xgbe-Fixes-for-working-with-PHYs-that-support.patch
+patch 2945-amd-xgbe-Limit-the-I2C-error-messages-that-are.patch
+patch 2946-amd-xgbe-Re-issue-interrupt-if-interrupt-status.patch
+patch 2947-amd-xgbe-Add-NUMA-affinity-support-for-memory.patch
+patch 2948-amd-xgbe-Add-NUMA-affinity-support-for-IRQ-hints.patch
+patch 2949-amd-xgbe-Prepare-for-more-fine-grained-cache.patch
+patch 2950-amd-xgbe-Simplify-the-burst-length-settings.patch
+patch 2951-amd-xgbe-Adjust-register-settings-to-improve.patch
+patch 2952-amd-xgbe-fix-spelling-mistake-avialable.patch
+patch 2953-drivers-net-add-missing-interrupt.h-include.patch
+patch 2954-amd-xgbe-Set-the-MDIO-mode-for-10000Base-T.patch
+patch 2955-amd-xgbe-Set-the-MII-control-width-for-the-MAC.patch
+patch 2956-amd-xgbe-Be-sure-driver-shuts-down-cleanly-on.patch
+patch 2957-amd-xgbe-Update-TSO-packet-statistics-accuracy.patch
+patch 2958-amd-xgbe-Add-support-to-handle-device-renaming.patch
+patch 2959-amd-xgbe-Add-additional-dynamic-debug-messages.patch
+patch 2960-amd-xgbe-Optimize-DMA-channel-interrupt-enablement.patch
+patch 2961-amd-xgbe-Add-hardware-features-debug-output.patch
+patch 2962-amd-xgbe-Add-per-queue-Tx-and-Rx-statistics.patch
+patch 2963-amd-xgbe-Convert-to-using-the-new-link-mode.patch
+patch 2964-net-ethtool-Add-macro-to-clear-a-link-mode.patch
+patch 2965-Fix-for-build-error.patch
+patch 2966-amd-xgbe-Add-support-for-VXLAN-offload.patch
+patch 2967-amd-xgbe-Add-additional-ethtool-statistics.patch
+patch 2968-amd-xgbe-Interrupt-summary-bits-are-h-w-version.patch
+patch 2969-Enabled-DMA-flags-in-eMMC-driver.patch
+patch 2970-drm-amd-display-amdgpu_dm-glmark2-hard-hang-fix.patch
+patch 2971-ASoC-AMD-Enable-pci-bus-mastering-for-ACP3.x-device.patch