aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0001-amd-xgbe-Fix-debug-output-of-max-channel-counts.patch32
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0002-amd-xgbe-Read-and-save-the-port-property-registers-d.patch244
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0003-amd-xgbe-Remove-use-of-comm_owned-field.patch76
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0004-amd-xgbe-Remove-field-that-indicates-SFP-diagnostic-.patch56
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0005-amd-xgbe-Add-ethtool-support-to-retrieve-SFP-module-.patch299
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0006-amd-xgbe-Add-ethtool-show-set-ring-parameter-support.patch153
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0007-amd-xgbe-Prepare-for-ethtool-set-channel-support.patch241
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0008-amd-xgbe-Add-ethtool-show-set-channels-support.patch237
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0009-amd-xgbe-Always-attempt-link-training-in-KR-mode.patch161
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0010-amd-xgbe-Advertise-FEC-support-with-the-KR-re-driver.patch33
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0011-amd-xgbe-Update-the-BelFuse-quirk-to-support-SGMII.patch162
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0012-amd-xgbe-Improve-SFP-100Mbps-auto-negotiation.patch201
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0013-amd-xgbe-Merged-From-453f85d43fa9ee243f0fc3ac4e1be45.patch82
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0014-crypto-ccp-Use-GCM-IV-size-constant.patch57
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0015-crypto-ccp-unmap-pages-and-remove-unmap-objects-in-c.patch32
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0016-crypto-ccp-invoke-the-DMA-callback-in-a-standard-way.patch33
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0017-crypto-ccp-remove-unused-variable-qim.patch41
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0018-crypto-ccp-use-ENOSPC-for-transient-busy-indication.patch66
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0019-crypto-ccp-Build-the-AMD-secure-processor-driver-onl.patch37
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0020-crypto-ccp-Add-Platform-Security-Processor-PSP-devic.patch462
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0021-crypto-ccp-Define-SEV-key-management-command-id.patch505
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0022-Documentation-virtual-kvm-Add-AMD-Secure-Encrypted-V.patch97
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0023-crypto-ccp-Add-Secure-Encrypted-Virtualization-SEV-c.patch678
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0024-crypto-ccp-Define-SEV-userspace-ioctl-and-command-id.patch181
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0025-crypto-ccp-Implement-SEV_FACTORY_RESET-ioctl-command.patch121
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0026-crypto-ccp-Implement-SEV_PLATFORM_STATUS-ioctl-comma.patch70
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0027-crypto-ccp-Implement-SEV_PEK_GEN-ioctl-command.patch66
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0028-crypto-ccp-Implement-SEV_PDH_GEN-ioctl-command.patch46
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0029-crypto-ccp-Implement-SEV_PEK_CSR-ioctl-command.patch115
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0030-crypto-ccp-Implement-SEV_PEK_CERT_IMPORT-ioctl-comma.patch155
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0031-crypto-ccp-Implement-SEV_PDH_CERT_EXPORT-ioctl-comma.patch147
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0032-crypto-drivers-remove-duplicate-includes.patch30
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0033-crypto-ccp-Make-function-ccp_get_dma_chan_attr-stati.patch36
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0034-crypto-ccp-add-check-to-get-PSP-master-only-when-PSP.patch72
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0035-crypto-ccp-Fix-sparse-use-plain-integer-as-NULL-poin.patch65
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0036-crypto-ccp-Fill-the-result-buffer-only-on-digest-fin.patch49
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0037-crypto-ccp-Use-memdup_user-rather-than-duplicating-i.patch56
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0038-crypto-ccp-Validate-buffer-lengths-for-copy-operatio.patch265
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0039-crypto-ccp-Add-DOWNLOAD_FIRMWARE-SEV-command.patch223
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0040-crypto-ccp-Add-GET_ID-SEV-command.patch153
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0041-include-psp-sev-Capitalize-invalid-length-enum.patch41
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0042-Documentation-x86-Add-AMD-Secure-Encrypted-Virtualiz.patch92
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch121
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0044-x86-mm-Insure-that-boot-memory-areas-are-mapped-prop.patch76
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0045-x86-mm-Don-t-attempt-to-encrypt-initrd-under-SEV.patch52
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0046-x86-mm-Use-encrypted-access-of-boot-related-data-wit.patch118
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0047-x86-mm-Include-SEV-for-encryption-memory-attribute-c.patch47
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0048-x86-efi-Access-EFI-data-as-encrypted-when-SEV-is-act.patch85
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0049-resource-Consolidate-resource-walking-code.patch120
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0050-resource-Provide-resource-struct-in-resource-walk-ca.patch251
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch218
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch154
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0053-x86-boot-Add-early-boot-support-when-running-with-SE.patch399
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0054-x86-io-Unroll-string-I-O-when-SEV-is-active.patch124
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0055-x86-Add-support-for-changing-memory-encryption-attri.patch207
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0056-percpu-Introduce-DEFINE_PER_CPU_DECRYPTED.patch99
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0057-X86-KVM-Decrypt-shared-per-cpu-variables-when-SEV-is.patch106
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0058-X86-KVM-Clear-encryption-attribute-when-SEV-is-activ.patch175
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch188
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0060-kvm-svm-prepare-for-new-bit-definition-in-nested_ctl.patch81
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0061-kvm-svm-Add-SEV-feature-definitions-to-KVM.patch44
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0062-KVM-SVM-Prepare-to-reserve-asid-for-SEV-guest.patch62
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0063-KVM-X86-Extend-CPUID-range-to-include-new-leaf.patch64
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0064-KVM-Introduce-KVM_MEMORY_ENCRYPT_OP-ioctl.patch111
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0065-KVM-Introduce-KVM_MEMORY_ENCRYPT_-UN-REG_REGION.patch151
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0066-KVM-X86-Add-CONFIG_KVM_AMD_SEV.patch52
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0067-KVM-SVM-Reserve-ASID-range-for-SEV-guest.patch56
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0068-KVM-SVM-Add-sev-module_param.patch112
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0069-KVM-Define-SEV-key-management-command-id.patch338
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0070-KVM-SVM-Add-KVM_SEV_INIT-command.patch270
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0071-KVM-SVM-VMRUN-should-use-associated-ASID-when-SEV-is.patch162
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0072-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_START-command.patch248
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0073-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_UPDATE_DATA-c.patch298
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0074-SEV-error-too-few-arguments-to-function-release_page.patch39
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0075-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_MEASURE-comma.patch125
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0076-KVM-SVM-Add-support-for-SEV-LAUNCH_FINISH-command.patch72
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0077-KVM-SVM-Add-support-for-SEV-GUEST_STATUS-command.patch82
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0078-KVM-SVM-Add-support-for-SEV-DEBUG_DECRYPT-command.patch202
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0079-KVM-SVM-Add-support-for-SEV-DEBUG_ENCRYPT-command.patch155
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0080-KVM-SVM-Add-support-for-SEV-LAUNCH_SECRET-command.patch117
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch231
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0082-KVM-SVM-limit-kvm_handle_page_fault-to-PF-handling.patch111
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0083-KVM-SVM-Clear-C-bit-from-the-page-fault-address.patch54
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0084-KVM-X86-Restart-the-guest-when-insn_len-is-zero-and-.patch88
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0085-x86-mm-Unbreak-modules-that-use-the-DMA-API.patch56
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0086-x86-mm-Encrypt-the-initrd-earlier-for-BSP-microcode-.patch54
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0087-KVM-x86-prefer-depends-on-to-select-for-SEV.patch36
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0088-KVM-SVM-no-need-to-call-access_ok-in-LAUNCH_MEASURE-.patch92
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0089-KVM-SVM-install-RSM-intercept.patch78
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0090-KVM-SVM-Fix-SEV-LAUNCH_SECRET-command.patch64
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0091-KVM-x86-define-SVM-VMX-specific-kvm_arch_-alloc-free.patch123
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch467
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0093-check-pci-dev-before-getting-pci-alias.patch56
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0094-mmc-sdhci-acpi-Add-support-for-ACPI-HID-of-AMD-Contr.patch125
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0095-eMMC-patch-4.14.48.patch219
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0096-Revert-eMMC-patch-4.14.48.patch221
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0097-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch50
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0098-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch86
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0099-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch91
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0100-mmc-sdhci-add-tuning-error-codes.patch78
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0101-mmc-sdhci-Export-sdhci_request.patch53
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0102-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch80
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0103-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch128
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0104-mmc-sdhci-Add-version-V4-definition.patch46
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0105-mmc-sdhci-Add-sd-host-v4-mode.patch105
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0106-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch210
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0107-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch79
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0108-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch113
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0109-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch149
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0110-pinctrl-eMMC-and-PinCtrl-is-sharing-the-interrupt-no.patch29
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0111-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch45
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0112-lib-crc-Move-polynomial-definition-to-separate-heade.patch96
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0113-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch104
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0114-net-ethernet-Use-existing-define-with-polynomial.patch46
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0115-net-amd-fix-return-type-of-ndo_start_xmit-function.patch44
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0116-net-phy-Add-helper-for-advertise-to-lcl-value.patch71
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0117-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch35
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0118-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch104
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0119-crypto-ahash-remove-useless-setting-of-type-flags.patch53
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0120-crypto-ahash-remove-useless-setting-of-cra_type.patch50
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0121-crypto-ccp-Fix-command-completion-detection-race.patch50
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0122-crypto-ccp-Add-psp-enabled-message-when-initializati.patch33
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0123-crypto-ccp-Remove-unused-defines.patch60
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0124-crypto-ccp-Support-register-differences-between-PSP-.patch174
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0125-crypto-ccp-Add-support-for-new-CCP-PSP-device-ID.patch91
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0126-crypto-ccp-Check-for-NULL-PSP-pointer-at-module-unlo.patch41
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0127-crypto-ccp-add-timeout-support-in-the-SEV-command.patch121
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0128-crypto-ccp-Fix-static-checker-warning.patch36
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0129-crypto-ccp-Allow-SEV-firmware-to-be-chosen-based-on-.patch99
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0130-crypto-ccp-Remove-forward-declaration.patch100
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0131-crypto-ccp-Make-function-sev_get_firmware-static.patch35
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0132-amd-xgbe-Sometimes-driver-report-incorrect-link-stat.patch39
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/afalg.cfg44
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/amd-ccp.cfg29
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/amd-emmc-patches.scc18
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/amd-xgbe-patches.scc22
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/amd-xgbe.cfg51
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/disable-graphics.cfg5
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/e3000-extra-config.cfg351
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/e3000-standard-only.cfg3
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/e3000-user-config.cfg407
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/e3000-user-features.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/e3000-user-patches.scc92
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/e3000.cfg59
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/kvm.cfg40
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000-extra-config.cfg416
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000-gpu-config.cfg8
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000-standard-only.cfg3
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000-user-config.cfg204
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000-user-features.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000-user-patches.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-r1000/r1000.cfg61
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000-extra-config.cfg417
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000-gpu-config.cfg7
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000-standard-only.cfg3
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000-user-config.cfg204
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000-user-features.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000-user-patches.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-v1000/v1000.cfg61
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000-extra-config.cfg388
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000-gpu-config.cfg7
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000-standard-only.cfg3
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000-user-config.cfg196
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000-user-features.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000-user-patches.scc0
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.19.8-r1000/r1000.cfg60
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-e3000_4.14.inc29
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-r1000_4.14.inc14
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-r1000_4.19.inc14
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-rt_4.14.bbappend2
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-v1000_4.14.inc14
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto_4.14.bbappend4
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto_4.19.bbappend4
173 files changed, 19486 insertions, 2 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0001-amd-xgbe-Fix-debug-output-of-max-channel-counts.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0001-amd-xgbe-Fix-debug-output-of-max-channel-counts.patch
new file mode 100644
index 00000000..c05dbe95
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0001-amd-xgbe-Fix-debug-output-of-max-channel-counts.patch
@@ -0,0 +1,32 @@
+From e38545aac6c2a14c934331f7307f0b53e5ad70da Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:38:11 -0500
+Subject: [PATCH 01/95] amd-xgbe: Fix debug output of max channel counts
+
+A debug output print statement uses the wrong variable to output the
+maximum Rx channel count (cut and paste error, basically). Fix the
+statement to use the proper variable.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index 82d1f41..7b63521 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -344,7 +344,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
+ pdata->tx_max_channel_count,
+- pdata->tx_max_channel_count);
++ pdata->rx_max_channel_count);
+ dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
+ pdata->tx_max_q_count, pdata->rx_max_q_count);
+ }
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0002-amd-xgbe-Read-and-save-the-port-property-registers-d.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0002-amd-xgbe-Read-and-save-the-port-property-registers-d.patch
new file mode 100644
index 00000000..010ddaf9
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0002-amd-xgbe-Read-and-save-the-port-property-registers-d.patch
@@ -0,0 +1,244 @@
+From 2bae5fe2100e17ecf248820b543f8afaf2fe2de3 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:38:20 -0500
+Subject: [PATCH 02/95] amd-xgbe: Read and save the port property registers
+ during probe
+
+Read and save the port property registers once during the device probe
+and then use the saved values as they are needed.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 34 +++++++++++----
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 68 ++++++++++++-----------------
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 7 +++
+ 3 files changed, 62 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+index 7b63521..7b86240 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+@@ -335,12 +335,29 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ pdata->awcr = XGBE_DMA_PCI_AWCR;
+ pdata->awarcr = XGBE_DMA_PCI_AWARCR;
+
++ /* Read the port property registers */
++ pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0);
++ pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1);
++ pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2);
++ pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3);
++ pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4);
++ if (netif_msg_probe(pdata)) {
++ dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0);
++ dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1);
++ dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2);
++ dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3);
++ dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4);
++ }
++
+ /* Set the maximum channels and queues */
+- reg = XP_IOREAD(pdata, XP_PROP_1);
+- pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+- pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+- pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+- pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
++ pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
++ MAX_TX_DMA);
++ pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
++ MAX_RX_DMA);
++ pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
++ MAX_TX_QUEUES);
++ pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1,
++ MAX_RX_QUEUES);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
+ pdata->tx_max_channel_count,
+@@ -353,12 +370,13 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ xgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
+- reg = XP_IOREAD(pdata, XP_PROP_2);
+- pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
++ pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
++ TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+- pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
++ pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2,
++ RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index aac8843..123ceb0 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -2421,22 +2421,21 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int reg;
+-
+- reg = XP_IOREAD(pdata, XP_PROP_3);
+
+ phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 +
+- XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
++ XP_GET_BITS(pdata->pp3, XP_PROP_3,
++ GPIO_ADDR);
+
+- phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
++ phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3,
++ GPIO_MASK);
+
+- phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
++ phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_RX_LOS);
+- phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
++ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_TX_FAULT);
+- phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
++ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_MOD_ABS);
+- phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
++ phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ GPIO_RATE_SELECT);
+
+ if (netif_msg_probe(pdata)) {
+@@ -2458,18 +2457,17 @@ static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata)
+ static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int reg, mux_addr_hi, mux_addr_lo;
++ unsigned int mux_addr_hi, mux_addr_lo;
+
+- reg = XP_IOREAD(pdata, XP_PROP_4);
+-
+- mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
+- mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
++ mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI);
++ mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO);
+ if (mux_addr_lo == XGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+- phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
++ phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4,
++ MUX_CHAN);
+
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "SFP: mux_address=%#x\n",
+@@ -2592,13 +2590,11 @@ static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data)
+ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+- unsigned int reg;
+
+ if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO)
+ return 0;
+
+- reg = XP_IOREAD(pdata, XP_PROP_3);
+- phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
++ phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case XGBE_MDIO_RESET_NONE:
+ case XGBE_MDIO_RESET_I2C_GPIO:
+@@ -2612,12 +2608,12 @@ static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata)
+
+ if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 +
+- XP_GET_BITS(reg, XP_PROP_3,
++ XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+- phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
++ phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) {
+- phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
++ phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+@@ -2707,12 +2703,9 @@ static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata)
+
+ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata)
+ {
+- unsigned int reg;
+-
+- reg = XP_IOREAD(pdata, XP_PROP_0);
+- if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
++ if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS))
+ return false;
+- if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
++ if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+@@ -2921,7 +2914,6 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+ struct xgbe_phy_data *phy_data;
+ struct mii_bus *mii;
+- unsigned int reg;
+ int ret;
+
+ /* Check if enabled */
+@@ -2940,12 +2932,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ return -ENOMEM;
+ pdata->phy_data = phy_data;
+
+- reg = XP_IOREAD(pdata, XP_PROP_0);
+- phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+- phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+- phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+- phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+- phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
++ phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
++ phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID);
++ phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS);
++ phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE);
++ phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR);
+ if (netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode);
+ dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id);
+@@ -2954,12 +2945,11 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata)
+ dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr);
+ }
+
+- reg = XP_IOREAD(pdata, XP_PROP_4);
+- phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+- phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+- phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+- phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+- phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
++ phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT);
++ phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF);
++ phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR);
++ phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE);
++ phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL);
+ if (phy_data->redrv && netif_msg_probe(pdata)) {
+ dev_dbg(pdata->dev, "redrv present\n");
+ dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 95d4b56..54e43ad3 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1027,6 +1027,13 @@ struct xgbe_prv_data {
+ void __iomem *xprop_regs; /* XGBE property registers */
+ void __iomem *xi2c_regs; /* XGBE I2C CSRs */
+
++ /* Port property registers */
++ unsigned int pp0;
++ unsigned int pp1;
++ unsigned int pp2;
++ unsigned int pp3;
++ unsigned int pp4;
++
+ /* Overall device lock */
+ spinlock_t lock;
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0003-amd-xgbe-Remove-use-of-comm_owned-field.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0003-amd-xgbe-Remove-use-of-comm_owned-field.patch
new file mode 100644
index 00000000..6b5c1c6f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0003-amd-xgbe-Remove-use-of-comm_owned-field.patch
@@ -0,0 +1,76 @@
+From 34f2152820350514007573625e85cf97366d3172 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:38:29 -0500
+Subject: [PATCH 03/95] amd-xgbe: Remove use of comm_owned field
+
+The comm_owned field can hide logic where double locking is attempted
+and prevent multiple threads for the same device from accessing the
+mutex properly. Remove the comm_owned field and use the mutex API
+exclusively for gaining ownership. The current driver has been audited
+and is obtaining communications ownership properly.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 16 ----------------
+ 1 file changed, 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 123ceb0..05003be 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -327,8 +327,6 @@ struct xgbe_phy_data {
+
+ unsigned int mdio_addr;
+
+- unsigned int comm_owned;
+-
+ /* SFP Support */
+ enum xgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+@@ -382,12 +380,6 @@ static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata);
+ static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata,
+ struct xgbe_i2c_op *i2c_op)
+ {
+- struct xgbe_phy_data *phy_data = pdata->phy_data;
+-
+- /* Be sure we own the bus */
+- if (WARN_ON(!phy_data->comm_owned))
+- return -EIO;
+-
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+ }
+
+@@ -549,10 +541,6 @@ static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata)
+
+ static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata)
+ {
+- struct xgbe_phy_data *phy_data = pdata->phy_data;
+-
+- phy_data->comm_owned = 0;
+-
+ mutex_unlock(&xgbe_phy_comm_lock);
+ }
+
+@@ -562,9 +550,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
+ unsigned long timeout;
+ unsigned int mutex_id;
+
+- if (phy_data->comm_owned)
+- return 0;
+-
+ /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices,
+ * the driver needs to take the software mutex and then the hardware
+ * mutexes before being able to use the busses.
+@@ -593,7 +578,6 @@ static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata)
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+- phy_data->comm_owned = 1;
+ return 0;
+ }
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0004-amd-xgbe-Remove-field-that-indicates-SFP-diagnostic-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0004-amd-xgbe-Remove-field-that-indicates-SFP-diagnostic-.patch
new file mode 100644
index 00000000..1053a20f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0004-amd-xgbe-Remove-field-that-indicates-SFP-diagnostic-.patch
@@ -0,0 +1,56 @@
+From 2d4a7ba9c428cedb9c8dfd97622cf6d63c8d7e67 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:38:38 -0500
+Subject: [PATCH 04/95] amd-xgbe: Remove field that indicates SFP diagnostic
+ support
+
+The driver currently sets an indication of whether the SFP supports, and
+that the driver can obtain, diagnostics data. This isn't currently used
+by the driver and the logic to set this indicator is flawed because the
+field is cleared each time the SFP is checked and only set when a new SFP
+is detected. Remove this field and the logic supporting it.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 9 ---------
+ 1 file changed, 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 05003be..cb15caf 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -343,7 +343,6 @@ struct xgbe_phy_data {
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+- unsigned int sfp_diags;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+@@ -1211,13 +1210,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata)
+
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
+- if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) {
+- u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG];
+-
+- if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+- phy_data->sfp_diags = 1;
+- }
+-
+ xgbe_phy_free_phy_device(pdata);
+ } else {
+ phy_data->sfp_changed = 0;
+@@ -1267,7 +1259,6 @@ static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data)
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+- phy_data->sfp_diags = 0;
+ phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0005-amd-xgbe-Add-ethtool-support-to-retrieve-SFP-module-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0005-amd-xgbe-Add-ethtool-support-to-retrieve-SFP-module-.patch
new file mode 100644
index 00000000..b1337b11
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0005-amd-xgbe-Add-ethtool-support-to-retrieve-SFP-module-.patch
@@ -0,0 +1,299 @@
+From d8470b1e4163404f94fcec04b82c4485b8a40a68 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:38:46 -0500
+Subject: [PATCH 05/95] amd-xgbe: Add ethtool support to retrieve SFP module
+ info
+
+Add support to get SFP module information using ethtool.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 18 ++++
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 21 ++++
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 137 +++++++++++++++++++++++++++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 13 +++
+ 4 files changed, 189 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index ff397bb..57394b77 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -626,6 +626,22 @@ static int xgbe_get_ts_info(struct net_device *netdev,
+ return 0;
+ }
+
++static int xgbe_get_module_info(struct net_device *netdev,
++ struct ethtool_modinfo *modinfo)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++
++ return pdata->phy_if.module_info(pdata, modinfo);
++}
++
++static int xgbe_get_module_eeprom(struct net_device *netdev,
++ struct ethtool_eeprom *eeprom, u8 *data)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++
++ return pdata->phy_if.module_eeprom(pdata, eeprom, data);
++}
++
+ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+@@ -646,6 +662,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_ts_info = xgbe_get_ts_info,
+ .get_link_ksettings = xgbe_get_link_ksettings,
+ .set_link_ksettings = xgbe_set_link_ksettings,
++ .get_module_info = xgbe_get_module_info,
++ .get_module_eeprom = xgbe_get_module_eeprom,
+ };
+
+ const struct ethtool_ops *xgbe_get_ethtool_ops(void)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 1197779..36fddc2 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -126,6 +126,24 @@
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+
++static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
++ struct ethtool_eeprom *eeprom, u8 *data)
++{
++ if (!pdata->phy_if.phy_impl.module_eeprom)
++ return -ENXIO;
++
++ return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data);
++}
++
++static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
++ struct ethtool_modinfo *modinfo)
++{
++ if (!pdata->phy_if.phy_impl.module_info)
++ return -ENXIO;
++
++ return pdata->phy_if.phy_impl.module_info(pdata, modinfo);
++}
++
+ static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata)
+ {
+ int reg;
+@@ -1639,4 +1657,7 @@ void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
+ phy_if->phy_valid_speed = xgbe_phy_valid_speed;
+
+ phy_if->an_isr = xgbe_an_combined_isr;
++
++ phy_if->module_info = xgbe_phy_module_info;
++ phy_if->module_eeprom = xgbe_phy_module_eeprom;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index cb15caf..141bb13 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -119,6 +119,7 @@
+ #include <linux/kmod.h>
+ #include <linux/mdio.h>
+ #include <linux/phy.h>
++#include <linux/ethtool.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -270,6 +271,15 @@ struct xgbe_sfp_eeprom {
+ u8 vendor[32];
+ };
+
++#define XGBE_SFP_DIAGS_SUPPORTED(_x) \
++ ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \
++ !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
++
++#define XGBE_SFP_EEPROM_BASE_LEN 256
++#define XGBE_SFP_EEPROM_DIAG_LEN 256
++#define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \
++ XGBE_SFP_EEPROM_DIAG_LEN)
++
+ #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE "
+ #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 "
+
+@@ -1301,6 +1311,130 @@ static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata)
+ xgbe_phy_put_comm_ownership(pdata);
+ }
+
++static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata,
++ struct ethtool_eeprom *eeprom, u8 *data)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX];
++ struct xgbe_sfp_eeprom *sfp_eeprom;
++ unsigned int i, j, rem;
++ int ret;
++
++ rem = eeprom->len;
++
++ if (!eeprom->len) {
++ ret = -EINVAL;
++ goto done;
++ }
++
++ if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) {
++ ret = -EINVAL;
++ goto done;
++ }
++
++ if (phy_data->port_mode != XGBE_PORT_MODE_SFP) {
++ ret = -ENXIO;
++ goto done;
++ }
++
++ if (!netif_running(pdata->netdev)) {
++ ret = -EIO;
++ goto done;
++ }
++
++ if (phy_data->sfp_mod_absent) {
++ ret = -EIO;
++ goto done;
++ }
++
++ ret = xgbe_phy_get_comm_ownership(pdata);
++ if (ret) {
++ ret = -EIO;
++ goto done;
++ }
++
++ ret = xgbe_phy_sfp_get_mux(pdata);
++ if (ret) {
++ netdev_err(pdata->netdev, "I2C error setting SFP MUX\n");
++ ret = -EIO;
++ goto put_own;
++ }
++
++ /* Read the SFP serial ID eeprom */
++ eeprom_addr = 0;
++ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS,
++ &eeprom_addr, sizeof(eeprom_addr),
++ eeprom_data, XGBE_SFP_EEPROM_BASE_LEN);
++ if (ret) {
++ netdev_err(pdata->netdev,
++ "I2C error reading SFP EEPROM\n");
++ ret = -EIO;
++ goto put_mux;
++ }
++
++ sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data;
++
++ if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) {
++ /* Read the SFP diagnostic eeprom */
++ eeprom_addr = 0;
++ ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS,
++ &eeprom_addr, sizeof(eeprom_addr),
++ eeprom_data + XGBE_SFP_EEPROM_BASE_LEN,
++ XGBE_SFP_EEPROM_DIAG_LEN);
++ if (ret) {
++ netdev_err(pdata->netdev,
++ "I2C error reading SFP DIAGS\n");
++ ret = -EIO;
++ goto put_mux;
++ }
++ }
++
++ for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) {
++ if ((j >= XGBE_SFP_EEPROM_BASE_LEN) &&
++ !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom))
++ break;
++
++ data[i] = eeprom_data[j];
++ rem--;
++ }
++
++put_mux:
++ xgbe_phy_sfp_put_mux(pdata);
++
++put_own:
++ xgbe_phy_put_comm_ownership(pdata);
++
++done:
++ eeprom->len -= rem;
++
++ return ret;
++}
++
++static int xgbe_phy_module_info(struct xgbe_prv_data *pdata,
++ struct ethtool_modinfo *modinfo)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++
++ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
++ return -ENXIO;
++
++ if (!netif_running(pdata->netdev))
++ return -EIO;
++
++ if (phy_data->sfp_mod_absent)
++ return -EIO;
++
++ if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) {
++ modinfo->type = ETH_MODULE_SFF_8472;
++ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
++ } else {
++ modinfo->type = ETH_MODULE_SFF_8079;
++ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
++ }
++
++ return 0;
++}
++
+ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+ {
+ struct ethtool_link_ksettings *lks = &pdata->phy.lks;
+@@ -3196,4 +3330,7 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if)
+
+ phy_impl->kr_training_pre = xgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = xgbe_phy_kr_training_post;
++
++ phy_impl->module_info = xgbe_phy_module_info;
++ phy_impl->module_eeprom = xgbe_phy_module_eeprom;
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 54e43ad3..f0f455b 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -835,6 +835,7 @@ struct xgbe_hw_if {
+ * Optional routines:
+ * an_pre, an_post
+ * kr_training_pre, kr_training_post
++ * module_info, module_eeprom
+ */
+ struct xgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+@@ -883,6 +884,12 @@ struct xgbe_phy_impl_if {
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct xgbe_prv_data *);
+ void (*kr_training_post)(struct xgbe_prv_data *);
++
++ /* SFP module related info */
++ int (*module_info)(struct xgbe_prv_data *pdata,
++ struct ethtool_modinfo *modinfo);
++ int (*module_eeprom)(struct xgbe_prv_data *pdata,
++ struct ethtool_eeprom *eeprom, u8 *data);
+ };
+
+ struct xgbe_phy_if {
+@@ -905,6 +912,12 @@ struct xgbe_phy_if {
+ /* For single interrupt support */
+ irqreturn_t (*an_isr)(struct xgbe_prv_data *);
+
++ /* For ethtool PHY support */
++ int (*module_info)(struct xgbe_prv_data *pdata,
++ struct ethtool_modinfo *modinfo);
++ int (*module_eeprom)(struct xgbe_prv_data *pdata,
++ struct ethtool_eeprom *eeprom, u8 *data);
++
+ /* PHY implementation specific services */
+ struct xgbe_phy_impl_if phy_impl;
+ };
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0006-amd-xgbe-Add-ethtool-show-set-ring-parameter-support.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0006-amd-xgbe-Add-ethtool-show-set-ring-parameter-support.patch
new file mode 100644
index 00000000..3464a094
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0006-amd-xgbe-Add-ethtool-show-set-ring-parameter-support.patch
@@ -0,0 +1,153 @@
+From b92764506809e9dd65c2e1a1d7e8e93bd8910491 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:38:56 -0500
+Subject: [PATCH 06/95] amd-xgbe: Add ethtool show/set ring parameter support
+
+Add ethtool support to show and set the number of the Rx and Tx ring
+descriptors. Changing the ring configuration will result in a device
+restart.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 6 +--
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 65 ++++++++++++++++++++++++++++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 6 +++
+ 3 files changed, 72 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 75c4455..0360633 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1428,10 +1428,8 @@ static void xgbe_stopdev(struct work_struct *work)
+ netdev_alert(pdata->netdev, "device stopped\n");
+ }
+
+-static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
++void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+ {
+- DBGPR("-->xgbe_restart_dev\n");
+-
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+@@ -1442,8 +1440,6 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+ xgbe_free_rx_data(pdata);
+
+ xgbe_start(pdata);
+-
+- DBGPR("<--xgbe_restart_dev\n");
+ }
+
+ static void xgbe_restart(struct work_struct *work)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index 57394b77..d12f982 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -642,6 +642,69 @@ static int xgbe_get_module_eeprom(struct net_device *netdev,
+ return pdata->phy_if.module_eeprom(pdata, eeprom, data);
+ }
+
++static void xgbe_get_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ringparam)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++
++ ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX;
++ ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX;
++ ringparam->rx_pending = pdata->rx_desc_count;
++ ringparam->tx_pending = pdata->tx_desc_count;
++}
++
++static int xgbe_set_ringparam(struct net_device *netdev,
++ struct ethtool_ringparam *ringparam)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ unsigned int rx, tx;
++
++ if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) {
++ netdev_err(netdev, "unsupported ring parameter\n");
++ return -EINVAL;
++ }
++
++ if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) ||
++ (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) {
++ netdev_err(netdev,
++ "rx ring parameter must be between %u and %u\n",
++ XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX);
++ return -EINVAL;
++ }
++
++ if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) ||
++ (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) {
++ netdev_err(netdev,
++ "tx ring parameter must be between %u and %u\n",
++ XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX);
++ return -EINVAL;
++ }
++
++ rx = __rounddown_pow_of_two(ringparam->rx_pending);
++ if (rx != ringparam->rx_pending)
++ netdev_notice(netdev,
++ "rx ring parameter rounded to power of two: %u\n",
++ rx);
++
++ tx = __rounddown_pow_of_two(ringparam->tx_pending);
++ if (tx != ringparam->tx_pending)
++ netdev_notice(netdev,
++ "tx ring parameter rounded to power of two: %u\n",
++ tx);
++
++ if ((rx == pdata->rx_desc_count) &&
++ (tx == pdata->tx_desc_count))
++ goto out;
++
++ pdata->rx_desc_count = rx;
++ pdata->tx_desc_count = tx;
++
++ xgbe_restart_dev(pdata);
++
++out:
++ return 0;
++}
++
+ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+@@ -664,6 +727,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .set_link_ksettings = xgbe_set_link_ksettings,
+ .get_module_info = xgbe_get_module_info,
+ .get_module_eeprom = xgbe_get_module_eeprom,
++ .get_ringparam = xgbe_get_ringparam,
++ .set_ringparam = xgbe_set_ringparam,
+ };
+
+ const struct ethtool_ops *xgbe_get_ethtool_ops(void)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index f0f455b..7dc0fac 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -144,6 +144,11 @@
+ #define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1)
+ #define XGBE_RX_DESC_CNT 512
+
++#define XGBE_TX_DESC_CNT_MIN 64
++#define XGBE_TX_DESC_CNT_MAX 4096
++#define XGBE_RX_DESC_CNT_MIN 64
++#define XGBE_RX_DESC_CNT_MAX 4096
++
+ #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+ /* Descriptors required for maximum contiguous TSO/GSO packet */
+@@ -1330,6 +1335,7 @@ int xgbe_powerup(struct net_device *, unsigned int);
+ int xgbe_powerdown(struct net_device *, unsigned int);
+ void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
++void xgbe_restart_dev(struct xgbe_prv_data *pdata);
+
+ #ifdef CONFIG_DEBUG_FS
+ void xgbe_debugfs_init(struct xgbe_prv_data *);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0007-amd-xgbe-Prepare-for-ethtool-set-channel-support.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0007-amd-xgbe-Prepare-for-ethtool-set-channel-support.patch
new file mode 100644
index 00000000..6a46eb47
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0007-amd-xgbe-Prepare-for-ethtool-set-channel-support.patch
@@ -0,0 +1,241 @@
+From 020af1166b9d83857753bd92b3e6c9162c46ff86 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:39:04 -0500
+Subject: [PATCH 07/95] amd-xgbe: Prepare for ethtool set-channel support
+
+In order to support being able to dynamically set/change the number of
+Rx and Tx channels, update the code to:
+ - Move alloc and free of device memory into callable functions
+ - Move setting of the real number of Rx and Tx channels to device startup
+ - Move mapping of the RSS channels to device startup
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 108 ++++++++++++++++++------------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 20 +-----
+ 2 files changed, 68 insertions(+), 60 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 0360633..3c9681a 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1314,14 +1314,72 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return 0;
+ }
+
++static void xgbe_free_memory(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_desc_if *desc_if = &pdata->desc_if;
++
++ /* Free the ring descriptors and buffers */
++ desc_if->free_ring_resources(pdata);
++
++ /* Free the channel and ring structures */
++ xgbe_free_channels(pdata);
++}
++
++static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_desc_if *desc_if = &pdata->desc_if;
++ struct net_device *netdev = pdata->netdev;
++ int ret;
++
++ /* Calculate the Rx buffer size before allocating rings */
++ pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
++
++ /* Allocate the channel and ring structures */
++ ret = xgbe_alloc_channels(pdata);
++ if (ret)
++ return ret;
++
++ /* Allocate the ring descriptors and buffers */
++ ret = desc_if->alloc_ring_resources(pdata);
++ if (ret)
++ goto err_channels;
++
++ /* Initialize the service and Tx timers */
++ xgbe_init_timers(pdata);
++
++ return 0;
++
++err_channels:
++ xgbe_free_memory(pdata);
++
++ return ret;
++}
++
+ static int xgbe_start(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_phy_if *phy_if = &pdata->phy_if;
+ struct net_device *netdev = pdata->netdev;
++ unsigned int i;
+ int ret;
+
+- DBGPR("-->xgbe_start\n");
++ /* Set the number of queues */
++ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
++ if (ret) {
++ netdev_err(netdev, "error setting real tx queue count\n");
++ return ret;
++ }
++
++ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
++ if (ret) {
++ netdev_err(netdev, "error setting real rx queue count\n");
++ return ret;
++ }
++
++ /* Set RSS lookup table data for programming */
++ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
++ XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
++ i % pdata->rx_ring_count);
+
+ ret = hw_if->init(pdata);
+ if (ret)
+@@ -1349,8 +1407,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
+
+ clear_bit(XGBE_STOPPED, &pdata->dev_state);
+
+- DBGPR("<--xgbe_start\n");
+-
+ return 0;
+
+ err_irqs:
+@@ -1825,11 +1881,8 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+ static int xgbe_open(struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+- DBGPR("-->xgbe_open\n");
+-
+ /* Create the various names based on netdev name */
+ snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
+ netdev_name(netdev));
+@@ -1874,43 +1927,25 @@ static int xgbe_open(struct net_device *netdev)
+ goto err_sysclk;
+ }
+
+- /* Calculate the Rx buffer size before allocating rings */
+- ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+- if (ret < 0)
+- goto err_ptpclk;
+- pdata->rx_buf_size = ret;
+-
+- /* Allocate the channel and ring structures */
+- ret = xgbe_alloc_channels(pdata);
+- if (ret)
+- goto err_ptpclk;
+-
+- /* Allocate the ring descriptors and buffers */
+- ret = desc_if->alloc_ring_resources(pdata);
+- if (ret)
+- goto err_channels;
+-
+ INIT_WORK(&pdata->service_work, xgbe_service);
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+ INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
+ INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+- xgbe_init_timers(pdata);
++
++ ret = xgbe_alloc_memory(pdata);
++ if (ret)
++ goto err_ptpclk;
+
+ ret = xgbe_start(pdata);
+ if (ret)
+- goto err_rings;
++ goto err_mem;
+
+ clear_bit(XGBE_DOWN, &pdata->dev_state);
+
+- DBGPR("<--xgbe_open\n");
+-
+ return 0;
+
+-err_rings:
+- desc_if->free_ring_resources(pdata);
+-
+-err_channels:
+- xgbe_free_channels(pdata);
++err_mem:
++ xgbe_free_memory(pdata);
+
+ err_ptpclk:
+ clk_disable_unprepare(pdata->ptpclk);
+@@ -1930,18 +1965,11 @@ static int xgbe_open(struct net_device *netdev)
+ static int xgbe_close(struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+- struct xgbe_desc_if *desc_if = &pdata->desc_if;
+-
+- DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+- /* Free the ring descriptors and buffers */
+- desc_if->free_ring_resources(pdata);
+-
+- /* Free the channel and ring structures */
+- xgbe_free_channels(pdata);
++ xgbe_free_memory(pdata);
+
+ /* Disable the clocks */
+ clk_disable_unprepare(pdata->ptpclk);
+@@ -1955,8 +1983,6 @@ static int xgbe_close(struct net_device *netdev)
+
+ set_bit(XGBE_DOWN, &pdata->dev_state);
+
+- DBGPR("<--xgbe_close\n");
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index e31d9d1..d1eb855 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -265,7 +265,6 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ {
+ struct net_device *netdev = pdata->netdev;
+ struct device *dev = pdata->dev;
+- unsigned int i;
+ int ret;
+
+ netdev->irq = pdata->dev_irq;
+@@ -324,26 +323,9 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
+ pdata->tx_ring_count, pdata->rx_ring_count);
+ }
+
+- /* Set the number of queues */
+- ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+- if (ret) {
+- dev_err(dev, "error setting real tx queue count\n");
+- return ret;
+- }
+-
+- ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+- if (ret) {
+- dev_err(dev, "error setting real rx queue count\n");
+- return ret;
+- }
+-
+- /* Initialize RSS hash key and lookup table */
++ /* Initialize RSS hash key */
+ netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
+
+- for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
+- XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+- i % pdata->rx_ring_count);
+-
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0008-amd-xgbe-Add-ethtool-show-set-channels-support.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0008-amd-xgbe-Add-ethtool-show-set-channels-support.patch
new file mode 100644
index 00000000..4282cc5e
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0008-amd-xgbe-Add-ethtool-show-set-channels-support.patch
@@ -0,0 +1,237 @@
+From 8ce6e8e4c95dc8c55c49348719668b9b61fc821e Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:39:13 -0500
+Subject: [PATCH 08/95] amd-xgbe: Add ethtool show/set channels support
+
+Add ethtool support to show and set the device channel configuration.
+Changing the channel configuration will result in a device restart.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 25 +++++
+ drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 134 +++++++++++++++++++++++++++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 4 +
+ 3 files changed, 163 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 3c9681a..145b5c0 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1331,6 +1331,17 @@ static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
+ struct net_device *netdev = pdata->netdev;
+ int ret;
+
++ if (pdata->new_tx_ring_count) {
++ pdata->tx_ring_count = pdata->new_tx_ring_count;
++ pdata->tx_q_count = pdata->tx_ring_count;
++ pdata->new_tx_ring_count = 0;
++ }
++
++ if (pdata->new_rx_ring_count) {
++ pdata->rx_ring_count = pdata->new_rx_ring_count;
++ pdata->new_rx_ring_count = 0;
++ }
++
+ /* Calculate the Rx buffer size before allocating rings */
+ pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+
+@@ -1484,6 +1495,20 @@ static void xgbe_stopdev(struct work_struct *work)
+ netdev_alert(pdata->netdev, "device stopped\n");
+ }
+
++void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
++{
++ /* If not running, "restart" will happen on open */
++ if (!netif_running(pdata->netdev))
++ return;
++
++ xgbe_stop(pdata);
++
++ xgbe_free_memory(pdata);
++ xgbe_alloc_memory(pdata);
++
++ xgbe_start(pdata);
++}
++
+ void xgbe_restart_dev(struct xgbe_prv_data *pdata)
+ {
+ /* If not running, "restart" will happen on open */
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+index d12f982..a880f10 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+@@ -705,6 +705,138 @@ static int xgbe_set_ringparam(struct net_device *netdev,
+ return 0;
+ }
+
++static void xgbe_get_channels(struct net_device *netdev,
++ struct ethtool_channels *channels)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ unsigned int rx, tx, combined;
++
++ /* Calculate maximums allowed:
++ * - Take into account the number of available IRQs
++ * - Do not take into account the number of online CPUs so that
++ * the user can over-subscribe if desired
++ * - Tx is additionally limited by the number of hardware queues
++ */
++ rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
++ rx = min(rx, pdata->channel_irq_count);
++ tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
++ tx = min(tx, pdata->channel_irq_count);
++ tx = min(tx, pdata->tx_max_q_count);
++
++ combined = min(rx, tx);
++
++ channels->max_combined = combined;
++ channels->max_rx = rx ? rx - 1 : 0;
++ channels->max_tx = tx ? tx - 1 : 0;
++
++ /* Get current settings based on device state */
++ rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
++ tx = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
++
++ combined = min(rx, tx);
++ rx -= combined;
++ tx -= combined;
++
++ channels->combined_count = combined;
++ channels->rx_count = rx;
++ channels->tx_count = tx;
++}
++
++static void xgbe_print_set_channels_input(struct net_device *netdev,
++ struct ethtool_channels *channels)
++{
++ netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n",
++ channels->combined_count, channels->rx_count,
++ channels->tx_count);
++}
++
++static int xgbe_set_channels(struct net_device *netdev,
++ struct ethtool_channels *channels)
++{
++ struct xgbe_prv_data *pdata = netdev_priv(netdev);
++ unsigned int rx, rx_curr, tx, tx_curr, combined;
++
++ /* Calculate maximums allowed:
++ * - Take into account the number of available IRQs
++ * - Do not take into account the number of online CPUs so that
++ * the user can over-subscribe if desired
++ * - Tx is additionally limited by the number of hardware queues
++ */
++ rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count);
++ rx = min(rx, pdata->channel_irq_count);
++ tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count);
++ tx = min(tx, pdata->tx_max_q_count);
++ tx = min(tx, pdata->channel_irq_count);
++
++ combined = min(rx, tx);
++
++ /* Should not be setting other count */
++ if (channels->other_count) {
++ netdev_err(netdev,
++ "other channel count must be zero\n");
++ return -EINVAL;
++ }
++
++ /* Require at least one Combined (Rx and Tx) channel */
++ if (!channels->combined_count) {
++ netdev_err(netdev,
++ "at least one combined Rx/Tx channel is required\n");
++ xgbe_print_set_channels_input(netdev, channels);
++ return -EINVAL;
++ }
++
++ /* Check combined channels */
++ if (channels->combined_count > combined) {
++ netdev_err(netdev,
++ "combined channel count cannot exceed %u\n",
++ combined);
++ xgbe_print_set_channels_input(netdev, channels);
++ return -EINVAL;
++ }
++
++ /* Can have some Rx-only or Tx-only channels, but not both */
++ if (channels->rx_count && channels->tx_count) {
++ netdev_err(netdev,
++ "cannot specify both Rx-only and Tx-only channels\n");
++ xgbe_print_set_channels_input(netdev, channels);
++ return -EINVAL;
++ }
++
++ /* Check that we don't exceed the maximum number of channels */
++ if ((channels->combined_count + channels->rx_count) > rx) {
++ netdev_err(netdev,
++ "total Rx channels (%u) requested exceeds maximum available (%u)\n",
++ channels->combined_count + channels->rx_count, rx);
++ xgbe_print_set_channels_input(netdev, channels);
++ return -EINVAL;
++ }
++
++ if ((channels->combined_count + channels->tx_count) > tx) {
++ netdev_err(netdev,
++ "total Tx channels (%u) requested exceeds maximum available (%u)\n",
++ channels->combined_count + channels->tx_count, tx);
++ xgbe_print_set_channels_input(netdev, channels);
++ return -EINVAL;
++ }
++
++ rx = channels->combined_count + channels->rx_count;
++ tx = channels->combined_count + channels->tx_count;
++
++ rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count;
++ tx_curr = pdata->new_tx_ring_count ? : pdata->tx_ring_count;
++
++ if ((rx == rx_curr) && (tx == tx_curr))
++ goto out;
++
++ pdata->new_rx_ring_count = rx;
++ pdata->new_tx_ring_count = tx;
++
++ xgbe_full_restart_dev(pdata);
++
++out:
++ return 0;
++}
++
+ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_msglevel = xgbe_get_msglevel,
+@@ -729,6 +861,8 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_module_eeprom = xgbe_get_module_eeprom,
+ .get_ringparam = xgbe_get_ringparam,
+ .set_ringparam = xgbe_set_ringparam,
++ .get_channels = xgbe_get_channels,
++ .set_channels = xgbe_set_channels,
+ };
+
+ const struct ethtool_ops *xgbe_get_ethtool_ops(void)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 7dc0fac..7a412cf 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1122,6 +1122,9 @@ struct xgbe_prv_data {
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
++ unsigned int new_tx_ring_count;
++ unsigned int new_rx_ring_count;
++
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+@@ -1336,6 +1339,7 @@ int xgbe_powerdown(struct net_device *, unsigned int);
+ void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+ void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+ void xgbe_restart_dev(struct xgbe_prv_data *pdata);
++void xgbe_full_restart_dev(struct xgbe_prv_data *pdata);
+
+ #ifdef CONFIG_DEBUG_FS
+ void xgbe_debugfs_init(struct xgbe_prv_data *);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0009-amd-xgbe-Always-attempt-link-training-in-KR-mode.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0009-amd-xgbe-Always-attempt-link-training-in-KR-mode.patch
new file mode 100644
index 00000000..0a31f7c1
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0009-amd-xgbe-Always-attempt-link-training-in-KR-mode.patch
@@ -0,0 +1,161 @@
+From d95e49eb961bace4a0e4499fc0c8931a078656c0 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:39:21 -0500
+Subject: [PATCH 09/95] amd-xgbe: Always attempt link training in KR mode
+
+Link training is always attempted when in KR mode, but the code is
+structured to check if link training has been enabled before attempting
+to perform it. Since that check will always be true, simplify the code
+to always enable and start link training during KR auto-negotiation.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 69 +++++++------------------------
+ 1 file changed, 16 insertions(+), 53 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 36fddc2..eba757e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -216,31 +216,8 @@ static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata)
+ xgbe_an37_clear_interrupts(pdata);
+ }
+
+-static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata)
+-{
+- unsigned int reg;
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+-
+- reg |= XGBE_KR_TRAINING_ENABLE;
+- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+-}
+-
+-static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata)
+-{
+- unsigned int reg;
+-
+- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+-
+- reg &= ~XGBE_KR_TRAINING_ENABLE;
+- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+-}
+-
+ static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
+ {
+- /* Enable KR training */
+- xgbe_an73_enable_kr_training(pdata);
+-
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+@@ -250,9 +227,6 @@ static void xgbe_kr_mode(struct xgbe_prv_data *pdata)
+
+ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
+ {
+- /* Disable KR training */
+- xgbe_an73_disable_kr_training(pdata);
+-
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+@@ -262,9 +236,6 @@ static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata)
+
+ static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata)
+ {
+- /* Disable KR training */
+- xgbe_an73_disable_kr_training(pdata);
+-
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+@@ -278,9 +249,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
+ if (pdata->kr_redrv)
+ return xgbe_kr_mode(pdata);
+
+- /* Disable KR training */
+- xgbe_an73_disable_kr_training(pdata);
+-
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+@@ -290,9 +258,6 @@ static void xgbe_sfi_mode(struct xgbe_prv_data *pdata)
+
+ static void xgbe_x_mode(struct xgbe_prv_data *pdata)
+ {
+- /* Disable KR training */
+- xgbe_an73_disable_kr_training(pdata);
+-
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+@@ -302,9 +267,6 @@ static void xgbe_x_mode(struct xgbe_prv_data *pdata)
+
+ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+ {
+- /* Disable KR training */
+- xgbe_an73_disable_kr_training(pdata);
+-
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+@@ -314,9 +276,6 @@ static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata)
+
+ static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata)
+ {
+- /* Disable KR training */
+- xgbe_an73_disable_kr_training(pdata);
+-
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+@@ -425,6 +384,12 @@ static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable,
+ {
+ unsigned int reg;
+
++ /* Disable KR training for now */
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++ reg &= ~XGBE_KR_TRAINING_ENABLE;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
++
++ /* Update AN settings */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+@@ -522,21 +487,19 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+- reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+- if (reg & XGBE_KR_TRAINING_ENABLE) {
+- if (pdata->phy_if.phy_impl.kr_training_pre)
+- pdata->phy_if.phy_impl.kr_training_pre(pdata);
++ if (pdata->phy_if.phy_impl.kr_training_pre)
++ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+- reg |= XGBE_KR_TRAINING_START;
+- XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+- reg);
++ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
++ reg |= XGBE_KR_TRAINING_ENABLE;
++ reg |= XGBE_KR_TRAINING_START;
++ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+
+- netif_dbg(pdata, link, pdata->netdev,
+- "KR training initiated\n");
++ netif_dbg(pdata, link, pdata->netdev,
++ "KR training initiated\n");
+
+- if (pdata->phy_if.phy_impl.kr_training_post)
+- pdata->phy_if.phy_impl.kr_training_post(pdata);
+- }
++ if (pdata->phy_if.phy_impl.kr_training_post)
++ pdata->phy_if.phy_impl.kr_training_post(pdata);
+
+ return XGBE_AN_PAGE_RECEIVED;
+ }
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0010-amd-xgbe-Advertise-FEC-support-with-the-KR-re-driver.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0010-amd-xgbe-Advertise-FEC-support-with-the-KR-re-driver.patch
new file mode 100644
index 00000000..b3dcded5
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0010-amd-xgbe-Advertise-FEC-support-with-the-KR-re-driver.patch
@@ -0,0 +1,33 @@
+From 4e6f4e0094f5fd77b0d1d57814132ab7d7a9afe3 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:39:31 -0500
+Subject: [PATCH 10/95] amd-xgbe: Advertise FEC support with the KR re-driver
+
+When a KR re-driver is present, indicate the FEC support is available
+during auto-negotiation.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 141bb13..dd747f6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1720,6 +1720,10 @@ static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata,
+ XGBE_CLR_ADV(dlks, 1000baseKX_Full);
+ XGBE_CLR_ADV(dlks, 10000baseKR_Full);
+
++ /* Advertise FEC support is present */
++ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
++ XGBE_SET_ADV(dlks, 10000baseR_FEC);
++
+ switch (phy_data->port_mode) {
+ case XGBE_PORT_MODE_BACKPLANE:
+ XGBE_SET_ADV(dlks, 10000baseKR_Full);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0011-amd-xgbe-Update-the-BelFuse-quirk-to-support-SGMII.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0011-amd-xgbe-Update-the-BelFuse-quirk-to-support-SGMII.patch
new file mode 100644
index 00000000..915ffd1f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0011-amd-xgbe-Update-the-BelFuse-quirk-to-support-SGMII.patch
@@ -0,0 +1,162 @@
+From 5c5431b3856e575439d5ce59699cd6f20de3a12b Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:39:39 -0500
+Subject: [PATCH 11/95] amd-xgbe: Update the BelFuse quirk to support SGMII
+
+Instead of using a quirk to make the BelFuse 1GBT-SFP06 part look like
+a 1000baseX part, program the SFP PHY to support SGMII and 10/100/1000
+baseT.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 109 +++++++++++++++++++---------
+ 1 file changed, 75 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index dd747f6..194a569 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -860,6 +860,9 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int phy_id = phy_data->phydev->phy_id;
+
++ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
++ return false;
++
+ if ((phy_id & 0xfffffff0) != 0x01ff0cc0)
+ return false;
+
+@@ -885,8 +888,80 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ return true;
+ }
+
++static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
++{
++ struct xgbe_phy_data *phy_data = pdata->phy_data;
++ struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
++ unsigned int phy_id = phy_data->phydev->phy_id;
++ int reg;
++
++ if (phy_data->port_mode != XGBE_PORT_MODE_SFP)
++ return false;
++
++ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
++ XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
++ return false;
++
++ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
++ XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
++ return false;
++
++ if ((phy_id & 0xfffffff0) != 0x03625d10)
++ return false;
++
++ /* Disable RGMII mode */
++ phy_write(phy_data->phydev, 0x18, 0x7007);
++ reg = phy_read(phy_data->phydev, 0x18);
++ phy_write(phy_data->phydev, 0x18, reg & ~0x0080);
++
++ /* Enable fiber register bank */
++ phy_write(phy_data->phydev, 0x1c, 0x7c00);
++ reg = phy_read(phy_data->phydev, 0x1c);
++ reg &= 0x03ff;
++ reg &= ~0x0001;
++ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001);
++
++ /* Power down SerDes */
++ reg = phy_read(phy_data->phydev, 0x00);
++ phy_write(phy_data->phydev, 0x00, reg | 0x00800);
++
++ /* Configure SGMII-to-Copper mode */
++ phy_write(phy_data->phydev, 0x1c, 0x7c00);
++ reg = phy_read(phy_data->phydev, 0x1c);
++ reg &= 0x03ff;
++ reg &= ~0x0006;
++ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004);
++
++ /* Power up SerDes */
++ reg = phy_read(phy_data->phydev, 0x00);
++ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
++
++ /* Enable copper register bank */
++ phy_write(phy_data->phydev, 0x1c, 0x7c00);
++ reg = phy_read(phy_data->phydev, 0x1c);
++ reg &= 0x03ff;
++ reg &= ~0x0001;
++ phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg);
++
++ /* Power up SerDes */
++ reg = phy_read(phy_data->phydev, 0x00);
++ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
++
++ phy_data->phydev->supported = PHY_GBIT_FEATURES;
++ phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ phy_data->phydev->advertising = phy_data->phydev->supported;
++
++ netif_dbg(pdata, drv, pdata->netdev,
++ "BelFuse PHY quirk in place\n");
++
++ return true;
++}
++
+ static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata)
+ {
++ if (xgbe_phy_belfuse_phy_quirks(pdata))
++ return;
++
+ if (xgbe_phy_finisar_phy_quirks(pdata))
+ return;
+ }
+@@ -1027,37 +1102,6 @@ static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data)
+ return false;
+ }
+
+-static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata)
+-{
+- struct xgbe_phy_data *phy_data = pdata->phy_data;
+- struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+-
+- if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME],
+- XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
+- return false;
+-
+- if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+- XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) {
+- phy_data->sfp_base = XGBE_SFP_BASE_1000_SX;
+- phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE;
+- phy_data->sfp_speed = XGBE_SFP_SPEED_1000;
+- if (phy_data->sfp_changed)
+- netif_dbg(pdata, drv, pdata->netdev,
+- "Bel-Fuse SFP quirk in place\n");
+- return true;
+- }
+-
+- return false;
+-}
+-
+-static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata)
+-{
+- if (xgbe_phy_belfuse_parse_quirks(pdata))
+- return true;
+-
+- return false;
+-}
+-
+ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ {
+ struct xgbe_phy_data *phy_data = pdata->phy_data;
+@@ -1076,9 +1120,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata)
+ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data);
+ phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data);
+
+- if (xgbe_phy_sfp_parse_quirks(pdata))
+- return;
+-
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0012-amd-xgbe-Improve-SFP-100Mbps-auto-negotiation.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0012-amd-xgbe-Improve-SFP-100Mbps-auto-negotiation.patch
new file mode 100644
index 00000000..aba4c423
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0012-amd-xgbe-Improve-SFP-100Mbps-auto-negotiation.patch
@@ -0,0 +1,201 @@
+From c02438fa8e23e9752882fb6b783a70fc61b8c1ae Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Wed, 23 May 2018 11:39:47 -0500
+Subject: [PATCH 12/95] amd-xgbe: Improve SFP 100Mbps auto-negotiation
+
+After changing speed to 100Mbps as a result of auto-negotiation (AN),
+some 10/100/1000Mbps SFPs indicate a successful link (no faults or loss
+of signal), but cannot successfully transmit or receive data. These
+SFPs required an extra auto-negotiation (AN) after the speed change in
+order to operate properly. Add a quirk for these SFPs so that if the
+outcome of the AN actually results in changing to a new speed, re-initiate
+AN at that new speed.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 77 ++++++++++++++++-------------
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 6 +++
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 1 +
+ 3 files changed, 50 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index eba757e..8a3a60b 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -331,13 +331,15 @@ static void xgbe_switch_mode(struct xgbe_prv_data *pdata)
+ xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+ }
+
+-static void xgbe_set_mode(struct xgbe_prv_data *pdata,
++static bool xgbe_set_mode(struct xgbe_prv_data *pdata,
+ enum xgbe_mode mode)
+ {
+ if (mode == xgbe_cur_mode(pdata))
+- return;
++ return false;
+
+ xgbe_change_mode(pdata, mode);
++
++ return true;
+ }
+
+ static bool xgbe_use_mode(struct xgbe_prv_data *pdata,
+@@ -1178,21 +1180,23 @@ static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
+ return 0;
+ }
+
+-static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
++static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode)
+ {
+ int ret;
+
++ mutex_lock(&pdata->an_mutex);
++
+ set_bit(XGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = jiffies;
+
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+- return ret;
++ goto out;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = xgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+- return ret;
++ goto out;
+
+ netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n");
+ } else {
+@@ -1202,24 +1206,27 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+ /* Disable auto-negotiation interrupt */
+ disable_irq(pdata->an_irq);
+
+- /* Start auto-negotiation in a supported mode */
+- if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
+- xgbe_set_mode(pdata, XGBE_MODE_KR);
+- } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
+- xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
+- } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
+- xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
+- } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
+- xgbe_set_mode(pdata, XGBE_MODE_SFI);
+- } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
+- xgbe_set_mode(pdata, XGBE_MODE_X);
+- } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
+- xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
+- } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
+- xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
+- } else {
+- enable_irq(pdata->an_irq);
+- return -EINVAL;
++ if (set_mode) {
++ /* Start auto-negotiation in a supported mode */
++ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) {
++ xgbe_set_mode(pdata, XGBE_MODE_KR);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) {
++ xgbe_set_mode(pdata, XGBE_MODE_KX_2500);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) {
++ xgbe_set_mode(pdata, XGBE_MODE_KX_1000);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) {
++ xgbe_set_mode(pdata, XGBE_MODE_SFI);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) {
++ xgbe_set_mode(pdata, XGBE_MODE_X);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) {
++ xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000);
++ } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) {
++ xgbe_set_mode(pdata, XGBE_MODE_SGMII_100);
++ } else {
++ enable_irq(pdata->an_irq);
++ ret = -EINVAL;
++ goto out;
++ }
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+@@ -1239,16 +1246,7 @@ static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+ xgbe_an_init(pdata);
+ xgbe_an_restart(pdata);
+
+- return 0;
+-}
+-
+-static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+-{
+- int ret;
+-
+- mutex_lock(&pdata->an_mutex);
+-
+- ret = __xgbe_phy_config_aneg(pdata);
++out:
+ if (ret)
+ set_bit(XGBE_LINK_ERR, &pdata->dev_state);
+ else
+@@ -1259,6 +1257,16 @@ static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
+ return ret;
+ }
+
++static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
++{
++ return __xgbe_phy_config_aneg(pdata, true);
++}
++
++static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata)
++{
++ return __xgbe_phy_config_aneg(pdata, false);
++}
++
+ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
+ {
+ return (pdata->an_result == XGBE_AN_COMPLETE);
+@@ -1315,7 +1323,8 @@ static void xgbe_phy_status_result(struct xgbe_prv_data *pdata)
+
+ pdata->phy.duplex = DUPLEX_FULL;
+
+- xgbe_set_mode(pdata, mode);
++ if (xgbe_set_mode(pdata, mode) && pdata->an_again)
++ xgbe_phy_reconfig_aneg(pdata);
+ }
+
+ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 194a569..3ceb4f9 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -902,6 +902,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN))
+ return false;
+
++ /* For Bel-Fuse, use the extra AN flag */
++ pdata->an_again = 1;
++
+ if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN],
+ XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN))
+ return false;
+@@ -978,6 +981,9 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata)
+ if (phy_data->phydev)
+ return 0;
+
++ /* Clear the extra AN flag */
++ pdata->an_again = 0;
++
+ /* Check for the use of an external PHY */
+ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE)
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 7a412cf..47bcbcf 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1261,6 +1261,7 @@ struct xgbe_prv_data {
+ enum xgbe_rx kr_state;
+ enum xgbe_rx kx_state;
+ struct work_struct an_work;
++ unsigned int an_again;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0013-amd-xgbe-Merged-From-453f85d43fa9ee243f0fc3ac4e1be45.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0013-amd-xgbe-Merged-From-453f85d43fa9ee243f0fc3ac4e1be45.patch
new file mode 100644
index 00000000..20e88321
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0013-amd-xgbe-Merged-From-453f85d43fa9ee243f0fc3ac4e1be45.patch
@@ -0,0 +1,82 @@
+From accb928c3c5a8370a99cf5808d9db38e8fb604c5 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Thu, 26 Jul 2018 22:36:39 +0530
+Subject: [PATCH 13/95] amd-xgbe: Merged From
+ 453f85d43fa9ee243f0fc3ac4e1be45615301e3f and From
+ c6c52ba1514120db3ad2d36391ed37bafcfc43d7
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2 +-
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 14 ++++++--------
+ 2 files changed, 7 insertions(+), 9 deletions(-)
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+old mode 100644
+new mode 100755
+index 45d9230..cc1e4f8
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -295,7 +295,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ order = alloc_order;
+
+ /* Try to obtain pages, decreasing order if necessary */
+- gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
++ gfp = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages_node(node, gfp, order);
+ if (pages)
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+old mode 100644
+new mode 100755
+index 145b5c0..8cfba4b
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -642,9 +642,9 @@ static irqreturn_t xgbe_dma_isr(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static void xgbe_tx_timer(unsigned long data)
++static void xgbe_tx_timer(struct timer_list *t)
+ {
+- struct xgbe_channel *channel = (struct xgbe_channel *)data;
++ struct xgbe_channel *channel = from_timer(channel, t, tx_timer);
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct napi_struct *napi;
+
+@@ -680,9 +680,9 @@ static void xgbe_service(struct work_struct *work)
+ pdata->phy_if.phy_status(pdata);
+ }
+
+-static void xgbe_service_timer(unsigned long data)
++static void xgbe_service_timer(struct timer_list *t)
+ {
+- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
++ struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
+
+ queue_work(pdata->dev_workqueue, &pdata->service_work);
+
+@@ -694,16 +694,14 @@ static void xgbe_init_timers(struct xgbe_prv_data *pdata)
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+- setup_timer(&pdata->service_timer, xgbe_service_timer,
+- (unsigned long)pdata);
++ timer_setup(&pdata->service_timer, xgbe_service_timer, 0);
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ channel = pdata->channel[i];
+ if (!channel->tx_ring)
+ break;
+
+- setup_timer(&channel->tx_timer, xgbe_tx_timer,
+- (unsigned long)channel);
++ timer_setup(&channel->tx_timer, xgbe_tx_timer, 0);
+ }
+ }
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0014-crypto-ccp-Use-GCM-IV-size-constant.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0014-crypto-ccp-Use-GCM-IV-size-constant.patch
new file mode 100644
index 00000000..3cab86e8
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0014-crypto-ccp-Use-GCM-IV-size-constant.patch
@@ -0,0 +1,57 @@
+From 77d94637c99dc157e825b85569513f24e46fde4a Mon Sep 17 00:00:00 2001
+From: Corentin LABBE <clabbe.montjoie@gmail.com>
+Date: Tue, 22 Aug 2017 10:08:10 +0200
+Subject: [PATCH 14/95] crypto: ccp - Use GCM IV size constant
+
+This patch replace GCM IV size value by their constant name.
+
+Signed-off-by: Corentin Labbe <clabbe.montjoie@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-crypto-aes-galois.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+index 5231352..ff02b71 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -19,13 +19,12 @@
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
+ #include <crypto/ctr.h>
++#include <crypto/gcm.h>
+ #include <crypto/scatterwalk.h>
+ #include <linux/delay.h>
+
+ #include "ccp-crypto.h"
+
+-#define AES_GCM_IVSIZE 12
+-
+ static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret)
+ {
+ return ret;
+@@ -95,9 +94,9 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+ */
+
+ /* Prepare the IV: 12 bytes + an integer (counter) */
+- memcpy(rctx->iv, req->iv, AES_GCM_IVSIZE);
++ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE);
+ for (i = 0; i < 3; i++)
+- rctx->iv[i + AES_GCM_IVSIZE] = 0;
++ rctx->iv[i + GCM_AES_IV_SIZE] = 0;
+ rctx->iv[AES_BLOCK_SIZE - 1] = 1;
+
+ /* Set up a scatterlist for the IV */
+@@ -160,7 +159,7 @@ static struct aead_alg ccp_aes_gcm_defaults = {
+ .encrypt = ccp_aes_gcm_encrypt,
+ .decrypt = ccp_aes_gcm_decrypt,
+ .init = ccp_aes_gcm_cra_init,
+- .ivsize = AES_GCM_IVSIZE,
++ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .base = {
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0015-crypto-ccp-unmap-pages-and-remove-unmap-objects-in-c.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0015-crypto-ccp-unmap-pages-and-remove-unmap-objects-in-c.patch
new file mode 100644
index 00000000..c9ac03ee
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0015-crypto-ccp-unmap-pages-and-remove-unmap-objects-in-c.patch
@@ -0,0 +1,32 @@
+From da7514b8de1c7f0554bb3264a38e9621340b6836 Mon Sep 17 00:00:00 2001
+From: amd <amd@sosxen2.amd.com>
+Date: Tue, 5 Sep 2017 17:01:51 -0500
+Subject: [PATCH 15/95] crypto: ccp - unmap pages and remove unmap objects in
+ callback
+
+Clean up the mapped pages and the unmap object once we are done with
+it. This enables the final clean-up of the object once the transfer
+is complete.
+
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-dmaengine.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index d608043..64f4b57 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -223,6 +223,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
+ desc->tx_desc.cookie, desc->status);
+
+ dma_cookie_complete(tx_desc);
++ dma_descriptor_unmap(tx_desc);
+ }
+
+ desc = __ccp_next_dma_desc(chan, desc);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0016-crypto-ccp-invoke-the-DMA-callback-in-a-standard-way.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0016-crypto-ccp-invoke-the-DMA-callback-in-a-standard-way.patch
new file mode 100644
index 00000000..c7f32e89
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0016-crypto-ccp-invoke-the-DMA-callback-in-a-standard-way.patch
@@ -0,0 +1,33 @@
+From 32b7780948ec9a7dba7507ad74dfc4a50891283b Mon Sep 17 00:00:00 2001
+From: amd <amd@sosxen2.amd.com>
+Date: Tue, 5 Sep 2017 17:08:14 -0500
+Subject: [PATCH 16/95] crypto:ccp - invoke the DMA callback in a standard way
+
+Use the provided mechanism in dmaengine.h to invoke the
+completion callback.
+
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-dmaengine.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index 64f4b57..8b9da58 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -231,9 +231,7 @@ static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ if (tx_desc) {
+- if (tx_desc->callback &&
+- (tx_desc->flags & DMA_PREP_INTERRUPT))
+- tx_desc->callback(tx_desc->callback_param);
++ dmaengine_desc_get_callback_invoke(tx_desc, NULL);
+
+ dma_run_dependencies(tx_desc);
+ }
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0017-crypto-ccp-remove-unused-variable-qim.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0017-crypto-ccp-remove-unused-variable-qim.patch
new file mode 100644
index 00000000..26b6d92c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0017-crypto-ccp-remove-unused-variable-qim.patch
@@ -0,0 +1,41 @@
+From 3c254b9e8821aa946077d5b1b24445ec491baf9f Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Thu, 12 Oct 2017 17:55:41 +0100
+Subject: [PATCH 17/95] crypto: ccp - remove unused variable qim
+
+Variable qim is assigned but never read, it is redundant and can
+be removed.
+
+Cleans up clang warning: Value stored to 'qim' is never read
+
+Fixes: 4b394a232df7 ("crypto: ccp - Let a v5 CCP provide the same function as v3")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-dev-v5.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
+index 65604fc..44a4d27 100644
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -788,13 +788,12 @@ static int ccp5_init(struct ccp_device *ccp)
+ struct ccp_cmd_queue *cmd_q;
+ struct dma_pool *dma_pool;
+ char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
+- unsigned int qmr, qim, i;
++ unsigned int qmr, i;
+ u64 status;
+ u32 status_lo, status_hi;
+ int ret;
+
+ /* Find available queues */
+- qim = 0;
+ qmr = ioread32(ccp->io_regs + Q_MASK_REG);
+ for (i = 0; i < MAX_HW_QUEUES; i++) {
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0018-crypto-ccp-use-ENOSPC-for-transient-busy-indication.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0018-crypto-ccp-use-ENOSPC-for-transient-busy-indication.patch
new file mode 100644
index 00000000..2d35672c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0018-crypto-ccp-use-ENOSPC-for-transient-busy-indication.patch
@@ -0,0 +1,66 @@
+From 45ab44e350f6af9ec0d43c492c307aeb89c70b9a Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Wed, 18 Oct 2017 08:00:34 +0100
+Subject: [PATCH 18/95] crypto: ccp - use -ENOSPC for transient busy indication
+
+Replace -EBUSY with -ENOSPC when reporting transient busy
+indication in the absence of backlog.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Reviewed-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-crypto-main.c | 8 +++-----
+ drivers/crypto/ccp/ccp-dev.c | 7 +++++--
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c
+index 35a9de7..b95d199 100644
+--- a/drivers/crypto/ccp/ccp-crypto-main.c
++++ b/drivers/crypto/ccp/ccp-crypto-main.c
+@@ -222,9 +222,10 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
+
+ /* Check if the cmd can/should be queued */
+ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+- ret = -EBUSY;
+- if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
++ if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
++ ret = -ENOSPC;
+ goto e_lock;
++ }
+ }
+
+ /* Look for an entry with the same tfm. If there is a cmd
+@@ -243,9 +244,6 @@ static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
+ ret = ccp_enqueue_cmd(crypto_cmd->cmd);
+ if (!ccp_crypto_success(ret))
+ goto e_lock; /* Error, don't queue it */
+- if ((ret == -EBUSY) &&
+- !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
+- goto e_lock; /* Not backlogging, don't queue it */
+ }
+
+ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
+diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
+index 4e029b1..1b5035d 100644
+--- a/drivers/crypto/ccp/ccp-dev.c
++++ b/drivers/crypto/ccp/ccp-dev.c
+@@ -292,9 +292,12 @@ int ccp_enqueue_cmd(struct ccp_cmd *cmd)
+ i = ccp->cmd_q_count;
+
+ if (ccp->cmd_count >= MAX_CMD_QLEN) {
+- ret = -EBUSY;
+- if (cmd->flags & CCP_CMD_MAY_BACKLOG)
++ if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
++ ret = -EBUSY;
+ list_add_tail(&cmd->entry, &ccp->backlog);
++ } else {
++ ret = -ENOSPC;
++ }
+ } else {
+ ret = -EINPROGRESS;
+ ccp->cmd_count++;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0019-crypto-ccp-Build-the-AMD-secure-processor-driver-onl.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0019-crypto-ccp-Build-the-AMD-secure-processor-driver-onl.patch
new file mode 100644
index 00000000..64afef16
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0019-crypto-ccp-Build-the-AMD-secure-processor-driver-onl.patch
@@ -0,0 +1,37 @@
+From 83b40cedfd473b56343ab95d562de2fbe1faa384 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Mon, 4 Dec 2017 10:57:26 -0600
+Subject: [PATCH 19/95] crypto: ccp: Build the AMD secure processor driver only
+ with AMD CPU support
+
+This is AMD-specific hardware so present it in Kconfig only when AMD
+CPU support is enabled or on ARM64 where it is also used.
+
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Gary R Hook <gary.hook@amd.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: linux-crypto@vger.kernel.org
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
+index 6d62660..9c84f98 100644
+--- a/drivers/crypto/ccp/Kconfig
++++ b/drivers/crypto/ccp/Kconfig
+@@ -1,5 +1,6 @@
+ config CRYPTO_DEV_CCP_DD
+ tristate "Secure Processor device driver"
++ depends on CPU_SUP_AMD || ARM64
+ default m
+ help
+ Provides AMD Secure Processor device driver.
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0020-crypto-ccp-Add-Platform-Security-Processor-PSP-devic.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0020-crypto-ccp-Add-Platform-Security-Processor-PSP-devic.patch
new file mode 100644
index 00000000..53b33883
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0020-crypto-ccp-Add-Platform-Security-Processor-PSP-devic.patch
@@ -0,0 +1,462 @@
+From 93c14f68b6d56bd045e9a79e3d9b2a233bcf2e80 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:28 -0600
+Subject: [PATCH 20/95] crypto: ccp: Add Platform Security Processor (PSP)
+ device support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The Platform Security Processor (PSP) is part of the AMD Secure
+Processor (AMD-SP) functionality. The PSP is a dedicated processor
+that provides support for key management commands in Secure Encrypted
+Virtualization (SEV) mode, along with software-based Trusted Execution
+Environment (TEE) to enable third-party trusted applications.
+
+Note that the key management functionality provided by the SEV firmware
+can be used outside of the kvm-amd driver hence it doesn't need to
+depend on CONFIG_KVM_AMD.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/Kconfig | 11 +++++
+ drivers/crypto/ccp/Makefile | 1 +
+ drivers/crypto/ccp/psp-dev.c | 105 +++++++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/psp-dev.h | 59 ++++++++++++++++++++++++
+ drivers/crypto/ccp/sp-dev.c | 26 +++++++++++
+ drivers/crypto/ccp/sp-dev.h | 24 +++++++++-
+ drivers/crypto/ccp/sp-pci.c | 52 +++++++++++++++++++++
+ 7 files changed, 277 insertions(+), 1 deletion(-)
+ create mode 100644 drivers/crypto/ccp/psp-dev.c
+ create mode 100644 drivers/crypto/ccp/psp-dev.h
+
+diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
+index 9c84f98..b9dfae4 100644
+--- a/drivers/crypto/ccp/Kconfig
++++ b/drivers/crypto/ccp/Kconfig
+@@ -33,3 +33,14 @@ config CRYPTO_DEV_CCP_CRYPTO
+ Support for using the cryptographic API with the AMD Cryptographic
+ Coprocessor. This module supports offload of SHA and AES algorithms.
+ If you choose 'M' here, this module will be called ccp_crypto.
++
++config CRYPTO_DEV_SP_PSP
++ bool "Platform Security Processor (PSP) device"
++ default y
++ depends on CRYPTO_DEV_CCP_DD && X86_64
++ help
++ Provide support for the AMD Platform Security Processor (PSP).
++ The PSP is a dedicated processor that provides support for key
++ management commands in Secure Encrypted Virtualization (SEV) mode,
++ along with software-based Trusted Execution Environment (TEE) to
++ enable third-party trusted applications.
+diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
+index c4ce726..51d1c0c 100644
+--- a/drivers/crypto/ccp/Makefile
++++ b/drivers/crypto/ccp/Makefile
+@@ -8,6 +8,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \
+ ccp-dmaengine.o \
+ ccp-debugfs.o
+ ccp-$(CONFIG_PCI) += sp-pci.o
++ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o
+
+ obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
+ ccp-crypto-objs := ccp-crypto-main.o \
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+new file mode 100644
+index 0000000..b5789f87
+--- /dev/null
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -0,0 +1,105 @@
++/*
++ * AMD Platform Security Processor (PSP) interface
++ *
++ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
++ *
++ * Author: Brijesh Singh <brijesh.singh@amd.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/kthread.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/spinlock_types.h>
++#include <linux/types.h>
++#include <linux/mutex.h>
++#include <linux/delay.h>
++#include <linux/hw_random.h>
++#include <linux/ccp.h>
++
++#include "sp-dev.h"
++#include "psp-dev.h"
++
++static struct psp_device *psp_alloc_struct(struct sp_device *sp)
++{
++ struct device *dev = sp->dev;
++ struct psp_device *psp;
++
++ psp = devm_kzalloc(dev, sizeof(*psp), GFP_KERNEL);
++ if (!psp)
++ return NULL;
++
++ psp->dev = dev;
++ psp->sp = sp;
++
++ snprintf(psp->name, sizeof(psp->name), "psp-%u", sp->ord);
++
++ return psp;
++}
++
++static irqreturn_t psp_irq_handler(int irq, void *data)
++{
++ return IRQ_HANDLED;
++}
++
++int psp_dev_init(struct sp_device *sp)
++{
++ struct device *dev = sp->dev;
++ struct psp_device *psp;
++ int ret;
++
++ ret = -ENOMEM;
++ psp = psp_alloc_struct(sp);
++ if (!psp)
++ goto e_err;
++
++ sp->psp_data = psp;
++
++ psp->vdata = (struct psp_vdata *)sp->dev_vdata->psp_vdata;
++ if (!psp->vdata) {
++ ret = -ENODEV;
++ dev_err(dev, "missing driver data\n");
++ goto e_err;
++ }
++
++ psp->io_regs = sp->io_map + psp->vdata->offset;
++
++ /* Disable and clear interrupts until ready */
++ iowrite32(0, psp->io_regs + PSP_P2CMSG_INTEN);
++ iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTSTS);
++
++ /* Request an irq */
++ ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp);
++ if (ret) {
++ dev_err(dev, "psp: unable to allocate an IRQ\n");
++ goto e_err;
++ }
++
++ if (sp->set_psp_master_device)
++ sp->set_psp_master_device(sp);
++
++ /* Enable interrupt */
++ iowrite32(-1, psp->io_regs + PSP_P2CMSG_INTEN);
++
++ return 0;
++
++e_err:
++ sp->psp_data = NULL;
++
++ dev_notice(dev, "psp initialization failed\n");
++
++ return ret;
++}
++
++void psp_dev_destroy(struct sp_device *sp)
++{
++ struct psp_device *psp = sp->psp_data;
++
++ sp_free_psp_irq(sp, psp);
++}
+diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
+new file mode 100644
+index 0000000..55b7808
+--- /dev/null
++++ b/drivers/crypto/ccp/psp-dev.h
+@@ -0,0 +1,59 @@
++/*
++ * AMD Platform Security Processor (PSP) interface driver
++ *
++ * Copyright (C) 2017 Advanced Micro Devices, Inc.
++ *
++ * Author: Brijesh Singh <brijesh.singh@amd.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __PSP_DEV_H__
++#define __PSP_DEV_H__
++
++#include <linux/device.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/mutex.h>
++#include <linux/list.h>
++#include <linux/wait.h>
++#include <linux/dmapool.h>
++#include <linux/hw_random.h>
++#include <linux/bitops.h>
++#include <linux/interrupt.h>
++#include <linux/irqreturn.h>
++#include <linux/dmaengine.h>
++
++#include "sp-dev.h"
++
++#define PSP_P2CMSG_INTEN 0x0110
++#define PSP_P2CMSG_INTSTS 0x0114
++
++#define PSP_C2PMSG_ATTR_0 0x0118
++#define PSP_C2PMSG_ATTR_1 0x011c
++#define PSP_C2PMSG_ATTR_2 0x0120
++#define PSP_C2PMSG_ATTR_3 0x0124
++#define PSP_P2CMSG_ATTR_0 0x0128
++
++#define PSP_CMDRESP_CMD_SHIFT 16
++#define PSP_CMDRESP_IOC BIT(0)
++#define PSP_CMDRESP_RESP BIT(31)
++#define PSP_CMDRESP_ERR_MASK 0xffff
++
++#define MAX_PSP_NAME_LEN 16
++
++struct psp_device {
++ struct list_head entry;
++
++ struct psp_vdata *vdata;
++ char name[MAX_PSP_NAME_LEN];
++
++ struct device *dev;
++ struct sp_device *sp;
++
++ void __iomem *io_regs;
++};
++
++#endif /* __PSP_DEV_H */
+diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
+index bef387c8..cf101c0 100644
+--- a/drivers/crypto/ccp/sp-dev.c
++++ b/drivers/crypto/ccp/sp-dev.c
+@@ -198,6 +198,8 @@ int sp_init(struct sp_device *sp)
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_init(sp);
+
++ if (sp->dev_vdata->psp_vdata)
++ psp_dev_init(sp);
+ return 0;
+ }
+
+@@ -206,6 +208,9 @@ void sp_destroy(struct sp_device *sp)
+ if (sp->dev_vdata->ccp_vdata)
+ ccp_dev_destroy(sp);
+
++ if (sp->dev_vdata->psp_vdata)
++ psp_dev_destroy(sp);
++
+ sp_del_device(sp);
+ }
+
+@@ -237,6 +242,27 @@ int sp_resume(struct sp_device *sp)
+ }
+ #endif
+
++struct sp_device *sp_get_psp_master_device(void)
++{
++ struct sp_device *i, *ret = NULL;
++ unsigned long flags;
++
++ write_lock_irqsave(&sp_unit_lock, flags);
++ if (list_empty(&sp_units))
++ goto unlock;
++
++ list_for_each_entry(i, &sp_units, entry) {
++ if (i->psp_data)
++ break;
++ }
++
++ if (i->get_psp_master_device)
++ ret = i->get_psp_master_device();
++unlock:
++ write_unlock_irqrestore(&sp_unit_lock, flags);
++ return ret;
++}
++
+ static int __init sp_mod_init(void)
+ {
+ #ifdef CONFIG_X86
+diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
+index 5ab486a..909cf3e4 100644
+--- a/drivers/crypto/ccp/sp-dev.h
++++ b/drivers/crypto/ccp/sp-dev.h
+@@ -42,12 +42,17 @@ struct ccp_vdata {
+ const unsigned int offset;
+ const unsigned int rsamax;
+ };
++
++struct psp_vdata {
++ const unsigned int offset;
++};
++
+ /* Structure to hold SP device data */
+ struct sp_dev_vdata {
+ const unsigned int bar;
+
+ const struct ccp_vdata *ccp_vdata;
+- void *psp_vdata;
++ const struct psp_vdata *psp_vdata;
+ };
+
+ struct sp_device {
+@@ -68,6 +73,10 @@ struct sp_device {
+ /* DMA caching attribute support */
+ unsigned int axcache;
+
++ /* get and set master device */
++ struct sp_device*(*get_psp_master_device)(void);
++ void (*set_psp_master_device)(struct sp_device *);
++
+ bool irq_registered;
+ bool use_tasklet;
+
+@@ -103,6 +112,7 @@ void sp_free_ccp_irq(struct sp_device *sp, void *data);
+ int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler,
+ const char *name, void *data);
+ void sp_free_psp_irq(struct sp_device *sp, void *data);
++struct sp_device *sp_get_psp_master_device(void);
+
+ #ifdef CONFIG_CRYPTO_DEV_SP_CCP
+
+@@ -130,4 +140,16 @@ static inline int ccp_dev_resume(struct sp_device *sp)
+ }
+ #endif /* CONFIG_CRYPTO_DEV_SP_CCP */
+
++#ifdef CONFIG_CRYPTO_DEV_SP_PSP
++
++int psp_dev_init(struct sp_device *sp);
++void psp_dev_destroy(struct sp_device *sp);
++
++#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
++
++static inline int psp_dev_init(struct sp_device *sp) { return 0; }
++static inline void psp_dev_destroy(struct sp_device *sp) { }
++
++#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
++
+ #endif
+diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
+index 9859aa6..f5f43c5 100644
+--- a/drivers/crypto/ccp/sp-pci.c
++++ b/drivers/crypto/ccp/sp-pci.c
+@@ -25,6 +25,7 @@
+ #include <linux/ccp.h>
+
+ #include "ccp-dev.h"
++#include "psp-dev.h"
+
+ #define MSIX_VECTORS 2
+
+@@ -32,6 +33,7 @@ struct sp_pci {
+ int msix_count;
+ struct msix_entry msix_entry[MSIX_VECTORS];
+ };
++static struct sp_device *sp_dev_master;
+
+ static int sp_get_msix_irqs(struct sp_device *sp)
+ {
+@@ -108,6 +110,45 @@ static void sp_free_irqs(struct sp_device *sp)
+ sp->psp_irq = 0;
+ }
+
++static bool sp_pci_is_master(struct sp_device *sp)
++{
++ struct device *dev_cur, *dev_new;
++ struct pci_dev *pdev_cur, *pdev_new;
++
++ dev_new = sp->dev;
++ dev_cur = sp_dev_master->dev;
++
++ pdev_new = to_pci_dev(dev_new);
++ pdev_cur = to_pci_dev(dev_cur);
++
++ if (pdev_new->bus->number < pdev_cur->bus->number)
++ return true;
++
++ if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn))
++ return true;
++
++ if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn))
++ return true;
++
++ return false;
++}
++
++static void psp_set_master(struct sp_device *sp)
++{
++ if (!sp_dev_master) {
++ sp_dev_master = sp;
++ return;
++ }
++
++ if (sp_pci_is_master(sp))
++ sp_dev_master = sp;
++}
++
++static struct sp_device *psp_get_master(void)
++{
++ return sp_dev_master;
++}
++
+ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ {
+ struct sp_device *sp;
+@@ -166,6 +207,8 @@ static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ goto e_err;
+
+ pci_set_master(pdev);
++ sp->set_psp_master_device = psp_set_master;
++ sp->get_psp_master_device = psp_get_master;
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (ret) {
+@@ -225,6 +268,12 @@ static int sp_pci_resume(struct pci_dev *pdev)
+ }
+ #endif
+
++#ifdef CONFIG_CRYPTO_DEV_SP_PSP
++static const struct psp_vdata psp_entry = {
++ .offset = 0x10500,
++};
++#endif
++
+ static const struct sp_dev_vdata dev_vdata[] = {
+ {
+ .bar = 2,
+@@ -237,6 +286,9 @@ static const struct sp_dev_vdata dev_vdata[] = {
+ #ifdef CONFIG_CRYPTO_DEV_SP_CCP
+ .ccp_vdata = &ccpv5a,
+ #endif
++#ifdef CONFIG_CRYPTO_DEV_SP_PSP
++ .psp_vdata = &psp_entry
++#endif
+ },
+ {
+ .bar = 2,
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0021-crypto-ccp-Define-SEV-key-management-command-id.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0021-crypto-ccp-Define-SEV-key-management-command-id.patch
new file mode 100644
index 00000000..ed1cf2f4
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0021-crypto-ccp-Define-SEV-key-management-command-id.patch
@@ -0,0 +1,505 @@
+From ccbab7b638a8b7d8bcad0cc3b5528ea61b4e4f69 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:27 -0600
+Subject: [PATCH 21/95] crypto: ccp: Define SEV key management command id
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Define Secure Encrypted Virtualization (SEV) key management command id
+and structure. The command definition is available in SEV KM spec
+0.14 (http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf)
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/linux/psp-sev.h | 465 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 465 insertions(+)
+ create mode 100644 include/linux/psp-sev.h
+
+diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
+new file mode 100644
+index 0000000..4a150d1
+--- /dev/null
++++ b/include/linux/psp-sev.h
+@@ -0,0 +1,465 @@
++/*
++ * AMD Secure Encrypted Virtualization (SEV) driver interface
++ *
++ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
++ *
++ * Author: Brijesh Singh <brijesh.singh@amd.com>
++ *
++ * SEV spec 0.14 is available at:
++ * http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __PSP_SEV_H__
++#define __PSP_SEV_H__
++
++#include <uapi/linux/psp-sev.h>
++
++#ifdef CONFIG_X86
++#include <linux/mem_encrypt.h>
++
++#define __psp_pa(x) __sme_pa(x)
++#else
++#define __psp_pa(x) __pa(x)
++#endif
++
++#define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */
++
++/**
++ * SEV platform state
++ */
++enum sev_state {
++ SEV_STATE_UNINIT = 0x0,
++ SEV_STATE_INIT = 0x1,
++ SEV_STATE_WORKING = 0x2,
++
++ SEV_STATE_MAX
++};
++
++/**
++ * SEV platform and guest management commands
++ */
++enum sev_cmd {
++ /* platform commands */
++ SEV_CMD_INIT = 0x001,
++ SEV_CMD_SHUTDOWN = 0x002,
++ SEV_CMD_FACTORY_RESET = 0x003,
++ SEV_CMD_PLATFORM_STATUS = 0x004,
++ SEV_CMD_PEK_GEN = 0x005,
++ SEV_CMD_PEK_CSR = 0x006,
++ SEV_CMD_PEK_CERT_IMPORT = 0x007,
++ SEV_CMD_PDH_CERT_EXPORT = 0x008,
++ SEV_CMD_PDH_GEN = 0x009,
++ SEV_CMD_DF_FLUSH = 0x00A,
++
++ /* Guest commands */
++ SEV_CMD_DECOMMISSION = 0x020,
++ SEV_CMD_ACTIVATE = 0x021,
++ SEV_CMD_DEACTIVATE = 0x022,
++ SEV_CMD_GUEST_STATUS = 0x023,
++
++ /* Guest launch commands */
++ SEV_CMD_LAUNCH_START = 0x030,
++ SEV_CMD_LAUNCH_UPDATE_DATA = 0x031,
++ SEV_CMD_LAUNCH_UPDATE_VMSA = 0x032,
++ SEV_CMD_LAUNCH_MEASURE = 0x033,
++ SEV_CMD_LAUNCH_UPDATE_SECRET = 0x034,
++ SEV_CMD_LAUNCH_FINISH = 0x035,
++
++ /* Guest migration commands (outgoing) */
++ SEV_CMD_SEND_START = 0x040,
++ SEV_CMD_SEND_UPDATE_DATA = 0x041,
++ SEV_CMD_SEND_UPDATE_VMSA = 0x042,
++ SEV_CMD_SEND_FINISH = 0x043,
++
++ /* Guest migration commands (incoming) */
++ SEV_CMD_RECEIVE_START = 0x050,
++ SEV_CMD_RECEIVE_UPDATE_DATA = 0x051,
++ SEV_CMD_RECEIVE_UPDATE_VMSA = 0x052,
++ SEV_CMD_RECEIVE_FINISH = 0x053,
++
++ /* Guest debug commands */
++ SEV_CMD_DBG_DECRYPT = 0x060,
++ SEV_CMD_DBG_ENCRYPT = 0x061,
++
++ SEV_CMD_MAX,
++};
++
++/**
++ * struct sev_data_init - INIT command parameters
++ *
++ * @flags: processing flags
++ * @tmr_address: system physical address used for SEV-ES
++ * @tmr_len: len of tmr_address
++ */
++struct sev_data_init {
++ u32 flags; /* In */
++ u32 reserved; /* In */
++ u64 tmr_address; /* In */
++ u32 tmr_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_pek_csr - PEK_CSR command parameters
++ *
++ * @address: PEK certificate chain
++ * @len: len of certificate
++ */
++struct sev_data_pek_csr {
++ u64 address; /* In */
++ u32 len; /* In/Out */
++} __packed;
++
++/**
++ * struct sev_data_cert_import - PEK_CERT_IMPORT command parameters
++ *
++ * @pek_address: PEK certificate chain
++ * @pek_len: len of PEK certificate
++ * @oca_address: OCA certificate chain
++ * @oca_len: len of OCA certificate
++ */
++struct sev_data_pek_cert_import {
++ u64 pek_cert_address; /* In */
++ u32 pek_cert_len; /* In */
++ u32 reserved; /* In */
++ u64 oca_cert_address; /* In */
++ u32 oca_cert_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
++ *
++ * @pdh_address: PDH certificate address
++ * @pdh_len: len of PDH certificate
++ * @cert_chain_address: PDH certificate chain
++ * @cert_chain_len: len of PDH certificate chain
++ */
++struct sev_data_pdh_cert_export {
++ u64 pdh_cert_address; /* In */
++ u32 pdh_cert_len; /* In/Out */
++ u32 reserved; /* In */
++ u64 cert_chain_address; /* In */
++ u32 cert_chain_len; /* In/Out */
++} __packed;
++
++/**
++ * struct sev_data_decommission - DECOMMISSION command parameters
++ *
++ * @handle: handle of the VM to decommission
++ */
++struct sev_data_decommission {
++ u32 handle; /* In */
++} __packed;
++
++/**
++ * struct sev_data_activate - ACTIVATE command parameters
++ *
++ * @handle: handle of the VM to activate
++ * @asid: asid assigned to the VM
++ */
++struct sev_data_activate {
++ u32 handle; /* In */
++ u32 asid; /* In */
++} __packed;
++
++/**
++ * struct sev_data_deactivate - DEACTIVATE command parameters
++ *
++ * @handle: handle of the VM to deactivate
++ */
++struct sev_data_deactivate {
++ u32 handle; /* In */
++} __packed;
++
++/**
++ * struct sev_data_guest_status - SEV GUEST_STATUS command parameters
++ *
++ * @handle: handle of the VM to retrieve status
++ * @policy: policy information for the VM
++ * @asid: current ASID of the VM
++ * @state: current state of the VM
++ */
++struct sev_data_guest_status {
++ u32 handle; /* In */
++ u32 policy; /* Out */
++ u32 asid; /* Out */
++ u8 state; /* Out */
++} __packed;
++
++/**
++ * struct sev_data_launch_start - LAUNCH_START command parameters
++ *
++ * @handle: handle assigned to the VM
++ * @policy: guest launch policy
++ * @dh_cert_address: physical address of DH certificate blob
++ * @dh_cert_len: len of DH certificate blob
++ * @session_address: physical address of session parameters
++ * @session_len: len of session parameters
++ */
++struct sev_data_launch_start {
++ u32 handle; /* In/Out */
++ u32 policy; /* In */
++ u64 dh_cert_address; /* In */
++ u32 dh_cert_len; /* In */
++ u32 reserved; /* In */
++ u64 session_address; /* In */
++ u32 session_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_launch_update_data - LAUNCH_UPDATE_DATA command parameter
++ *
++ * @handle: handle of the VM to update
++ * @len: len of memory to be encrypted
++ * @address: physical address of memory region to encrypt
++ */
++struct sev_data_launch_update_data {
++ u32 handle; /* In */
++ u32 reserved;
++ u64 address; /* In */
++ u32 len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_launch_update_vmsa - LAUNCH_UPDATE_VMSA command
++ *
++ * @handle: handle of the VM
++ * @address: physical address of memory region to encrypt
++ * @len: len of memory region to encrypt
++ */
++struct sev_data_launch_update_vmsa {
++ u32 handle; /* In */
++ u32 reserved;
++ u64 address; /* In */
++ u32 len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_launch_measure - LAUNCH_MEASURE command parameters
++ *
++ * @handle: handle of the VM to process
++ * @address: physical address containing the measurement blob
++ * @len: len of measurement blob
++ */
++struct sev_data_launch_measure {
++ u32 handle; /* In */
++ u32 reserved;
++ u64 address; /* In */
++ u32 len; /* In/Out */
++} __packed;
++
++/**
++ * struct sev_data_launch_secret - LAUNCH_SECRET command parameters
++ *
++ * @handle: handle of the VM to process
++ * @hdr_address: physical address containing the packet header
++ * @hdr_len: len of packet header
++ * @guest_address: system physical address of guest memory region
++ * @guest_len: len of guest_paddr
++ * @trans_address: physical address of transport memory buffer
++ * @trans_len: len of transport memory buffer
++ */
++struct sev_data_launch_secret {
++ u32 handle; /* In */
++ u32 reserved1;
++ u64 hdr_address; /* In */
++ u32 hdr_len; /* In */
++ u32 reserved2;
++ u64 guest_address; /* In */
++ u32 guest_len; /* In */
++ u32 reserved3;
++ u64 trans_address; /* In */
++ u32 trans_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_launch_finish - LAUNCH_FINISH command parameters
++ *
++ * @handle: handle of the VM to process
++ */
++struct sev_data_launch_finish {
++ u32 handle; /* In */
++} __packed;
++
++/**
++ * struct sev_data_send_start - SEND_START command parameters
++ *
++ * @handle: handle of the VM to process
++ * @policy: policy information for the VM
++ * @pdh_cert_address: physical address containing PDH certificate
++ * @pdh_cert_len: len of PDH certificate
++ * @plat_certs_address: physical address containing platform certificate
++ * @plat_certs_len: len of platform certificate
++ * @amd_certs_address: physical address containing AMD certificate
++ * @amd_certs_len: len of AMD certificate
++ * @session_address: physical address containing Session data
++ * @session_len: len of session data
++ */
++struct sev_data_send_start {
++ u32 handle; /* In */
++ u32 policy; /* Out */
++ u64 pdh_cert_address; /* In */
++ u32 pdh_cert_len; /* In */
++ u32 reserved1;
++ u64 plat_cert_address; /* In */
++ u32 plat_cert_len; /* In */
++ u32 reserved2;
++ u64 amd_cert_address; /* In */
++ u32 amd_cert_len; /* In */
++ u32 reserved3;
++ u64 session_address; /* In */
++ u32 session_len; /* In/Out */
++} __packed;
++
++/**
++ * struct sev_data_send_update - SEND_UPDATE_DATA command
++ *
++ * @handle: handle of the VM to process
++ * @hdr_address: physical address containing packet header
++ * @hdr_len: len of packet header
++ * @guest_address: physical address of guest memory region to send
++ * @guest_len: len of guest memory region to send
++ * @trans_address: physical address of host memory region
++ * @trans_len: len of host memory region
++ */
++struct sev_data_send_update_data {
++ u32 handle; /* In */
++ u32 reserved1;
++ u64 hdr_address; /* In */
++ u32 hdr_len; /* In/Out */
++ u32 reserved2;
++ u64 guest_address; /* In */
++ u32 guest_len; /* In */
++ u32 reserved3;
++ u64 trans_address; /* In */
++ u32 trans_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_send_update - SEND_UPDATE_VMSA command
++ *
++ * @handle: handle of the VM to process
++ * @hdr_address: physical address containing packet header
++ * @hdr_len: len of packet header
++ * @guest_address: physical address of guest memory region to send
++ * @guest_len: len of guest memory region to send
++ * @trans_address: physical address of host memory region
++ * @trans_len: len of host memory region
++ */
++struct sev_data_send_update_vmsa {
++ u32 handle; /* In */
++ u64 hdr_address; /* In */
++ u32 hdr_len; /* In/Out */
++ u32 reserved2;
++ u64 guest_address; /* In */
++ u32 guest_len; /* In */
++ u32 reserved3;
++ u64 trans_address; /* In */
++ u32 trans_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_send_finish - SEND_FINISH command parameters
++ *
++ * @handle: handle of the VM to process
++ */
++struct sev_data_send_finish {
++ u32 handle; /* In */
++} __packed;
++
++/**
++ * struct sev_data_receive_start - RECEIVE_START command parameters
++ *
++ * @handle: handle of the VM to perform receive operation
++ * @pdh_cert_address: system physical address containing PDH certificate blob
++ * @pdh_cert_len: len of PDH certificate blob
++ * @session_address: system physical address containing session blob
++ * @session_len: len of session blob
++ */
++struct sev_data_receive_start {
++ u32 handle; /* In/Out */
++ u32 policy; /* In */
++ u64 pdh_cert_address; /* In */
++ u32 pdh_cert_len; /* In */
++ u32 reserved1;
++ u64 session_address; /* In */
++ u32 session_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_receive_update_data - RECEIVE_UPDATE_DATA command parameters
++ *
++ * @handle: handle of the VM to update
++ * @hdr_address: physical address containing packet header blob
++ * @hdr_len: len of packet header
++ * @guest_address: system physical address of guest memory region
++ * @guest_len: len of guest memory region
++ * @trans_address: system physical address of transport buffer
++ * @trans_len: len of transport buffer
++ */
++struct sev_data_receive_update_data {
++ u32 handle; /* In */
++ u32 reserved1;
++ u64 hdr_address; /* In */
++ u32 hdr_len; /* In */
++ u32 reserved2;
++ u64 guest_address; /* In */
++ u32 guest_len; /* In */
++ u32 reserved3;
++ u64 trans_address; /* In */
++ u32 trans_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_receive_update_vmsa - RECEIVE_UPDATE_VMSA command parameters
++ *
++ * @handle: handle of the VM to update
++ * @hdr_address: physical address containing packet header blob
++ * @hdr_len: len of packet header
++ * @guest_address: system physical address of guest memory region
++ * @guest_len: len of guest memory region
++ * @trans_address: system physical address of transport buffer
++ * @trans_len: len of transport buffer
++ */
++struct sev_data_receive_update_vmsa {
++ u32 handle; /* In */
++ u32 reserved1;
++ u64 hdr_address; /* In */
++ u32 hdr_len; /* In */
++ u32 reserved2;
++ u64 guest_address; /* In */
++ u32 guest_len; /* In */
++ u32 reserved3;
++ u64 trans_address; /* In */
++ u32 trans_len; /* In */
++} __packed;
++
++/**
++ * struct sev_data_receive_finish - RECEIVE_FINISH command parameters
++ *
++ * @handle: handle of the VM to finish
++ */
++struct sev_data_receive_finish {
++ u32 handle; /* In */
++} __packed;
++
++/**
++ * struct sev_data_dbg - DBG_ENCRYPT/DBG_DECRYPT command parameters
++ *
++ * @handle: handle of the VM to perform debug operation
++ * @src_addr: source address of data to operate on
++ * @dst_addr: destination address of data to operate on
++ * @len: len of data to operate on
++ */
++struct sev_data_dbg {
++ u32 handle; /* In */
++ u32 reserved;
++ u64 src_addr; /* In */
++ u64 dst_addr; /* In */
++ u32 len; /* In */
++} __packed;
++
++#endif /* __PSP_SEV_H__ */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0022-Documentation-virtual-kvm-Add-AMD-Secure-Encrypted-V.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0022-Documentation-virtual-kvm-Add-AMD-Secure-Encrypted-V.patch
new file mode 100644
index 00000000..06faa5fd
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0022-Documentation-virtual-kvm-Add-AMD-Secure-Encrypted-V.patch
@@ -0,0 +1,97 @@
+From 76cdc1abc5facd5188ae8e0bab511bd5612b98b2 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:23 -0600
+Subject: [PATCH 22/95] Documentation/virtual/kvm: Add AMD Secure Encrypted
+ Virtualization (SEV)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Create a Documentation entry to describe the AMD Secure Encrypted
+Virtualization (SEV) feature.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ Documentation/virtual/kvm/00-INDEX | 3 ++
+ .../virtual/kvm/amd-memory-encryption.rst | 45 ++++++++++++++++++++++
+ 2 files changed, 48 insertions(+)
+ create mode 100644 Documentation/virtual/kvm/amd-memory-encryption.rst
+
+diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
+index 69fe1a8..3da73aa 100644
+--- a/Documentation/virtual/kvm/00-INDEX
++++ b/Documentation/virtual/kvm/00-INDEX
+@@ -26,3 +26,6 @@ s390-diag.txt
+ - Diagnose hypercall description (for IBM S/390)
+ timekeeping.txt
+ - timekeeping virtualization for x86-based architectures.
++amd-memory-encryption.txt
++ - notes on AMD Secure Encrypted Virtualization feature and SEV firmware
++ command description
+diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
+new file mode 100644
+index 0000000..a8ef21e
+--- /dev/null
++++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
+@@ -0,0 +1,45 @@
++======================================
++Secure Encrypted Virtualization (SEV)
++======================================
++
++Overview
++========
++
++Secure Encrypted Virtualization (SEV) is a feature found on AMD processors.
++
++SEV is an extension to the AMD-V architecture which supports running
++virtual machines (VMs) under the control of a hypervisor. When enabled,
++the memory contents of a VM will be transparently encrypted with a key
++unique to that VM.
++
++The hypervisor can determine the SEV support through the CPUID
++instruction. The CPUID function 0x8000001f reports information related
++to SEV::
++
++ 0x8000001f[eax]:
++ Bit[1] indicates support for SEV
++ ...
++ [ecx]:
++ Bits[31:0] Number of encrypted guests supported simultaneously
++
++If support for SEV is present, MSR 0xc001_0010 (MSR_K8_SYSCFG) and MSR 0xc001_0015
++(MSR_K7_HWCR) can be used to determine if it can be enabled::
++
++ 0xc001_0010:
++ Bit[23] 1 = memory encryption can be enabled
++ 0 = memory encryption can not be enabled
++
++ 0xc001_0015:
++ Bit[0] 1 = memory encryption can be enabled
++ 0 = memory encryption can not be enabled
++
++When SEV support is available, it can be enabled in a specific VM by
++setting the SEV bit before executing VMRUN.::
++
++ VMCB[0x90]:
++ Bit[1] 1 = SEV is enabled
++ 0 = SEV is disabled
++
++SEV hardware uses ASIDs to associate a memory encryption key with a VM.
++Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value
++defined in the CPUID 0x8000001f[ecx] field.
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0023-crypto-ccp-Add-Secure-Encrypted-Virtualization-SEV-c.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0023-crypto-ccp-Add-Secure-Encrypted-Virtualization-SEV-c.patch
new file mode 100644
index 00000000..03c0d5a3
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0023-crypto-ccp-Add-Secure-Encrypted-Virtualization-SEV-c.patch
@@ -0,0 +1,678 @@
+From af510af728dde095118afc345e5cc4e1e6551c54 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:28 -0600
+Subject: [PATCH 23/95] crypto: ccp: Add Secure Encrypted Virtualization (SEV)
+ command support
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+AMD's new Secure Encrypted Virtualization (SEV) feature allows the
+memory contents of virtual machines to be transparently encrypted with a
+key unique to the VM. The programming and management of the encryption
+keys are handled by the AMD Secure Processor (AMD-SP) which exposes the
+commands for these tasks. The complete spec is available at:
+
+http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+
+Extend the AMD-SP driver to provide the following support:
+
+ - an in-kernel API to communicate with the SEV firmware. The API can be
+ used by the hypervisor to create encryption context for a SEV guest.
+
+ - a userspace IOCTL to manage the platform certificates.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 344 +++++++++++++++++++++++++++++++++++++++++++
+ drivers/crypto/ccp/psp-dev.h | 24 +++
+ drivers/crypto/ccp/sp-dev.c | 9 ++
+ drivers/crypto/ccp/sp-dev.h | 4 +
+ include/linux/psp-sev.h | 137 +++++++++++++++++
+ 5 files changed, 518 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index b5789f87..9915a6c 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -26,6 +26,12 @@
+ #include "sp-dev.h"
+ #include "psp-dev.h"
+
++#define DEVICE_NAME "sev"
++
++static DEFINE_MUTEX(sev_cmd_mutex);
++static struct sev_misc_dev *misc_dev;
++static struct psp_device *psp_master;
++
+ static struct psp_device *psp_alloc_struct(struct sp_device *sp)
+ {
+ struct device *dev = sp->dev;
+@@ -45,9 +51,285 @@ static struct psp_device *psp_alloc_struct(struct sp_device *sp)
+
+ static irqreturn_t psp_irq_handler(int irq, void *data)
+ {
++ struct psp_device *psp = data;
++ unsigned int status;
++ int reg;
++
++ /* Read the interrupt status: */
++ status = ioread32(psp->io_regs + PSP_P2CMSG_INTSTS);
++
++ /* Check if it is command completion: */
++ if (!(status & BIT(PSP_CMD_COMPLETE_REG)))
++ goto done;
++
++ /* Check if it is SEV command completion: */
++ reg = ioread32(psp->io_regs + PSP_CMDRESP);
++ if (reg & PSP_CMDRESP_RESP) {
++ psp->sev_int_rcvd = 1;
++ wake_up(&psp->sev_int_queue);
++ }
++
++done:
++ /* Clear the interrupt status by writing the same value we read. */
++ iowrite32(status, psp->io_regs + PSP_P2CMSG_INTSTS);
++
+ return IRQ_HANDLED;
+ }
+
++static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg)
++{
++ psp->sev_int_rcvd = 0;
++
++ wait_event(psp->sev_int_queue, psp->sev_int_rcvd);
++ *reg = ioread32(psp->io_regs + PSP_CMDRESP);
++}
++
++static int sev_cmd_buffer_len(int cmd)
++{
++ switch (cmd) {
++ case SEV_CMD_INIT: return sizeof(struct sev_data_init);
++ case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status);
++ case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr);
++ case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import);
++ case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export);
++ case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start);
++ case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data);
++ case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa);
++ case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish);
++ case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure);
++ case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate);
++ case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate);
++ case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission);
++ case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status);
++ case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg);
++ case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg);
++ case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start);
++ case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data);
++ case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa);
++ case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish);
++ case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start);
++ case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish);
++ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
++ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
++ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
++ default: return 0;
++ }
++
++ return 0;
++}
++
++static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
++{
++ struct psp_device *psp = psp_master;
++ unsigned int phys_lsb, phys_msb;
++ unsigned int reg, ret = 0;
++
++ if (!psp)
++ return -ENODEV;
++
++ /* Get the physical address of the command buffer */
++ phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
++ phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
++
++ dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n",
++ cmd, phys_msb, phys_lsb);
++
++ print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
++ sev_cmd_buffer_len(cmd), false);
++
++ iowrite32(phys_lsb, psp->io_regs + PSP_CMDBUFF_ADDR_LO);
++ iowrite32(phys_msb, psp->io_regs + PSP_CMDBUFF_ADDR_HI);
++
++ reg = cmd;
++ reg <<= PSP_CMDRESP_CMD_SHIFT;
++ reg |= PSP_CMDRESP_IOC;
++ iowrite32(reg, psp->io_regs + PSP_CMDRESP);
++
++ /* wait for command completion */
++ sev_wait_cmd_ioc(psp, &reg);
++
++ if (psp_ret)
++ *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
++
++ if (reg & PSP_CMDRESP_ERR_MASK) {
++ dev_dbg(psp->dev, "sev command %#x failed (%#010x)\n",
++ cmd, reg & PSP_CMDRESP_ERR_MASK);
++ ret = -EIO;
++ }
++
++ print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data,
++ sev_cmd_buffer_len(cmd), false);
++
++ return ret;
++}
++
++static int sev_do_cmd(int cmd, void *data, int *psp_ret)
++{
++ int rc;
++
++ mutex_lock(&sev_cmd_mutex);
++ rc = __sev_do_cmd_locked(cmd, data, psp_ret);
++ mutex_unlock(&sev_cmd_mutex);
++
++ return rc;
++}
++
++static int __sev_platform_init_locked(int *error)
++{
++ struct psp_device *psp = psp_master;
++ int rc = 0;
++
++ if (!psp)
++ return -ENODEV;
++
++ if (psp->sev_state == SEV_STATE_INIT)
++ return 0;
++
++ rc = __sev_do_cmd_locked(SEV_CMD_INIT, &psp->init_cmd_buf, error);
++ if (rc)
++ return rc;
++
++ psp->sev_state = SEV_STATE_INIT;
++ dev_dbg(psp->dev, "SEV firmware initialized\n");
++
++ return rc;
++}
++
++int sev_platform_init(int *error)
++{
++ int rc;
++
++ mutex_lock(&sev_cmd_mutex);
++ rc = __sev_platform_init_locked(error);
++ mutex_unlock(&sev_cmd_mutex);
++
++ return rc;
++}
++EXPORT_SYMBOL_GPL(sev_platform_init);
++
++static int __sev_platform_shutdown_locked(int *error)
++{
++ int ret;
++
++ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error);
++ if (ret)
++ return ret;
++
++ psp_master->sev_state = SEV_STATE_UNINIT;
++ dev_dbg(psp_master->dev, "SEV firmware shutdown\n");
++
++ return ret;
++}
++
++static int sev_platform_shutdown(int *error)
++{
++ int rc;
++
++ mutex_lock(&sev_cmd_mutex);
++ rc = __sev_platform_shutdown_locked(NULL);
++ mutex_unlock(&sev_cmd_mutex);
++
++ return rc;
++}
++
++static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
++{
++ return -ENOTTY;
++}
++
++static const struct file_operations sev_fops = {
++ .owner = THIS_MODULE,
++ .unlocked_ioctl = sev_ioctl,
++};
++
++int sev_platform_status(struct sev_user_data_status *data, int *error)
++{
++ return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error);
++}
++EXPORT_SYMBOL_GPL(sev_platform_status);
++
++int sev_guest_deactivate(struct sev_data_deactivate *data, int *error)
++{
++ return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error);
++}
++EXPORT_SYMBOL_GPL(sev_guest_deactivate);
++
++int sev_guest_activate(struct sev_data_activate *data, int *error)
++{
++ return sev_do_cmd(SEV_CMD_ACTIVATE, data, error);
++}
++EXPORT_SYMBOL_GPL(sev_guest_activate);
++
++int sev_guest_decommission(struct sev_data_decommission *data, int *error)
++{
++ return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error);
++}
++EXPORT_SYMBOL_GPL(sev_guest_decommission);
++
++int sev_guest_df_flush(int *error)
++{
++ return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error);
++}
++EXPORT_SYMBOL_GPL(sev_guest_df_flush);
++
++static void sev_exit(struct kref *ref)
++{
++ struct sev_misc_dev *misc_dev = container_of(ref, struct sev_misc_dev, refcount);
++
++ misc_deregister(&misc_dev->misc);
++}
++
++static int sev_misc_init(struct psp_device *psp)
++{
++ struct device *dev = psp->dev;
++ int ret;
++
++ /*
++ * SEV feature support can be detected on multiple devices but the SEV
++ * FW commands must be issued on the master. During probe, we do not
++ * know the master hence we create /dev/sev on the first device probe.
++ * sev_do_cmd() finds the right master device to which to issue the
++ * command to the firmware.
++ */
++ if (!misc_dev) {
++ struct miscdevice *misc;
++
++ misc_dev = devm_kzalloc(dev, sizeof(*misc_dev), GFP_KERNEL);
++ if (!misc_dev)
++ return -ENOMEM;
++
++ misc = &misc_dev->misc;
++ misc->minor = MISC_DYNAMIC_MINOR;
++ misc->name = DEVICE_NAME;
++ misc->fops = &sev_fops;
++
++ ret = misc_register(misc);
++ if (ret)
++ return ret;
++
++ kref_init(&misc_dev->refcount);
++ } else {
++ kref_get(&misc_dev->refcount);
++ }
++
++ init_waitqueue_head(&psp->sev_int_queue);
++ psp->sev_misc = misc_dev;
++ dev_dbg(dev, "registered SEV device\n");
++
++ return 0;
++}
++
++static int sev_init(struct psp_device *psp)
++{
++ /* Check if device supports SEV feature */
++ if (!(ioread32(psp->io_regs + PSP_FEATURE_REG) & 1)) {
++ dev_dbg(psp->dev, "device does not support SEV\n");
++ return 1;
++ }
++
++ return sev_misc_init(psp);
++}
++
+ int psp_dev_init(struct sp_device *sp)
+ {
+ struct device *dev = sp->dev;
+@@ -81,6 +363,10 @@ int psp_dev_init(struct sp_device *sp)
+ goto e_err;
+ }
+
++ ret = sev_init(psp);
++ if (ret)
++ goto e_irq;
++
+ if (sp->set_psp_master_device)
+ sp->set_psp_master_device(sp);
+
+@@ -89,6 +375,8 @@ int psp_dev_init(struct sp_device *sp)
+
+ return 0;
+
++e_irq:
++ sp_free_psp_irq(psp->sp, psp);
+ e_err:
+ sp->psp_data = NULL;
+
+@@ -101,5 +389,61 @@ void psp_dev_destroy(struct sp_device *sp)
+ {
+ struct psp_device *psp = sp->psp_data;
+
++ if (psp->sev_misc)
++ kref_put(&misc_dev->refcount, sev_exit);
++
+ sp_free_psp_irq(sp, psp);
+ }
++
++int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd,
++ void *data, int *error)
++{
++ if (!filep || filep->f_op != &sev_fops)
++ return -EBADF;
++
++ return sev_do_cmd(cmd, data, error);
++}
++EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
++
++void psp_pci_init(void)
++{
++ struct sev_user_data_status *status;
++ struct sp_device *sp;
++ int error, rc;
++
++ sp = sp_get_psp_master_device();
++ if (!sp)
++ return;
++
++ psp_master = sp->psp_data;
++
++ /* Initialize the platform */
++ rc = sev_platform_init(&error);
++ if (rc) {
++ dev_err(sp->dev, "SEV: failed to INIT error %#x\n", error);
++ goto err;
++ }
++
++ /* Display SEV firmware version */
++ status = &psp_master->status_cmd_buf;
++ rc = sev_platform_status(status, &error);
++ if (rc) {
++ dev_err(sp->dev, "SEV: failed to get status error %#x\n", error);
++ goto err;
++ }
++
++ dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major,
++ status->api_minor, status->build);
++ return;
++
++err:
++ psp_master = NULL;
++}
++
++void psp_pci_exit(void)
++{
++ if (!psp_master)
++ return;
++
++ sev_platform_shutdown(NULL);
++}
+diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
+index 55b7808..c81f0b1 100644
+--- a/drivers/crypto/ccp/psp-dev.h
++++ b/drivers/crypto/ccp/psp-dev.h
+@@ -25,9 +25,21 @@
+ #include <linux/interrupt.h>
+ #include <linux/irqreturn.h>
+ #include <linux/dmaengine.h>
++#include <linux/psp-sev.h>
++#include <linux/miscdevice.h>
+
+ #include "sp-dev.h"
+
++#define PSP_C2PMSG(_num) ((_num) << 2)
++#define PSP_CMDRESP PSP_C2PMSG(32)
++#define PSP_CMDBUFF_ADDR_LO PSP_C2PMSG(56)
++#define PSP_CMDBUFF_ADDR_HI PSP_C2PMSG(57)
++#define PSP_FEATURE_REG PSP_C2PMSG(63)
++
++#define PSP_P2CMSG(_num) ((_num) << 2)
++#define PSP_CMD_COMPLETE_REG 1
++#define PSP_CMD_COMPLETE PSP_P2CMSG(PSP_CMD_COMPLETE_REG)
++
+ #define PSP_P2CMSG_INTEN 0x0110
+ #define PSP_P2CMSG_INTSTS 0x0114
+
+@@ -44,6 +56,11 @@
+
+ #define MAX_PSP_NAME_LEN 16
+
++struct sev_misc_dev {
++ struct kref refcount;
++ struct miscdevice misc;
++};
++
+ struct psp_device {
+ struct list_head entry;
+
+@@ -54,6 +71,13 @@ struct psp_device {
+ struct sp_device *sp;
+
+ void __iomem *io_regs;
++
++ int sev_state;
++ unsigned int sev_int_rcvd;
++ wait_queue_head_t sev_int_queue;
++ struct sev_misc_dev *sev_misc;
++ struct sev_user_data_status status_cmd_buf;
++ struct sev_data_init init_cmd_buf;
+ };
+
+ #endif /* __PSP_DEV_H */
+diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
+index cf101c0..eb0da65 100644
+--- a/drivers/crypto/ccp/sp-dev.c
++++ b/drivers/crypto/ccp/sp-dev.c
+@@ -272,6 +272,10 @@ static int __init sp_mod_init(void)
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_CRYPTO_DEV_SP_PSP
++ psp_pci_init();
++#endif
++
+ return 0;
+ #endif
+
+@@ -291,6 +295,11 @@ static int __init sp_mod_init(void)
+ static void __exit sp_mod_exit(void)
+ {
+ #ifdef CONFIG_X86
++
++#ifdef CONFIG_CRYPTO_DEV_SP_PSP
++ psp_pci_exit();
++#endif
++
+ sp_pci_exit();
+ #endif
+
+diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h
+index 909cf3e4..acb197b 100644
+--- a/drivers/crypto/ccp/sp-dev.h
++++ b/drivers/crypto/ccp/sp-dev.h
+@@ -143,12 +143,16 @@ static inline int ccp_dev_resume(struct sp_device *sp)
+ #ifdef CONFIG_CRYPTO_DEV_SP_PSP
+
+ int psp_dev_init(struct sp_device *sp);
++void psp_pci_init(void);
+ void psp_dev_destroy(struct sp_device *sp);
++void psp_pci_exit(void);
+
+ #else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+ static inline int psp_dev_init(struct sp_device *sp) { return 0; }
++static inline void psp_pci_init(void) { }
+ static inline void psp_dev_destroy(struct sp_device *sp) { }
++static inline void psp_pci_exit(void) { }
+
+ #endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
+index 4a150d1..0b6dd30 100644
+--- a/include/linux/psp-sev.h
++++ b/include/linux/psp-sev.h
+@@ -462,4 +462,141 @@ struct sev_data_dbg {
+ u32 len; /* In */
+ } __packed;
+
++#ifdef CONFIG_CRYPTO_DEV_SP_PSP
++
++/**
++ * sev_platform_init - perform SEV INIT command
++ *
++ * @error: SEV command return code
++ *
++ * Returns:
++ * 0 if the SEV successfully processed the command
++ * -%ENODEV if the SEV device is not available
++ * -%ENOTSUPP if the SEV does not support SEV
++ * -%ETIMEDOUT if the SEV command timed out
++ * -%EIO if the SEV returned a non-zero return code
++ */
++int sev_platform_init(int *error);
++
++/**
++ * sev_platform_status - perform SEV PLATFORM_STATUS command
++ *
++ * @status: sev_user_data_status structure to be processed
++ * @error: SEV command return code
++ *
++ * Returns:
++ * 0 if the SEV successfully processed the command
++ * -%ENODEV if the SEV device is not available
++ * -%ENOTSUPP if the SEV does not support SEV
++ * -%ETIMEDOUT if the SEV command timed out
++ * -%EIO if the SEV returned a non-zero return code
++ */
++int sev_platform_status(struct sev_user_data_status *status, int *error);
++
++/**
++ * sev_issue_cmd_external_user - issue SEV command by other driver with a file
++ * handle.
++ *
++ * This function can be used by other drivers to issue a SEV command on
++ * behalf of userspace. The caller must pass a valid SEV file descriptor
++ * so that we know that it has access to SEV device.
++ *
++ * @filep - SEV device file pointer
++ * @cmd - command to issue
++ * @data - command buffer
++ * @error: SEV command return code
++ *
++ * Returns:
++ * 0 if the SEV successfully processed the command
++ * -%ENODEV if the SEV device is not available
++ * -%ENOTSUPP if the SEV does not support SEV
++ * -%ETIMEDOUT if the SEV command timed out
++ * -%EIO if the SEV returned a non-zero return code
++ * -%EINVAL if the SEV file descriptor is not valid
++ */
++int sev_issue_cmd_external_user(struct file *filep, unsigned int id,
++ void *data, int *error);
++
++/**
++ * sev_guest_deactivate - perform SEV DEACTIVATE command
++ *
++ * @deactivate: sev_data_deactivate structure to be processed
++ * @sev_ret: sev command return code
++ *
++ * Returns:
++ * 0 if the sev successfully processed the command
++ * -%ENODEV if the sev device is not available
++ * -%ENOTSUPP if the sev does not support SEV
++ * -%ETIMEDOUT if the sev command timed out
++ * -%EIO if the sev returned a non-zero return code
++ */
++int sev_guest_deactivate(struct sev_data_deactivate *data, int *error);
++
++/**
++ * sev_guest_activate - perform SEV ACTIVATE command
++ *
++ * @activate: sev_data_activate structure to be processed
++ * @sev_ret: sev command return code
++ *
++ * Returns:
++ * 0 if the sev successfully processed the command
++ * -%ENODEV if the sev device is not available
++ * -%ENOTSUPP if the sev does not support SEV
++ * -%ETIMEDOUT if the sev command timed out
++ * -%EIO if the sev returned a non-zero return code
++ */
++int sev_guest_activate(struct sev_data_activate *data, int *error);
++
++/**
++ * sev_guest_df_flush - perform SEV DF_FLUSH command
++ *
++ * @sev_ret: sev command return code
++ *
++ * Returns:
++ * 0 if the sev successfully processed the command
++ * -%ENODEV if the sev device is not available
++ * -%ENOTSUPP if the sev does not support SEV
++ * -%ETIMEDOUT if the sev command timed out
++ * -%EIO if the sev returned a non-zero return code
++ */
++int sev_guest_df_flush(int *error);
++
++/**
++ * sev_guest_decommission - perform SEV DECOMMISSION command
++ *
++ * @decommission: sev_data_decommission structure to be processed
++ * @sev_ret: sev command return code
++ *
++ * Returns:
++ * 0 if the sev successfully processed the command
++ * -%ENODEV if the sev device is not available
++ * -%ENOTSUPP if the sev does not support SEV
++ * -%ETIMEDOUT if the sev command timed out
++ * -%EIO if the sev returned a non-zero return code
++ */
++int sev_guest_decommission(struct sev_data_decommission *data, int *error);
++
++#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
++
++static inline int
++sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
++
++static inline int sev_platform_init(int *error) { return -ENODEV; }
++
++static inline int
++sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return -ENODEV; }
++
++static inline int
++sev_guest_decommission(struct sev_data_decommission *data, int *error) { return -ENODEV; }
++
++static inline int
++sev_guest_activate(struct sev_data_activate *data, int *error) { return -ENODEV; }
++
++static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
++
++static inline int
++sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; }
++
++#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
++
+ #endif /* __PSP_SEV_H__ */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0024-crypto-ccp-Define-SEV-userspace-ioctl-and-command-id.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0024-crypto-ccp-Define-SEV-userspace-ioctl-and-command-id.patch
new file mode 100644
index 00000000..95399268
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0024-crypto-ccp-Define-SEV-userspace-ioctl-and-command-id.patch
@@ -0,0 +1,181 @@
+From 90a5684ff341f0063e62171a8fbf5fb9baf28ce4 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:27 -0600
+Subject: [PATCH 24/95] crypto: ccp: Define SEV userspace ioctl and command id
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Add a include file which defines the ioctl and command id used for
+issuing SEV platform management specific commands.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/uapi/linux/psp-sev.h | 142 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 142 insertions(+)
+ create mode 100644 include/uapi/linux/psp-sev.h
+
+diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
+new file mode 100644
+index 0000000..3d77fe9
+--- /dev/null
++++ b/include/uapi/linux/psp-sev.h
+@@ -0,0 +1,142 @@
++/*
++ * Userspace interface for AMD Secure Encrypted Virtualization (SEV)
++ * platform management commands.
++ *
++ * Copyright (C) 2016-2017 Advanced Micro Devices, Inc.
++ *
++ * Author: Brijesh Singh <brijesh.singh@amd.com>
++ *
++ * SEV spec 0.14 is available at:
++ * http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef __PSP_SEV_USER_H__
++#define __PSP_SEV_USER_H__
++
++#include <linux/types.h>
++
++/**
++ * SEV platform commands
++ */
++enum {
++ SEV_FACTORY_RESET = 0,
++ SEV_PLATFORM_STATUS,
++ SEV_PEK_GEN,
++ SEV_PEK_CSR,
++ SEV_PDH_GEN,
++ SEV_PDH_CERT_EXPORT,
++ SEV_PEK_CERT_IMPORT,
++
++ SEV_MAX,
++};
++
++/**
++ * SEV Firmware status code
++ */
++typedef enum {
++ SEV_RET_SUCCESS = 0,
++ SEV_RET_INVALID_PLATFORM_STATE,
++ SEV_RET_INVALID_GUEST_STATE,
++ SEV_RET_INAVLID_CONFIG,
++ SEV_RET_INVALID_len,
++ SEV_RET_ALREADY_OWNED,
++ SEV_RET_INVALID_CERTIFICATE,
++ SEV_RET_POLICY_FAILURE,
++ SEV_RET_INACTIVE,
++ SEV_RET_INVALID_ADDRESS,
++ SEV_RET_BAD_SIGNATURE,
++ SEV_RET_BAD_MEASUREMENT,
++ SEV_RET_ASID_OWNED,
++ SEV_RET_INVALID_ASID,
++ SEV_RET_WBINVD_REQUIRED,
++ SEV_RET_DFFLUSH_REQUIRED,
++ SEV_RET_INVALID_GUEST,
++ SEV_RET_INVALID_COMMAND,
++ SEV_RET_ACTIVE,
++ SEV_RET_HWSEV_RET_PLATFORM,
++ SEV_RET_HWSEV_RET_UNSAFE,
++ SEV_RET_UNSUPPORTED,
++ SEV_RET_MAX,
++} sev_ret_code;
++
++/**
++ * struct sev_user_data_status - PLATFORM_STATUS command parameters
++ *
++ * @major: major API version
++ * @minor: minor API version
++ * @state: platform state
++ * @flags: platform config flags
++ * @build: firmware build id for API version
++ * @guest_count: number of active guests
++ */
++struct sev_user_data_status {
++ __u8 api_major; /* Out */
++ __u8 api_minor; /* Out */
++ __u8 state; /* Out */
++ __u32 flags; /* Out */
++ __u8 build; /* Out */
++ __u32 guest_count; /* Out */
++} __packed;
++
++/**
++ * struct sev_user_data_pek_csr - PEK_CSR command parameters
++ *
++ * @address: PEK certificate chain
++ * @length: length of certificate
++ */
++struct sev_user_data_pek_csr {
++ __u64 address; /* In */
++ __u32 length; /* In/Out */
++} __packed;
++
++/**
++ * struct sev_user_data_cert_import - PEK_CERT_IMPORT command parameters
++ *
++ * @pek_address: PEK certificate chain
++ * @pek_len: length of PEK certificate
++ * @oca_address: OCA certificate chain
++ * @oca_len: length of OCA certificate
++ */
++struct sev_user_data_pek_cert_import {
++ __u64 pek_cert_address; /* In */
++ __u32 pek_cert_len; /* In */
++ __u64 oca_cert_address; /* In */
++ __u32 oca_cert_len; /* In */
++} __packed;
++
++/**
++ * struct sev_user_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
++ *
++ * @pdh_address: PDH certificate address
++ * @pdh_len: length of PDH certificate
++ * @cert_chain_address: PDH certificate chain
++ * @cert_chain_len: length of PDH certificate chain
++ */
++struct sev_user_data_pdh_cert_export {
++ __u64 pdh_cert_address; /* In */
++ __u32 pdh_cert_len; /* In/Out */
++ __u64 cert_chain_address; /* In */
++ __u32 cert_chain_len; /* In/Out */
++} __packed;
++
++/**
++ * struct sev_issue_cmd - SEV ioctl parameters
++ *
++ * @cmd: SEV commands to execute
++ * @opaque: pointer to the command structure
++ * @error: SEV FW return code on failure
++ */
++struct sev_issue_cmd {
++ __u32 cmd; /* In */
++ __u64 data; /* In */
++ __u32 error; /* Out */
++} __packed;
++
++#define SEV_IOC_TYPE 'S'
++#define SEV_ISSUE_CMD _IOWR(SEV_IOC_TYPE, 0x0, struct sev_issue_cmd)
++
++#endif /* __PSP_USER_SEV_H */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0025-crypto-ccp-Implement-SEV_FACTORY_RESET-ioctl-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0025-crypto-ccp-Implement-SEV_FACTORY_RESET-ioctl-command.patch
new file mode 100644
index 00000000..8cb8fa2e
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0025-crypto-ccp-Implement-SEV_FACTORY_RESET-ioctl-command.patch
@@ -0,0 +1,121 @@
+From 6a488d9cf6428aaf117d57413c019f3e073914eb Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:29 -0600
+Subject: [PATCH 25/95] crypto: ccp: Implement SEV_FACTORY_RESET ioctl command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_FACTORY_RESET command can be used by the platform owner to
+reset the non-volatile SEV related data. The command is defined in
+SEV spec section 5.4
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 77 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 76 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index 9915a6c..b49583a4 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -232,9 +232,84 @@ static int sev_platform_shutdown(int *error)
+ return rc;
+ }
+
++static int sev_get_platform_state(int *state, int *error)
++{
++ int rc;
++
++ rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS,
++ &psp_master->status_cmd_buf, error);
++ if (rc)
++ return rc;
++
++ *state = psp_master->status_cmd_buf.state;
++ return rc;
++}
++
++static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
++{
++ int state, rc;
++
++ /*
++ * The SEV spec requires that FACTORY_RESET must be issued in
++ * UNINIT state. Before we go further lets check if any guest is
++ * active.
++ *
++ * If FW is in WORKING state then deny the request otherwise issue
++ * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET.
++ *
++ */
++ rc = sev_get_platform_state(&state, &argp->error);
++ if (rc)
++ return rc;
++
++ if (state == SEV_STATE_WORKING)
++ return -EBUSY;
++
++ if (state == SEV_STATE_INIT) {
++ rc = __sev_platform_shutdown_locked(&argp->error);
++ if (rc)
++ return rc;
++ }
++
++ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error);
++}
++
+ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ {
+- return -ENOTTY;
++ void __user *argp = (void __user *)arg;
++ struct sev_issue_cmd input;
++ int ret = -EFAULT;
++
++ if (!psp_master)
++ return -ENODEV;
++
++ if (ioctl != SEV_ISSUE_CMD)
++ return -EINVAL;
++
++ if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd)))
++ return -EFAULT;
++
++ if (input.cmd > SEV_MAX)
++ return -EINVAL;
++
++ mutex_lock(&sev_cmd_mutex);
++
++ switch (input.cmd) {
++
++ case SEV_FACTORY_RESET:
++ ret = sev_ioctl_do_reset(&input);
++ break;
++ default:
++ ret = -EINVAL;
++ goto out;
++ }
++
++ if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd)))
++ ret = -EFAULT;
++out:
++ mutex_unlock(&sev_cmd_mutex);
++
++ return ret;
+ }
+
+ static const struct file_operations sev_fops = {
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0026-crypto-ccp-Implement-SEV_PLATFORM_STATUS-ioctl-comma.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0026-crypto-ccp-Implement-SEV_PLATFORM_STATUS-ioctl-comma.patch
new file mode 100644
index 00000000..1c502b0d
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0026-crypto-ccp-Implement-SEV_PLATFORM_STATUS-ioctl-comma.patch
@@ -0,0 +1,70 @@
+From 17db835d27c0176abe8fa32ca12522693514906c Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:29 -0600
+Subject: [PATCH 26/95] crypto: ccp: Implement SEV_PLATFORM_STATUS ioctl
+ command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_PLATFORM_STATUS command can be used by the platform owner to
+get the current status of the platform. The command is defined in
+SEV spec section 5.5.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index b49583a4..a5072b1 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -274,6 +274,21 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error);
+ }
+
++static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
++{
++ struct sev_user_data_status *data = &psp_master->status_cmd_buf;
++ int ret;
++
++ ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, data, &argp->error);
++ if (ret)
++ return ret;
++
++ if (copy_to_user((void __user *)argp->data, data, sizeof(*data)))
++ ret = -EFAULT;
++
++ return ret;
++}
++
+ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ {
+ void __user *argp = (void __user *)arg;
+@@ -299,6 +314,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_FACTORY_RESET:
+ ret = sev_ioctl_do_reset(&input);
+ break;
++ case SEV_PLATFORM_STATUS:
++ ret = sev_ioctl_do_platform_status(&input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0027-crypto-ccp-Implement-SEV_PEK_GEN-ioctl-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0027-crypto-ccp-Implement-SEV_PEK_GEN-ioctl-command.patch
new file mode 100644
index 00000000..3e076194
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0027-crypto-ccp-Implement-SEV_PEK_GEN-ioctl-command.patch
@@ -0,0 +1,66 @@
+From 3806307056dfc8add8b8c8420db1e3c6be328267 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:30 -0600
+Subject: [PATCH 27/95] crypto: ccp: Implement SEV_PEK_GEN ioctl command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_PEK_GEN command is used to generate a new Platform Endorsement
+Key (PEK). The command is defined in SEV spec section 5.6.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index a5072b1..8aa8036 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -289,6 +289,19 @@ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+ return ret;
+ }
+
++static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
++{
++ int rc;
++
++ if (psp_master->sev_state == SEV_STATE_UNINIT) {
++ rc = __sev_platform_init_locked(&argp->error);
++ if (rc)
++ return rc;
++ }
++
++ return __sev_do_cmd_locked(cmd, 0, &argp->error);
++}
++
+ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ {
+ void __user *argp = (void __user *)arg;
+@@ -317,6 +330,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_PLATFORM_STATUS:
+ ret = sev_ioctl_do_platform_status(&input);
+ break;
++ case SEV_PEK_GEN:
++ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0028-crypto-ccp-Implement-SEV_PDH_GEN-ioctl-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0028-crypto-ccp-Implement-SEV_PDH_GEN-ioctl-command.patch
new file mode 100644
index 00000000..0c5d701f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0028-crypto-ccp-Implement-SEV_PDH_GEN-ioctl-command.patch
@@ -0,0 +1,46 @@
+From 043a8e43f8ae090a389b9cea62702643a6e07281 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:30 -0600
+Subject: [PATCH 28/95] crypto: ccp: Implement SEV_PDH_GEN ioctl command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_PDH_GEN command is used to re-generate the Platform
+Diffie-Hellman (PDH) key. The command is defined in SEV spec section
+5.6.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index 8aa8036..fd3daf0 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -333,6 +333,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_PEK_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input);
+ break;
++ case SEV_PDH_GEN:
++ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0029-crypto-ccp-Implement-SEV_PEK_CSR-ioctl-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0029-crypto-ccp-Implement-SEV_PEK_CSR-ioctl-command.patch
new file mode 100644
index 00000000..7b01289f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0029-crypto-ccp-Implement-SEV_PEK_CSR-ioctl-command.patch
@@ -0,0 +1,115 @@
+From c4934b87a53c572e16af9c87e6de0fc740f49f4f Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:31 -0600
+Subject: [PATCH 29/95] crypto: ccp: Implement SEV_PEK_CSR ioctl command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_PEK_CSR command can be used to generate a PEK certificate
+signing request. The command is defined in SEV spec section 5.7.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 66 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 66 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index fd3daf0..c3906bb 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -302,6 +302,69 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+ return __sev_do_cmd_locked(cmd, 0, &argp->error);
+ }
+
++static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
++{
++ struct sev_user_data_pek_csr input;
++ struct sev_data_pek_csr *data;
++ void *blob = NULL;
++ int ret;
++
++ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
++ return -EFAULT;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ /* userspace wants to query CSR length */
++ if (!input.address || !input.length)
++ goto cmd;
++
++ /* allocate a physically contiguous buffer to store the CSR blob */
++ if (!access_ok(VERIFY_WRITE, input.address, input.length) ||
++ input.length > SEV_FW_BLOB_MAX_SIZE) {
++ ret = -EFAULT;
++ goto e_free;
++ }
++
++ blob = kmalloc(input.length, GFP_KERNEL);
++ if (!blob) {
++ ret = -ENOMEM;
++ goto e_free;
++ }
++
++ data->address = __psp_pa(blob);
++ data->len = input.length;
++
++cmd:
++ if (psp_master->sev_state == SEV_STATE_UNINIT) {
++ ret = __sev_platform_init_locked(&argp->error);
++ if (ret)
++ goto e_free_blob;
++ }
++
++ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, data, &argp->error);
++
++ /* If we query the CSR length, FW responded with expected data. */
++ input.length = data->len;
++
++ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
++ ret = -EFAULT;
++ goto e_free_blob;
++ }
++
++ if (blob) {
++ if (copy_to_user((void __user *)input.address, blob, input.length))
++ ret = -EFAULT;
++ }
++
++e_free_blob:
++ kfree(blob);
++e_free:
++ kfree(data);
++ return ret;
++}
++
+ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ {
+ void __user *argp = (void __user *)arg;
+@@ -336,6 +399,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_PDH_GEN:
+ ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input);
+ break;
++ case SEV_PEK_CSR:
++ ret = sev_ioctl_do_pek_csr(&input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0030-crypto-ccp-Implement-SEV_PEK_CERT_IMPORT-ioctl-comma.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0030-crypto-ccp-Implement-SEV_PEK_CERT_IMPORT-ioctl-comma.patch
new file mode 100644
index 00000000..f053c273
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0030-crypto-ccp-Implement-SEV_PEK_CERT_IMPORT-ioctl-comma.patch
@@ -0,0 +1,155 @@
+From 9dfdbc1c0e75abf8130122c6d0661d40c884aa42 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:31 -0600
+Subject: [PATCH 30/95] crypto: ccp: Implement SEV_PEK_CERT_IMPORT ioctl
+ command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_PEK_CERT_IMPORT command can be used to import the signed PEK
+certificate. The command is defined in SEV spec section 5.8.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 81 ++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/psp-sev.h | 4 +++
+ 2 files changed, 85 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index c3906bb..9d1c460 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -365,6 +365,84 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+ return ret;
+ }
+
++void *psp_copy_user_blob(u64 __user uaddr, u32 len)
++{
++ void *data;
++
++ if (!uaddr || !len)
++ return ERR_PTR(-EINVAL);
++
++ /* verify that blob length does not exceed our limit */
++ if (len > SEV_FW_BLOB_MAX_SIZE)
++ return ERR_PTR(-EINVAL);
++
++ data = kmalloc(len, GFP_KERNEL);
++ if (!data)
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
++ goto e_free;
++
++ return data;
++
++e_free:
++ kfree(data);
++ return ERR_PTR(-EFAULT);
++}
++EXPORT_SYMBOL_GPL(psp_copy_user_blob);
++
++static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
++{
++ struct sev_user_data_pek_cert_import input;
++ struct sev_data_pek_cert_import *data;
++ void *pek_blob, *oca_blob;
++ int ret;
++
++ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
++ return -EFAULT;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ /* copy PEK certificate blobs from userspace */
++ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len);
++ if (IS_ERR(pek_blob)) {
++ ret = PTR_ERR(pek_blob);
++ goto e_free;
++ }
++
++ data->pek_cert_address = __psp_pa(pek_blob);
++ data->pek_cert_len = input.pek_cert_len;
++
++ /* copy PEK certificate blobs from userspace */
++ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len);
++ if (IS_ERR(oca_blob)) {
++ ret = PTR_ERR(oca_blob);
++ goto e_free_pek;
++ }
++
++ data->oca_cert_address = __psp_pa(oca_blob);
++ data->oca_cert_len = input.oca_cert_len;
++
++ /* If platform is not in INIT state then transition it to INIT */
++ if (psp_master->sev_state != SEV_STATE_INIT) {
++ ret = __sev_platform_init_locked(&argp->error);
++ if (ret)
++ goto e_free_oca;
++ }
++
++ ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, data, &argp->error);
++
++e_free_oca:
++ kfree(oca_blob);
++e_free_pek:
++ kfree(pek_blob);
++e_free:
++ kfree(data);
++ return ret;
++}
++
+ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ {
+ void __user *argp = (void __user *)arg;
+@@ -402,6 +480,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_PEK_CSR:
+ ret = sev_ioctl_do_pek_csr(&input);
+ break;
++ case SEV_PEK_CERT_IMPORT:
++ ret = sev_ioctl_do_pek_import(&input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
+index 0b6dd30..93addfa 100644
+--- a/include/linux/psp-sev.h
++++ b/include/linux/psp-sev.h
+@@ -576,6 +576,8 @@ int sev_guest_df_flush(int *error);
+ */
+ int sev_guest_decommission(struct sev_data_decommission *data, int *error);
+
++void *psp_copy_user_blob(u64 __user uaddr, u32 len);
++
+ #else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+ static inline int
+@@ -597,6 +599,8 @@ static inline int sev_guest_df_flush(int *error) { return -ENODEV; }
+ static inline int
+ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int *error) { return -ENODEV; }
+
++static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
++
+ #endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+
+ #endif /* __PSP_SEV_H__ */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0031-crypto-ccp-Implement-SEV_PDH_CERT_EXPORT-ioctl-comma.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0031-crypto-ccp-Implement-SEV_PDH_CERT_EXPORT-ioctl-comma.patch
new file mode 100644
index 00000000..091165e4
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0031-crypto-ccp-Implement-SEV_PDH_CERT_EXPORT-ioctl-comma.patch
@@ -0,0 +1,147 @@
+From 179b8d37cbd2fee5cc2ec40c4d233ebea55e762a Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:31 -0600
+Subject: [PATCH 31/95] crypto: ccp: Implement SEV_PDH_CERT_EXPORT ioctl
+ command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV_PDH_CERT_EXPORT command can be used to export the PDH and its
+certificate chain. The command is defined in SEV spec section 5.10.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-crypto@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 97 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 97 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index 9d1c460..fcfa5b1 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -443,6 +443,100 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+ return ret;
+ }
+
++static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
++{
++ struct sev_user_data_pdh_cert_export input;
++ void *pdh_blob = NULL, *cert_blob = NULL;
++ struct sev_data_pdh_cert_export *data;
++ int ret;
++
++ if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
++ return -EFAULT;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ /* Userspace wants to query the certificate length. */
++ if (!input.pdh_cert_address ||
++ !input.pdh_cert_len ||
++ !input.cert_chain_address)
++ goto cmd;
++
++ /* Allocate a physically contiguous buffer to store the PDH blob. */
++ if ((input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) ||
++ !access_ok(VERIFY_WRITE, input.pdh_cert_address, input.pdh_cert_len)) {
++ ret = -EFAULT;
++ goto e_free;
++ }
++
++ /* Allocate a physically contiguous buffer to store the cert chain blob. */
++ if ((input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) ||
++ !access_ok(VERIFY_WRITE, input.cert_chain_address, input.cert_chain_len)) {
++ ret = -EFAULT;
++ goto e_free;
++ }
++
++ pdh_blob = kmalloc(input.pdh_cert_len, GFP_KERNEL);
++ if (!pdh_blob) {
++ ret = -ENOMEM;
++ goto e_free;
++ }
++
++ data->pdh_cert_address = __psp_pa(pdh_blob);
++ data->pdh_cert_len = input.pdh_cert_len;
++
++ cert_blob = kmalloc(input.cert_chain_len, GFP_KERNEL);
++ if (!cert_blob) {
++ ret = -ENOMEM;
++ goto e_free_pdh;
++ }
++
++ data->cert_chain_address = __psp_pa(cert_blob);
++ data->cert_chain_len = input.cert_chain_len;
++
++cmd:
++ /* If platform is not in INIT state then transition it to INIT. */
++ if (psp_master->sev_state != SEV_STATE_INIT) {
++ ret = __sev_platform_init_locked(&argp->error);
++ if (ret)
++ goto e_free_cert;
++ }
++
++ ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, data, &argp->error);
++
++ /* If we query the length, FW responded with expected data. */
++ input.cert_chain_len = data->cert_chain_len;
++ input.pdh_cert_len = data->pdh_cert_len;
++
++ if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) {
++ ret = -EFAULT;
++ goto e_free_cert;
++ }
++
++ if (pdh_blob) {
++ if (copy_to_user((void __user *)input.pdh_cert_address,
++ pdh_blob, input.pdh_cert_len)) {
++ ret = -EFAULT;
++ goto e_free_cert;
++ }
++ }
++
++ if (cert_blob) {
++ if (copy_to_user((void __user *)input.cert_chain_address,
++ cert_blob, input.cert_chain_len))
++ ret = -EFAULT;
++ }
++
++e_free_cert:
++ kfree(cert_blob);
++e_free_pdh:
++ kfree(pdh_blob);
++e_free:
++ kfree(data);
++ return ret;
++}
++
+ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ {
+ void __user *argp = (void __user *)arg;
+@@ -483,6 +577,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_PEK_CERT_IMPORT:
+ ret = sev_ioctl_do_pek_import(&input);
+ break;
++ case SEV_PDH_CERT_EXPORT:
++ ret = sev_ioctl_do_pdh_export(&input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0032-crypto-drivers-remove-duplicate-includes.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0032-crypto-drivers-remove-duplicate-includes.patch
new file mode 100644
index 00000000..7dd23fb9
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0032-crypto-drivers-remove-duplicate-includes.patch
@@ -0,0 +1,30 @@
+From cbc0b0be1854cd3f49ccdc438a9d6e51b6c576cf Mon Sep 17 00:00:00 2001
+From: Pravin Shedge <pravin.shedge4linux@gmail.com>
+Date: Tue, 5 Dec 2017 07:27:22 +0530
+Subject: [PATCH 32/95] crypto: drivers - remove duplicate includes
+
+These duplicate includes have been found with scripts/checkincludes.pl but
+they have been removed manually to avoid removing false positives.
+
+Signed-off-by: Pravin Shedge <pravin.shedge4linux@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-crypto-aes-galois.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+index ff02b71..ca1f0d7 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -21,7 +21,6 @@
+ #include <crypto/ctr.h>
+ #include <crypto/gcm.h>
+ #include <crypto/scatterwalk.h>
+-#include <linux/delay.h>
+
+ #include "ccp-crypto.h"
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0033-crypto-ccp-Make-function-ccp_get_dma_chan_attr-stati.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0033-crypto-ccp-Make-function-ccp_get_dma_chan_attr-stati.patch
new file mode 100644
index 00000000..0a53a202
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0033-crypto-ccp-Make-function-ccp_get_dma_chan_attr-stati.patch
@@ -0,0 +1,36 @@
+From 788a7b92dc824b406d629db2c2da6f414c636c51 Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Tue, 6 Feb 2018 23:20:01 +0000
+Subject: [PATCH 33/95] crypto: ccp - Make function ccp_get_dma_chan_attr
+ static
+
+Function ccp_get_dma_chan_attr is local to the source and does not
+need to be in global scope, so make it static.
+
+Cleans up sparse warning:
+drivers/crypto/ccp/ccp-dmaengine.c:41:14: warning: symbol
+'ccp_get_dma_chan_attr' was not declared. Should it be static?
+
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-dmaengine.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index 8b9da58..67155cb 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -38,7 +38,7 @@ static unsigned int dma_chan_attr = CCP_DMA_DFLT;
+ module_param(dma_chan_attr, uint, 0444);
+ MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");
+
+-unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
++static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
+ {
+ switch (dma_chan_attr) {
+ case CCP_DMA_DFLT:
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0034-crypto-ccp-add-check-to-get-PSP-master-only-when-PSP.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0034-crypto-ccp-add-check-to-get-PSP-master-only-when-PSP.patch
new file mode 100644
index 00000000..bdb37f1c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0034-crypto-ccp-add-check-to-get-PSP-master-only-when-PSP.patch
@@ -0,0 +1,72 @@
+From 45bca3e63fe71fb04abfbf0d6a1416f79e0acf01 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Wed, 21 Feb 2018 08:41:39 -0600
+Subject: [PATCH 34/95] crypto: ccp - add check to get PSP master only when PSP
+ is detected
+
+Paulian reported the below kernel crash on Ryzen 5 system:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000073
+RIP: 0010:.LC0+0x41f/0xa00
+RSP: 0018:ffffa9968003bdd0 EFLAGS: 00010002
+RAX: ffffffffb113b130 RBX: 0000000000000000 RCX: 00000000000005a7
+RDX: 00000000000000ff RSI: ffff8b46dee651a0 RDI: ffffffffb1bd617c
+RBP: 0000000000000246 R08: 00000000000251a0 R09: 0000000000000000
+R10: ffffd81f11a38200 R11: ffff8b52e8e0a161 R12: ffffffffb19db220
+R13: 0000000000000007 R14: ffffffffb17e4888 R15: 5dccd7affc30a31e
+FS: 0000000000000000(0000) GS:ffff8b46dee40000(0000) knlGS:0000000000000000
+CR2: 0000000000000073 CR3: 000080128120a000 CR4: 00000000003406e0
+Call Trace:
+ ? sp_get_psp_master_device+0x56/0x80
+ ? map_properties+0x540/0x540
+ ? psp_pci_init+0x20/0xe0
+ ? map_properties+0x540/0x540
+ ? sp_mod_init+0x16/0x1a
+ ? do_one_initcall+0x4b/0x190
+ ? kernel_init_freeable+0x19b/0x23c
+ ? rest_init+0xb0/0xb0
+ ? kernel_init+0xa/0x100
+ ? ret_from_fork+0x22/0x40
+
+Since Ryzen does not support PSP/SEV firmware hence i->psp_data will
+NULL in all sp instances. In those cases, 'i' will point to the
+list head after list_for_each_entry(). Dereferencing the head will
+cause kernel crash.
+
+Add check to call get master device only when PSP/SEV is detected.
+
+Reported-by: Paulian Bogdan Marinca <paulian@marinca.net>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+CC: Gary R Hook <gary.hook@amd.com>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/sp-dev.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
+index eb0da65..e045900 100644
+--- a/drivers/crypto/ccp/sp-dev.c
++++ b/drivers/crypto/ccp/sp-dev.c
+@@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
+ goto unlock;
+
+ list_for_each_entry(i, &sp_units, entry) {
+- if (i->psp_data)
++ if (i->psp_data && i->get_psp_master_device) {
++ ret = i->get_psp_master_device();
+ break;
++ }
+ }
+
+- if (i->get_psp_master_device)
+- ret = i->get_psp_master_device();
+ unlock:
+ write_unlock_irqrestore(&sp_unit_lock, flags);
+ return ret;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0035-crypto-ccp-Fix-sparse-use-plain-integer-as-NULL-poin.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0035-crypto-ccp-Fix-sparse-use-plain-integer-as-NULL-poin.patch
new file mode 100644
index 00000000..320b7709
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0035-crypto-ccp-Fix-sparse-use-plain-integer-as-NULL-poin.patch
@@ -0,0 +1,65 @@
+From 8ac2dab92c8b700b785d14cbfd95dfb811894b9f Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Thu, 15 Feb 2018 13:34:44 -0600
+Subject: [PATCH 35/95] crypto: ccp - Fix sparse, use plain integer as NULL
+ pointer
+
+Fix sparse warning: Using plain integer as NULL pointer. Replaces
+assignment of 0 to pointer with NULL assignment.
+
+Fixes: 200664d5237f (Add Secure Encrypted Virtualization ...)
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Gary Hook <gary.hook@amd.com>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index fcfa5b1..b3afb6c 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -211,7 +211,7 @@ static int __sev_platform_shutdown_locked(int *error)
+ {
+ int ret;
+
+- ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, 0, error);
++ ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error);
+ if (ret)
+ return ret;
+
+@@ -271,7 +271,7 @@ static int sev_ioctl_do_reset(struct sev_issue_cmd *argp)
+ return rc;
+ }
+
+- return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, 0, &argp->error);
++ return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error);
+ }
+
+ static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp)
+@@ -299,7 +299,7 @@ static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp)
+ return rc;
+ }
+
+- return __sev_do_cmd_locked(cmd, 0, &argp->error);
++ return __sev_do_cmd_locked(cmd, NULL, &argp->error);
+ }
+
+ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+@@ -624,7 +624,7 @@ EXPORT_SYMBOL_GPL(sev_guest_decommission);
+
+ int sev_guest_df_flush(int *error)
+ {
+- return sev_do_cmd(SEV_CMD_DF_FLUSH, 0, error);
++ return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error);
+ }
+ EXPORT_SYMBOL_GPL(sev_guest_df_flush);
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0036-crypto-ccp-Fill-the-result-buffer-only-on-digest-fin.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0036-crypto-ccp-Fill-the-result-buffer-only-on-digest-fin.patch
new file mode 100644
index 00000000..c3d83c3f
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0036-crypto-ccp-Fill-the-result-buffer-only-on-digest-fin.patch
@@ -0,0 +1,49 @@
+From 3490cd0f4cfce7f49666ecd18a5cebeed8922854 Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Wed, 7 Mar 2018 11:37:42 -0600
+Subject: [PATCH 36/95] crypto: ccp - Fill the result buffer only on digest,
+ finup, and final ops
+
+Any change to the result buffer should only happen on final, finup
+and digest operations. Changes to the buffer for update, import, export,
+etc, are not allowed.
+
+Fixes: 66d7b9f6175e ("crypto: testmgr - test misuse of result in ahash")
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-crypto-aes-cmac.c | 2 +-
+ drivers/crypto/ccp/ccp-crypto-sha.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+index 60fc0fa..26687f31 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+@@ -46,7 +46,7 @@ static int ccp_aes_cmac_complete(struct crypto_async_request *async_req,
+ }
+
+ /* Update result area if supplied */
+- if (req->result)
++ if (req->result && rctx->final)
+ memcpy(req->result, rctx->iv, digest_size);
+
+ e_free:
+diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
+index 8b9b16d..871c962 100644
+--- a/drivers/crypto/ccp/ccp-crypto-sha.c
++++ b/drivers/crypto/ccp/ccp-crypto-sha.c
+@@ -47,7 +47,7 @@ static int ccp_sha_complete(struct crypto_async_request *async_req, int ret)
+ }
+
+ /* Update result area if supplied */
+- if (req->result)
++ if (req->result && rctx->final)
+ memcpy(req->result, rctx->ctx, digest_size);
+
+ e_free:
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0037-crypto-ccp-Use-memdup_user-rather-than-duplicating-i.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0037-crypto-ccp-Use-memdup_user-rather-than-duplicating-i.patch
new file mode 100644
index 00000000..e3db1f08
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0037-crypto-ccp-Use-memdup_user-rather-than-duplicating-i.patch
@@ -0,0 +1,56 @@
+From c1255479db311904c69b38aaa83c8c1de96b29c9 Mon Sep 17 00:00:00 2001
+From: Markus Elfring <elfring@users.sourceforge.net>
+Date: Mon, 5 Mar 2018 13:50:13 +0100
+Subject: [PATCH 37/95] crypto: ccp - Use memdup_user() rather than duplicating
+ its implementation
+
+Reuse existing functionality from memdup_user() instead of keeping
+duplicate source code.
+
+This issue was detected by using the Coccinelle software.
+
+Signed-off-by: Markus Elfring <elfring@users.sourceforge.net>
+Reviewed-by: Brijesh Singh <brijesh.singh@amd.com>
+Acked-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 15 +--------------
+ 1 file changed, 1 insertion(+), 14 deletions(-)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index b3afb6c..d95ec52 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -367,8 +367,6 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp)
+
+ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+ {
+- void *data;
+-
+ if (!uaddr || !len)
+ return ERR_PTR(-EINVAL);
+
+@@ -376,18 +374,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+ if (len > SEV_FW_BLOB_MAX_SIZE)
+ return ERR_PTR(-EINVAL);
+
+- data = kmalloc(len, GFP_KERNEL);
+- if (!data)
+- return ERR_PTR(-ENOMEM);
+-
+- if (copy_from_user(data, (void __user *)(uintptr_t)uaddr, len))
+- goto e_free;
+-
+- return data;
+-
+-e_free:
+- kfree(data);
+- return ERR_PTR(-EFAULT);
++ return memdup_user((void __user *)(uintptr_t)uaddr, len);
+ }
+ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0038-crypto-ccp-Validate-buffer-lengths-for-copy-operatio.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0038-crypto-ccp-Validate-buffer-lengths-for-copy-operatio.patch
new file mode 100644
index 00000000..ef3fa16e
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0038-crypto-ccp-Validate-buffer-lengths-for-copy-operatio.patch
@@ -0,0 +1,265 @@
+From 3991cd925a72592063dda6df1c302c7ed11c90cb Mon Sep 17 00:00:00 2001
+From: Gary R Hook <gary.hook@amd.com>
+Date: Wed, 7 Mar 2018 11:31:14 -0600
+Subject: [PATCH 38/95] crypto: ccp - Validate buffer lengths for copy
+ operations
+
+The CCP driver copies data between scatter/gather lists and DMA buffers.
+The length of the requested copy operation must be checked against
+the available destination buffer length.
+
+Reported-by: Maciej S. Szmigiero <mail@maciej.szmigiero.name>
+Signed-off-by: Gary R Hook <gary.hook@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/ccp-ops.c | 108 +++++++++++++++++++++++++++++++------------
+ 1 file changed, 78 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 406b9532..0ea43cd 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -178,14 +178,18 @@ static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
+ return 0;
+ }
+
+-static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+- struct scatterlist *sg, unsigned int sg_offset,
+- unsigned int len)
++static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
++ struct scatterlist *sg, unsigned int sg_offset,
++ unsigned int len)
+ {
+ WARN_ON(!wa->address);
+
++ if (len > (wa->length - wa_offset))
++ return -EINVAL;
++
+ scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
+ 0);
++ return 0;
+ }
+
+ static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
+@@ -205,8 +209,11 @@ static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
+ unsigned int len)
+ {
+ u8 *p, *q;
++ int rc;
+
+- ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++ rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len);
++ if (rc)
++ return rc;
+
+ p = wa->address + wa_offset;
+ q = p + len - 1;
+@@ -509,7 +516,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -528,7 +537,9 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_key;
+
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -556,8 +567,10 @@ static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_src;
+ }
+
+- ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
+- aes->cmac_key_len);
++ ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
++ aes->cmac_key_len);
++ if (ret)
++ goto e_src;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -666,7 +679,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -685,7 +700,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_key;
+
+ dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -777,7 +794,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ goto e_dst;
+ }
+
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_dst;
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -820,7 +839,9 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+- ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ if (ret)
++ goto e_tag;
+
+ ret = memcmp(tag.address, final_wa.address, AES_BLOCK_SIZE);
+ ccp_dm_free(&tag);
+@@ -914,7 +935,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ return ret;
+
+ dm_offset = CCP_SB_BYTES - aes->key_len;
+- ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
++ if (ret)
++ goto e_key;
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -935,7 +958,9 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ if (aes->mode != CCP_AES_MODE_ECB) {
+ /* Load the AES context - convert to LE */
+ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE;
+- ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+@@ -1113,8 +1138,12 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ * big endian to little endian.
+ */
+ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ if (ret)
++ goto e_key;
+ } else {
+ /* Version 5 CCPs use a 512-bit space for the key: each portion
+ * occupies 256 bits, or one entire slot, and is zero-padded.
+@@ -1123,9 +1152,13 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+
+ dm_offset = CCP_SB_BYTES;
+ pad = dm_offset - xts->key_len;
+- ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
+- xts->key_len);
++ ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key,
++ xts->key_len, xts->key_len);
++ if (ret)
++ goto e_key;
+ }
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+@@ -1144,7 +1177,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ if (ret)
+ goto e_key;
+
+- ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++ ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
++ if (ret)
++ goto e_ctx;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+@@ -1287,12 +1322,18 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */
+
+ len_singlekey = des3->key_len / 3;
+- ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
+- des3->key, 0, len_singlekey);
+- ccp_set_dm_area(&key, dm_offset + len_singlekey,
+- des3->key, len_singlekey, len_singlekey);
+- ccp_set_dm_area(&key, dm_offset,
+- des3->key, 2 * len_singlekey, len_singlekey);
++ ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey,
++ des3->key, 0, len_singlekey);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset + len_singlekey,
++ des3->key, len_singlekey, len_singlekey);
++ if (ret)
++ goto e_key;
++ ret = ccp_set_dm_area(&key, dm_offset,
++ des3->key, 2 * len_singlekey, len_singlekey);
++ if (ret)
++ goto e_key;
+
+ /* Copy the key to the SB */
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+@@ -1320,7 +1361,10 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+
+ /* Load the context into the LSB */
+ dm_offset = CCP_SB_BYTES - des3->iv_len;
+- ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len);
++ ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0,
++ des3->iv_len);
++ if (ret)
++ goto e_ctx;
+
+ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+ load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+@@ -1604,8 +1648,10 @@ static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ }
+ } else {
+ /* Restore the context */
+- ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
+- sb_count * CCP_SB_BYTES);
++ ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0,
++ sb_count * CCP_SB_BYTES);
++ if (ret)
++ goto e_ctx;
+ }
+
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+@@ -1927,7 +1973,9 @@ static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+ if (ret)
+ return ret;
+
+- ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++ ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
++ if (ret)
++ goto e_mask;
+ ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_NOOP);
+ if (ret) {
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0039-crypto-ccp-Add-DOWNLOAD_FIRMWARE-SEV-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0039-crypto-ccp-Add-DOWNLOAD_FIRMWARE-SEV-command.patch
new file mode 100644
index 00000000..73e46ca2
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0039-crypto-ccp-Add-DOWNLOAD_FIRMWARE-SEV-command.patch
@@ -0,0 +1,223 @@
+From 1856c4c431831ab97dce791f992bb70765521965 Mon Sep 17 00:00:00 2001
+From: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Date: Fri, 25 May 2018 15:23:29 -0500
+Subject: [PATCH 39/95] crypto: ccp - Add DOWNLOAD_FIRMWARE SEV command
+
+The DOWNLOAD_FIRMWARE command, added as of SEV API v0.15, allows the OS
+to install SEV firmware newer than the currently active SEV firmware.
+
+For the new SEV firmware to be applied it must:
+* Pass the validation test performed by the existing firmware.
+* Be of the same build or a newer build compared to the existing firmware.
+
+For more information please refer to "Section 5.11 DOWNLOAD_FIRMWARE" of
+https://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+
+Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 99 +++++++++++++++++++++++++++++++++++++++-----
+ drivers/crypto/ccp/psp-dev.h | 4 ++
+ include/linux/psp-sev.h | 12 ++++++
+ 3 files changed, 105 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index d95ec52..12838b4 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -22,11 +22,17 @@
+ #include <linux/delay.h>
+ #include <linux/hw_random.h>
+ #include <linux/ccp.h>
++#include <linux/firmware.h>
+
+ #include "sp-dev.h"
+ #include "psp-dev.h"
+
++#define SEV_VERSION_GREATER_OR_EQUAL(_maj, _min) \
++ ((psp_master->api_major) >= _maj && \
++ (psp_master->api_minor) >= _min)
++
+ #define DEVICE_NAME "sev"
++#define SEV_FW_FILE "amd/sev.fw"
+
+ static DEFINE_MUTEX(sev_cmd_mutex);
+ static struct sev_misc_dev *misc_dev;
+@@ -112,6 +118,7 @@ static int sev_cmd_buffer_len(int cmd)
+ case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data);
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
++ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
+ default: return 0;
+ }
+
+@@ -378,6 +385,79 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+ }
+ EXPORT_SYMBOL_GPL(psp_copy_user_blob);
+
++static int sev_get_api_version(void)
++{
++ struct sev_user_data_status *status;
++ int error, ret;
++
++ status = &psp_master->status_cmd_buf;
++ ret = sev_platform_status(status, &error);
++ if (ret) {
++ dev_err(psp_master->dev,
++ "SEV: failed to get status. Error: %#x\n", error);
++ return 1;
++ }
++
++ psp_master->api_major = status->api_major;
++ psp_master->api_minor = status->api_minor;
++ psp_master->build = status->build;
++
++ return 0;
++}
++
++/* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */
++static int sev_update_firmware(struct device *dev)
++{
++ struct sev_data_download_firmware *data;
++ const struct firmware *firmware;
++ int ret, error, order;
++ struct page *p;
++ u64 data_size;
++
++ ret = request_firmware(&firmware, SEV_FW_FILE, dev);
++ if (ret < 0)
++ return -1;
++
++ /*
++ * SEV FW expects the physical address given to it to be 32
++ * byte aligned. Memory allocated has structure placed at the
++ * beginning followed by the firmware being passed to the SEV
++ * FW. Allocate enough memory for data structure + alignment
++ * padding + SEV FW.
++ */
++ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32);
++
++ order = get_order(firmware->size + data_size);
++ p = alloc_pages(GFP_KERNEL, order);
++ if (!p) {
++ ret = -1;
++ goto fw_err;
++ }
++
++ /*
++ * Copy firmware data to a kernel allocated contiguous
++ * memory region.
++ */
++ data = page_address(p);
++ memcpy(page_address(p) + data_size, firmware->data, firmware->size);
++
++ data->address = __psp_pa(page_address(p) + data_size);
++ data->len = firmware->size;
++
++ ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error);
++ if (ret)
++ dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error);
++ else
++ dev_info(dev, "SEV firmware update successful\n");
++
++ __free_pages(p, order);
++
++fw_err:
++ release_firmware(firmware);
++
++ return ret;
++}
++
+ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+ {
+ struct sev_user_data_pek_cert_import input;
+@@ -750,7 +830,6 @@ EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user);
+
+ void psp_pci_init(void)
+ {
+- struct sev_user_data_status *status;
+ struct sp_device *sp;
+ int error, rc;
+
+@@ -760,6 +839,13 @@ void psp_pci_init(void)
+
+ psp_master = sp->psp_data;
+
++ if (sev_get_api_version())
++ goto err;
++
++ if (SEV_VERSION_GREATER_OR_EQUAL(0, 15) &&
++ sev_update_firmware(psp_master->dev) == 0)
++ sev_get_api_version();
++
+ /* Initialize the platform */
+ rc = sev_platform_init(&error);
+ if (rc) {
+@@ -767,16 +853,9 @@ void psp_pci_init(void)
+ goto err;
+ }
+
+- /* Display SEV firmware version */
+- status = &psp_master->status_cmd_buf;
+- rc = sev_platform_status(status, &error);
+- if (rc) {
+- dev_err(sp->dev, "SEV: failed to get status error %#x\n", error);
+- goto err;
+- }
++ dev_info(sp->dev, "SEV API:%d.%d build:%d\n", psp_master->api_major,
++ psp_master->api_minor, psp_master->build);
+
+- dev_info(sp->dev, "SEV API:%d.%d build:%d\n", status->api_major,
+- status->api_minor, status->build);
+ return;
+
+ err:
+diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
+index c81f0b1..c7e9098a 100644
+--- a/drivers/crypto/ccp/psp-dev.h
++++ b/drivers/crypto/ccp/psp-dev.h
+@@ -78,6 +78,10 @@ struct psp_device {
+ struct sev_misc_dev *sev_misc;
+ struct sev_user_data_status status_cmd_buf;
+ struct sev_data_init init_cmd_buf;
++
++ u8 api_major;
++ u8 api_minor;
++ u8 build;
+ };
+
+ #endif /* __PSP_DEV_H */
+diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
+index 93addfa..1d24962 100644
+--- a/include/linux/psp-sev.h
++++ b/include/linux/psp-sev.h
+@@ -54,6 +54,7 @@ enum sev_cmd {
+ SEV_CMD_PDH_CERT_EXPORT = 0x008,
+ SEV_CMD_PDH_GEN = 0x009,
+ SEV_CMD_DF_FLUSH = 0x00A,
++ SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
+
+ /* Guest commands */
+ SEV_CMD_DECOMMISSION = 0x020,
+@@ -130,6 +131,17 @@ struct sev_data_pek_cert_import {
+ } __packed;
+
+ /**
++ * struct sev_data_download_firmware - DOWNLOAD_FIRMWARE command parameters
++ *
++ * @address: physical address of firmware image
++ * @len: len of the firmware image
++ */
++struct sev_data_download_firmware {
++ u64 address; /* In */
++ u32 len; /* In */
++} __packed;
++
++/**
+ * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_address: PDH certificate address
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0040-crypto-ccp-Add-GET_ID-SEV-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0040-crypto-ccp-Add-GET_ID-SEV-command.patch
new file mode 100644
index 00000000..a0193e93
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0040-crypto-ccp-Add-GET_ID-SEV-command.patch
@@ -0,0 +1,153 @@
+From ff15b826aa4c78ec7069916e1b4f2b803c7427ac Mon Sep 17 00:00:00 2001
+From: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Date: Fri, 25 May 2018 15:23:30 -0500
+Subject: [PATCH 40/95] crypto: ccp - Add GET_ID SEV command
+
+The GET_ID command, added as of SEV API v0.16, allows the SEV firmware
+to be queried about a unique CPU ID. This unique ID can then be used
+to obtain the public certificate containing the Chip Endorsement Key
+(CEK) public key signed by the AMD SEV Signing Key (ASK).
+
+For more information please refer to "Section 5.12 GET_ID" of
+https://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
+
+Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/crypto/ccp/psp-dev.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/psp-sev.h | 11 +++++++++++
+ include/uapi/linux/psp-sev.h | 12 ++++++++++++
+ 3 files changed, 67 insertions(+)
+
+diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
+index 12838b4..ff478d8 100644
+--- a/drivers/crypto/ccp/psp-dev.c
++++ b/drivers/crypto/ccp/psp-dev.c
+@@ -119,6 +119,7 @@ static int sev_cmd_buffer_len(int cmd)
+ case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa);
+ case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret);
+ case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
++ case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
+ default: return 0;
+ }
+
+@@ -510,6 +511,46 @@ static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp)
+ return ret;
+ }
+
++static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp)
++{
++ struct sev_data_get_id *data;
++ u64 data_size, user_size;
++ void *id_blob, *mem;
++ int ret;
++
++ /* SEV GET_ID available from SEV API v0.16 and up */
++ if (!SEV_VERSION_GREATER_OR_EQUAL(0, 16))
++ return -ENOTSUPP;
++
++ /* SEV FW expects the buffer it fills with the ID to be
++ * 8-byte aligned. Memory allocated should be enough to
++ * hold data structure + alignment padding + memory
++ * where SEV FW writes the ID.
++ */
++ data_size = ALIGN(sizeof(struct sev_data_get_id), 8);
++ user_size = sizeof(struct sev_user_data_get_id);
++
++ mem = kzalloc(data_size + user_size, GFP_KERNEL);
++ if (!mem)
++ return -ENOMEM;
++
++ data = mem;
++ id_blob = mem + data_size;
++
++ data->address = __psp_pa(id_blob);
++ data->len = user_size;
++
++ ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error);
++ if (!ret) {
++ if (copy_to_user((void __user *)argp->data, id_blob, data->len))
++ ret = -EFAULT;
++ }
++
++ kfree(mem);
++
++ return ret;
++}
++
+ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp)
+ {
+ struct sev_user_data_pdh_cert_export input;
+@@ -647,6 +688,9 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
+ case SEV_PDH_CERT_EXPORT:
+ ret = sev_ioctl_do_pdh_export(&input);
+ break;
++ case SEV_GET_ID:
++ ret = sev_ioctl_do_get_id(&input);
++ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h
+index 1d24962..827c601 100644
+--- a/include/linux/psp-sev.h
++++ b/include/linux/psp-sev.h
+@@ -55,6 +55,7 @@ enum sev_cmd {
+ SEV_CMD_PDH_GEN = 0x009,
+ SEV_CMD_DF_FLUSH = 0x00A,
+ SEV_CMD_DOWNLOAD_FIRMWARE = 0x00B,
++ SEV_CMD_GET_ID = 0x00C,
+
+ /* Guest commands */
+ SEV_CMD_DECOMMISSION = 0x020,
+@@ -142,6 +143,16 @@ struct sev_data_download_firmware {
+ } __packed;
+
+ /**
++ * struct sev_data_get_id - GET_ID command parameters
++ *
++ * @address: physical address of region to place unique CPU ID(s)
++ * @len: len of the region
++ */
++struct sev_data_get_id {
++ u64 address; /* In */
++ u32 len; /* In/Out */
++} __packed;
++/**
+ * struct sev_data_pdh_cert_export - PDH_CERT_EXPORT command parameters
+ *
+ * @pdh_address: PDH certificate address
+diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
+index 3d77fe9..86c7e0a 100644
+--- a/include/uapi/linux/psp-sev.h
++++ b/include/uapi/linux/psp-sev.h
+@@ -30,6 +30,7 @@ enum {
+ SEV_PDH_GEN,
+ SEV_PDH_CERT_EXPORT,
+ SEV_PEK_CERT_IMPORT,
++ SEV_GET_ID,
+
+ SEV_MAX,
+ };
+@@ -124,6 +125,17 @@ struct sev_user_data_pdh_cert_export {
+ } __packed;
+
+ /**
++ * struct sev_user_data_get_id - GET_ID command parameters
++ *
++ * @socket1: Buffer to pass unique ID of first socket
++ * @socket2: Buffer to pass unique ID of second socket
++ */
++struct sev_user_data_get_id {
++ __u8 socket1[64]; /* Out */
++ __u8 socket2[64]; /* Out */
++} __packed;
++
++/**
+ * struct sev_issue_cmd - SEV ioctl parameters
+ *
+ * @cmd: SEV commands to execute
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0041-include-psp-sev-Capitalize-invalid-length-enum.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0041-include-psp-sev-Capitalize-invalid-length-enum.patch
new file mode 100644
index 00000000..98d56e30
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0041-include-psp-sev-Capitalize-invalid-length-enum.patch
@@ -0,0 +1,41 @@
+From 7050f652da6f559538e127c9a3c38e894210a0db Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 15 Jan 2018 07:32:04 -0600
+Subject: [PATCH 41/95] include: psp-sev: Capitalize invalid length enum
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Commit 1d57b17c60ff ("crypto: ccp: Define SEV userspace ioctl and command
+id") added the invalid length enum but we missed capitalizing it.
+
+Fixes: 1d57b17c60ff (crypto: ccp: Define SEV userspace ioctl ...)
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+CC: Gary R Hook <gary.hook@amd.com>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/uapi/linux/psp-sev.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
+index 86c7e0a..ac8c60b 100644
+--- a/include/uapi/linux/psp-sev.h
++++ b/include/uapi/linux/psp-sev.h
+@@ -43,7 +43,7 @@ typedef enum {
+ SEV_RET_INVALID_PLATFORM_STATE,
+ SEV_RET_INVALID_GUEST_STATE,
+ SEV_RET_INAVLID_CONFIG,
+- SEV_RET_INVALID_len,
++ SEV_RET_INVALID_LEN,
+ SEV_RET_ALREADY_OWNED,
+ SEV_RET_INVALID_CERTIFICATE,
+ SEV_RET_POLICY_FAILURE,
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0042-Documentation-x86-Add-AMD-Secure-Encrypted-Virtualiz.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0042-Documentation-x86-Add-AMD-Secure-Encrypted-Virtualiz.patch
new file mode 100644
index 00000000..619087b4
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0042-Documentation-x86-Add-AMD-Secure-Encrypted-Virtualiz.patch
@@ -0,0 +1,92 @@
+From 73e22bb33ff668812fefcfb4b4faa003666bb790 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Fri, 20 Oct 2017 09:30:43 -0500
+Subject: [PATCH 42/95] Documentation/x86: Add AMD Secure Encrypted
+ Virtualization (SEV) description
+
+Update the AMD memory encryption document describing the Secure Encrypted
+Virtualization (SEV) feature.
+
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Link: https://lkml.kernel.org/r/20171020143059.3291-2-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ Documentation/x86/amd-memory-encryption.txt | 30 +++++++++++++++++++++++++----
+ 1 file changed, 26 insertions(+), 4 deletions(-)
+
+diff --git a/Documentation/x86/amd-memory-encryption.txt b/Documentation/x86/amd-memory-encryption.txt
+index f512ab7..afc41f5 100644
+--- a/Documentation/x86/amd-memory-encryption.txt
++++ b/Documentation/x86/amd-memory-encryption.txt
+@@ -1,4 +1,5 @@
+-Secure Memory Encryption (SME) is a feature found on AMD processors.
++Secure Memory Encryption (SME) and Secure Encrypted Virtualization (SEV) are
++features found on AMD processors.
+
+ SME provides the ability to mark individual pages of memory as encrypted using
+ the standard x86 page tables. A page that is marked encrypted will be
+@@ -6,24 +7,38 @@ automatically decrypted when read from DRAM and encrypted when written to
+ DRAM. SME can therefore be used to protect the contents of DRAM from physical
+ attacks on the system.
+
++SEV enables running encrypted virtual machines (VMs) in which the code and data
++of the guest VM are secured so that a decrypted version is available only
++within the VM itself. SEV guest VMs have the concept of private and shared
++memory. Private memory is encrypted with the guest-specific key, while shared
++memory may be encrypted with hypervisor key. When SME is enabled, the hypervisor
++key is the same key which is used in SME.
++
+ A page is encrypted when a page table entry has the encryption bit set (see
+ below on how to determine its position). The encryption bit can also be
+ specified in the cr3 register, allowing the PGD table to be encrypted. Each
+ successive level of page tables can also be encrypted by setting the encryption
+ bit in the page table entry that points to the next table. This allows the full
+ page table hierarchy to be encrypted. Note, this means that just because the
+-encryption bit is set in cr3, doesn't imply the full hierarchy is encyrpted.
++encryption bit is set in cr3, doesn't imply the full hierarchy is encrypted.
+ Each page table entry in the hierarchy needs to have the encryption bit set to
+ achieve that. So, theoretically, you could have the encryption bit set in cr3
+ so that the PGD is encrypted, but not set the encryption bit in the PGD entry
+ for a PUD which results in the PUD pointed to by that entry to not be
+ encrypted.
+
+-Support for SME can be determined through the CPUID instruction. The CPUID
+-function 0x8000001f reports information related to SME:
++When SEV is enabled, instruction pages and guest page tables are always treated
++as private. All the DMA operations inside the guest must be performed on shared
++memory. Since the memory encryption bit is controlled by the guest OS when it
++is operating in 64-bit or 32-bit PAE mode, in all other modes the SEV hardware
++forces the memory encryption bit to 1.
++
++Support for SME and SEV can be determined through the CPUID instruction. The
++CPUID function 0x8000001f reports information related to SME:
+
+ 0x8000001f[eax]:
+ Bit[0] indicates support for SME
++ Bit[1] indicates support for SEV
+ 0x8000001f[ebx]:
+ Bits[5:0] pagetable bit number used to activate memory
+ encryption
+@@ -39,6 +54,13 @@ determine if SME is enabled and/or to enable memory encryption:
+ Bit[23] 0 = memory encryption features are disabled
+ 1 = memory encryption features are enabled
+
++If SEV is supported, MSR 0xc0010131 (MSR_AMD64_SEV) can be used to determine if
++SEV is active:
++
++ 0xc0010131:
++ Bit[0] 0 = memory encryption is not active
++ 1 = memory encryption is active
++
+ Linux relies on BIOS to set this bit if BIOS has determined that the reduction
+ in the physical address space as a result of enabling memory encryption (see
+ CPUID information above) will not conflict with the address space resource
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch
new file mode 100644
index 00000000..3ce54c27
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0043-x86-mm-Add-Secure-Encrypted-Virtualization-SEV-suppo.patch
@@ -0,0 +1,121 @@
+From 6bcf9c90963dc4402738e2875e446770d283b08c Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 13 Aug 2018 12:19:45 +0530
+Subject: [PATCH 43/95] x86/mm: Add Secure Encrypted Virtualization (SEV)
+ support
+
+From d8aa7eea78a1401cce39b3bb61ead0150044a3df
+
+Provide support for Secure Encrypted Virtualization (SEV). This initial
+support defines a flag that is used by the kernel to determine if it is
+running with SEV active.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: https://lkml.kernel.org/r/20171020143059.3291-3-brijesh.singh@amd.com
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/mem_encrypt.h | 6 ++++++
+ arch/x86/mm/mem_encrypt.c | 26 ++++++++++++++++++++++++++
+ include/linux/mem_encrypt.h | 7 +++++--
+ 3 files changed, 37 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index e7d96c0..ad91ab5 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -47,6 +47,9 @@ void __init mem_encrypt_init(void);
+
+ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size);
+
++bool sme_active(void);
++bool sev_active(void);
++
+ #else /* !CONFIG_AMD_MEM_ENCRYPT */
+
+ #define sme_me_mask 0ULL
+@@ -64,6 +67,9 @@ static inline void __init sme_early_init(void) { }
+ static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
+ static inline void __init sme_enable(struct boot_params *bp) { }
+
++static inline bool sme_active(void) { return false; }
++static inline bool sev_active(void) { return false; }
++
+ #endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+ /*
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 48c03c7..4e4a304 100644
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -42,6 +42,8 @@ static char sme_cmdline_off[] __initdata = "off";
+ u64 sme_me_mask __section(.data) = 0;
+ EXPORT_SYMBOL(sme_me_mask);
+
++static bool sev_enabled __section(.data);
++
+ /* Buffer used for early in-place encryption by BSP, no locking needed */
+ static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+@@ -192,6 +194,30 @@ void __init sme_early_init(void)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
+ }
+
++/*
++ * SME and SEV are very similar but they are not the same, so there are
++ * times that the kernel will need to distinguish between SME and SEV. The
++ * sme_active() and sev_active() functions are used for this. When a
++ * distinction isn't needed, the mem_encrypt_active() function can be used.
++ *
++ * The trampoline code is a good example for this requirement. Before
++ * paging is activated, SME will access all memory as decrypted, but SEV
++ * will access all memory as encrypted. So, when APs are being brought
++ * up under SME the trampoline area cannot be encrypted, whereas under SEV
++ * the trampoline area must be encrypted.
++ */
++bool sme_active(void)
++{
++ return sme_me_mask && !sev_enabled;
++}
++EXPORT_SYMBOL_GPL(sme_active);
++
++bool sev_active(void)
++{
++ return sme_me_mask && sev_enabled;
++}
++EXPORT_SYMBOL_GPL(sev_active);
++
+ /* Architecture __weak replacement functions */
+ void __init mem_encrypt_init(void)
+ {
+diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
+index 265a9cd..b310a9c 100644
+--- a/include/linux/mem_encrypt.h
++++ b/include/linux/mem_encrypt.h
+@@ -23,11 +23,14 @@
+
+ #define sme_me_mask 0ULL
+
++static inline bool sme_active(void) { return false; }
++static inline bool sev_active(void) { return false; }
++
+ #endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */
+
+-static inline bool sme_active(void)
++static inline bool mem_encrypt_active(void)
+ {
+- return !!sme_me_mask;
++ return sme_me_mask;
+ }
+
+ static inline u64 sme_get_me_mask(void)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0044-x86-mm-Insure-that-boot-memory-areas-are-mapped-prop.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0044-x86-mm-Insure-that-boot-memory-areas-are-mapped-prop.patch
new file mode 100644
index 00000000..1556de93
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0044-x86-mm-Insure-that-boot-memory-areas-are-mapped-prop.patch
@@ -0,0 +1,76 @@
+From ecf8d81ff98e04cce37a8d4453c58a2f0d0b818a Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 13 Aug 2018 13:02:46 +0530
+Subject: [PATCH 44/95] x86/mm: Insure that boot memory areas are mapped
+ properly
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From b9d05200bc12444c7778a49c9694d8382ed06aa8
+The boot data and command line data are present in memory in a decrypted
+state and are copied early in the boot process. The early page fault
+support will map these areas as encrypted, so before attempting to copy
+them, add decrypted mappings so the data is accessed properly when copied.
+
+For the initrd, encrypt this data in place. Since the future mapping of
+the initrd area will be mapped as encrypted the data will be accessed
+properly.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Larry Woodman <lwoodman@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Michael S. Tsirkin <mst@redhat.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Toshimitsu Kani <toshi.kani@hpe.com>
+Cc: kasan-dev@googlegroups.com
+Cc: kvm@vger.kernel.org
+Cc: linux-arch@vger.kernel.org
+Cc: linux-doc@vger.kernel.org
+Cc: linux-efi@vger.kernel.org
+Cc: linux-mm@kvack.org
+Link: http://lkml.kernel.org/r/bb0d430b41efefd45ee515aaf0979dcfda8b6a44.1500319216.git.thomas.lendacky@amd.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/pgtable.h | 3 +++
+ arch/x86/mm/mem_encrypt.c | 0
+ 2 files changed, 3 insertions(+)
+ mode change 100644 => 100755 arch/x86/mm/mem_encrypt.c
+
+diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
+index 6a4b1a5..dbf9fc7 100644
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -27,6 +27,9 @@
+ extern pgd_t early_top_pgt[PTRS_PER_PGD];
+ int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
+
++extern pgd_t early_top_pgt[PTRS_PER_PGD];
++int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
++
+ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
+ void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
+ void ptdump_walk_pgd_level_checkwx(void);
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+old mode 100644
+new mode 100755
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0045-x86-mm-Don-t-attempt-to-encrypt-initrd-under-SEV.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0045-x86-mm-Don-t-attempt-to-encrypt-initrd-under-SEV.patch
new file mode 100644
index 00000000..1da2e057
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0045-x86-mm-Don-t-attempt-to-encrypt-initrd-under-SEV.patch
@@ -0,0 +1,52 @@
+From e15f9433db1cdc2f48c3b9529b0d009b66ed4613 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 13 Aug 2018 14:34:26 +0530
+Subject: [PATCH 45/95] x86/mm: Don't attempt to encrypt initrd under SEV
+
+From 682af54399b6111730aec0be63e5f6a3a3359a76
+
+When SEV is active the initrd/initramfs will already have been
+placed in memory encrypted so do not try to encrypt it.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: https://lkml.kernel.org/r/20171020143059.3291-4-brijesh.singh@amd.com
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kernel/setup.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+ mode change 100644 => 100755 arch/x86/kernel/setup.c
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+old mode 100644
+new mode 100755
+index dcb00ac..aa23f8c
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -376,6 +376,16 @@ static void __init reserve_initrd(void)
+ !ramdisk_image || !ramdisk_size)
+ return; /* No initrd provided by bootloader */
+
++ /*
++ * If SME is active, this memory will be marked encrypted by the
++ * kernel when it is accessed (including relocation). However, the
++ * ramdisk image was loaded decrypted by the bootloader, so make
++ * sure that it is encrypted before accessing it. For SEV the
++ * ramdisk will already be encrypted, so only do this for SME.
++ */
++ if (sme_active())
++ sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
++
+ initrd_start = 0;
+
+ mapped_size = memblock_mem_size(max_pfn_mapped);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0046-x86-mm-Use-encrypted-access-of-boot-related-data-wit.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0046-x86-mm-Use-encrypted-access-of-boot-related-data-wit.patch
new file mode 100644
index 00000000..a2d28208
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0046-x86-mm-Use-encrypted-access-of-boot-related-data-wit.patch
@@ -0,0 +1,118 @@
+From 95a0bf447324daaff28bc9cd7b2f6f0990421dc7 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:47 -0500
+Subject: [PATCH 46/95] x86/mm: Use encrypted access of boot related data with
+ SEV
+
+When Secure Encrypted Virtualization (SEV) is active, boot data (such as
+EFI related data, setup data) is encrypted and needs to be accessed as
+such when mapped. Update the architecture override in early_memremap to
+keep the encryption attribute when mapping this data.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: kvm@vger.kernel.org
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-6-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/mm/ioremap.c | 44 ++++++++++++++++++++++++++++++--------------
+ 1 file changed, 30 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index 7bebdd0..f0b91a2 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -422,6 +422,9 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
+ * areas should be mapped decrypted. And since the encryption key can
+ * change across reboots, persistent memory should also be mapped
+ * decrypted.
++ *
++ * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
++ * only persistent memory should be mapped decrypted.
+ */
+ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
+ unsigned long size)
+@@ -458,6 +461,11 @@ static bool memremap_should_map_decrypted(resource_size_t phys_addr,
+ case E820_TYPE_ACPI:
+ case E820_TYPE_NVS:
+ case E820_TYPE_UNUSABLE:
++ /* For SEV, these areas are encrypted */
++ if (sev_active())
++ break;
++ /* Fallthrough */
++
+ case E820_TYPE_PRAM:
+ return true;
+ default:
+@@ -581,7 +589,7 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
+ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
+ unsigned long flags)
+ {
+- if (!sme_active())
++ if (!mem_encrypt_active())
+ return true;
+
+ if (flags & MEMREMAP_ENC)
+@@ -590,12 +598,13 @@ bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
+ if (flags & MEMREMAP_DEC)
+ return false;
+
+- if (memremap_is_setup_data(phys_addr, size) ||
+- memremap_is_efi_data(phys_addr, size) ||
+- memremap_should_map_decrypted(phys_addr, size))
+- return false;
++ if (sme_active()) {
++ if (memremap_is_setup_data(phys_addr, size) ||
++ memremap_is_efi_data(phys_addr, size))
++ return false;
++ }
+
+- return true;
++ return !memremap_should_map_decrypted(phys_addr, size);
+ }
+
+ /*
+@@ -608,17 +617,24 @@ pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
+ unsigned long size,
+ pgprot_t prot)
+ {
+- if (!sme_active())
++ bool encrypted_prot;
++
++ if (!mem_encrypt_active())
+ return prot;
+
+- if (early_memremap_is_setup_data(phys_addr, size) ||
+- memremap_is_efi_data(phys_addr, size) ||
+- memremap_should_map_decrypted(phys_addr, size))
+- prot = pgprot_decrypted(prot);
+- else
+- prot = pgprot_encrypted(prot);
++ encrypted_prot = true;
++
++ if (sme_active()) {
++ if (early_memremap_is_setup_data(phys_addr, size) ||
++ memremap_is_efi_data(phys_addr, size))
++ encrypted_prot = false;
++ }
++
++ if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
++ encrypted_prot = false;
+
+- return prot;
++ return encrypted_prot ? pgprot_encrypted(prot)
++ : pgprot_decrypted(prot);
+ }
+
+ bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0047-x86-mm-Include-SEV-for-encryption-memory-attribute-c.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0047-x86-mm-Include-SEV-for-encryption-memory-attribute-c.patch
new file mode 100644
index 00000000..620faa8d
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0047-x86-mm-Include-SEV-for-encryption-memory-attribute-c.patch
@@ -0,0 +1,47 @@
+From ff57f938f629f70bce162adee6ee007315a4e4e2 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:48 -0500
+Subject: [PATCH 47/95] x86/mm: Include SEV for encryption memory attribute
+ changes
+
+The current code checks only for sme_active() when determining whether
+to perform the encryption attribute change. Include sev_active() in this
+check so that memory attribute changes can occur under SME and SEV.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: John Ogness <john.ogness@linutronix.de>
+Cc: kvm@vger.kernel.org
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-7-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/mm/pageattr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
+index 464f53d..be517db 100644
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -1783,8 +1783,8 @@ static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
+ unsigned long start;
+ int ret;
+
+- /* Nothing to do if the SME is not active */
+- if (!sme_active())
++ /* Nothing to do if memory encryption is not active */
++ if (!mem_encrypt_active())
+ return 0;
+
+ /* Should not be working on unaligned addresses */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0048-x86-efi-Access-EFI-data-as-encrypted-when-SEV-is-act.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0048-x86-efi-Access-EFI-data-as-encrypted-when-SEV-is-act.patch
new file mode 100644
index 00000000..91605218
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0048-x86-efi-Access-EFI-data-as-encrypted-when-SEV-is-act.patch
@@ -0,0 +1,85 @@
+From 5738318ceb0dc0f9877ce766246fbae6759affd8 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:49 -0500
+Subject: [PATCH 48/95] x86/efi: Access EFI data as encrypted when SEV is
+ active
+
+EFI data is encrypted when the kernel is run under SEV. Update the
+page table references to be sure the EFI memory areas are accessed
+encrypted.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: linux-efi@vger.kernel.org
+Cc: kvm@vger.kernel.org
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Matt Fleming <matt@codeblueprint.co.uk>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Link: https://lkml.kernel.org/r/20171020143059.3291-8-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/platform/efi/efi_64.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index ae369c2..2833e66 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -33,6 +33,7 @@
+ #include <linux/reboot.h>
+ #include <linux/slab.h>
+ #include <linux/ucs2_string.h>
++#include <linux/mem_encrypt.h>
+
+ #include <asm/setup.h>
+ #include <asm/page.h>
+@@ -375,7 +376,11 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+ * as trim_bios_range() will reserve the first page and isolate it away
+ * from memory allocators anyway.
+ */
+- if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
++ pf = _PAGE_RW;
++ if (sev_active())
++ pf |= _PAGE_ENC;
++
++ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
+ pr_err("Failed to create 1:1 mapping for the first page!\n");
+ return 1;
+ }
+@@ -418,6 +423,9 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va)
+ if (!(md->attribute & EFI_MEMORY_WB))
+ flags |= _PAGE_PCD;
+
++ if (sev_active())
++ flags |= _PAGE_ENC;
++
+ pfn = md->phys_addr >> PAGE_SHIFT;
+ if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
+ pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+@@ -544,6 +552,9 @@ static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *m
+ if (!(md->attribute & EFI_MEMORY_RO))
+ pf |= _PAGE_RW;
+
++ if (sev_active())
++ pf |= _PAGE_ENC;
++
+ return efi_update_mappings(md, pf);
+ }
+
+@@ -595,6 +606,9 @@ void __init efi_runtime_update_mappings(void)
+ (md->type != EFI_RUNTIME_SERVICES_CODE))
+ pf |= _PAGE_RW;
+
++ if (sev_active())
++ pf |= _PAGE_ENC;
++
+ efi_update_mappings(md, pf);
+ }
+ }
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0049-resource-Consolidate-resource-walking-code.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0049-resource-Consolidate-resource-walking-code.patch
new file mode 100644
index 00000000..7ca4eaa4
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0049-resource-Consolidate-resource-walking-code.patch
@@ -0,0 +1,120 @@
+From 414df3a2d92d7d8659d418306e3ae13256497224 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:50 -0500
+Subject: [PATCH 49/95] resource: Consolidate resource walking code
+
+The walk_iomem_res_desc(), walk_system_ram_res() and walk_system_ram_range()
+functions each have much of the same code.
+
+Create a new function that consolidates the common code from these
+functions in one place to reduce the amount of duplicated code.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: Borislav Petkov <bp@alien8.de>
+Link: https://lkml.kernel.org/r/20171020143059.3291-9-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ kernel/resource.c | 52 +++++++++++++++++++++++++---------------------------
+ 1 file changed, 25 insertions(+), 27 deletions(-)
+
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 7ee3dd1..1260f30 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -400,6 +400,26 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
+ return 0;
+ }
+
++static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
++ bool first_level_children_only,
++ void *arg, int (*func)(u64, u64, void *))
++{
++ u64 orig_end = res->end;
++ int ret = -1;
++
++ while ((res->start < res->end) &&
++ !find_next_iomem_res(res, desc, first_level_children_only)) {
++ ret = (*func)(res->start, res->end, arg);
++ if (ret)
++ break;
++
++ res->start = res->end + 1;
++ res->end = orig_end;
++ }
++
++ return ret;
++}
++
+ /*
+ * Walks through iomem resources and calls func() with matching resource
+ * ranges. This walks through whole tree and not just first level children.
+@@ -418,26 +438,12 @@ int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
+ u64 end, void *arg, int (*func)(u64, u64, void *))
+ {
+ struct resource res;
+- u64 orig_end;
+- int ret = -1;
+
+ res.start = start;
+ res.end = end;
+ res.flags = flags;
+- orig_end = res.end;
+-
+- while ((res.start < res.end) &&
+- (!find_next_iomem_res(&res, desc, false))) {
+-
+- ret = (*func)(res.start, res.end, arg);
+- if (ret)
+- break;
+-
+- res.start = res.end + 1;
+- res.end = orig_end;
+- }
+
+- return ret;
++ return __walk_iomem_res_desc(&res, desc, false, arg, func);
+ }
+
+ /*
+@@ -451,22 +457,13 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
+ int (*func)(u64, u64, void *))
+ {
+ struct resource res;
+- u64 orig_end;
+- int ret = -1;
+
+ res.start = start;
+ res.end = end;
+ res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+- orig_end = res.end;
+- while ((res.start < res.end) &&
+- (!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
+- ret = (*func)(res.start, res.end, arg);
+- if (ret)
+- break;
+- res.start = res.end + 1;
+- res.end = orig_end;
+- }
+- return ret;
++
++ return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
++ arg, func);
+ }
+
+ #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
+@@ -508,6 +505,7 @@ static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
+ {
+ return 1;
+ }
++
+ /*
+ * This generic page_is_ram() returns true if specified address is
+ * registered as System RAM in iomem_resource list.
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0050-resource-Provide-resource-struct-in-resource-walk-ca.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0050-resource-Provide-resource-struct-in-resource-walk-ca.patch
new file mode 100644
index 00000000..74f4fed2
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0050-resource-Provide-resource-struct-in-resource-walk-ca.patch
@@ -0,0 +1,251 @@
+From d6af0f1dc8fc9dbc056ce5e79750747f6d1ca300 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:51 -0500
+Subject: [PATCH 50/95] resource: Provide resource struct in resource walk
+ callback
+
+In preparation for a new function that will need additional resource
+information during the resource walk, update the resource walk callback to
+pass the resource structure. Since the current callback start and end
+arguments are pulled from the resource structure, the callback functions
+can obtain them from the resource structure directly.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: https://lkml.kernel.org/r/20171020143059.3291-10-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/powerpc/kernel/machine_kexec_file_64.c | 12 +++++++++---
+ arch/x86/kernel/crash.c | 18 +++++++++---------
+ arch/x86/kernel/pmem.c | 2 +-
+ include/linux/ioport.h | 4 ++--
+ include/linux/kexec.h | 2 +-
+ kernel/kexec_file.c | 5 +++--
+ kernel/resource.c | 9 +++++----
+ 7 files changed, 30 insertions(+), 22 deletions(-)
+
+diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
+index c66132b..45e0b7d 100644
+--- a/arch/powerpc/kernel/machine_kexec_file_64.c
++++ b/arch/powerpc/kernel/machine_kexec_file_64.c
+@@ -91,11 +91,13 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
+ * and that value will be returned. If all free regions are visited without
+ * func returning non-zero, then zero will be returned.
+ */
+-int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
++int arch_kexec_walk_mem(struct kexec_buf *kbuf,
++ int (*func)(struct resource *, void *))
+ {
+ int ret = 0;
+ u64 i;
+ phys_addr_t mstart, mend;
++ struct resource res = { };
+
+ if (kbuf->top_down) {
+ for_each_free_mem_range_reverse(i, NUMA_NO_NODE, 0,
+@@ -105,7 +107,9 @@ int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+- ret = func(mstart, mend - 1, kbuf);
++ res.start = mstart;
++ res.end = mend - 1;
++ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+@@ -117,7 +121,9 @@ int arch_kexec_walk_mem(struct kexec_buf *kbuf, int (*func)(u64, u64, void *))
+ * range while in kexec, end points to the last byte
+ * in the range.
+ */
+- ret = func(mstart, mend - 1, kbuf);
++ res.start = mstart;
++ res.end = mend - 1;
++ ret = func(&res, kbuf);
+ if (ret)
+ break;
+ }
+diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
+index 44404e2..815008c 100644
+--- a/arch/x86/kernel/crash.c
++++ b/arch/x86/kernel/crash.c
+@@ -209,7 +209,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
+ }
+
+ #ifdef CONFIG_KEXEC_FILE
+-static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
++static int get_nr_ram_ranges_callback(struct resource *res, void *arg)
+ {
+ unsigned int *nr_ranges = arg;
+
+@@ -342,7 +342,7 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
+ return ret;
+ }
+
+-static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
++static int prepare_elf64_ram_headers_callback(struct resource *res, void *arg)
+ {
+ struct crash_elf_data *ced = arg;
+ Elf64_Ehdr *ehdr;
+@@ -355,7 +355,7 @@ static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
+ ehdr = ced->ehdr;
+
+ /* Exclude unwanted mem ranges */
+- ret = elf_header_exclude_ranges(ced, start, end);
++ ret = elf_header_exclude_ranges(ced, res->start, res->end);
+ if (ret)
+ return ret;
+
+@@ -518,14 +518,14 @@ static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
+ return 0;
+ }
+
+-static int memmap_entry_callback(u64 start, u64 end, void *arg)
++static int memmap_entry_callback(struct resource *res, void *arg)
+ {
+ struct crash_memmap_data *cmd = arg;
+ struct boot_params *params = cmd->params;
+ struct e820_entry ei;
+
+- ei.addr = start;
+- ei.size = end - start + 1;
++ ei.addr = res->start;
++ ei.size = res->end - res->start + 1;
+ ei.type = cmd->type;
+ add_e820_entry(params, &ei);
+
+@@ -619,12 +619,12 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
+ return ret;
+ }
+
+-static int determine_backup_region(u64 start, u64 end, void *arg)
++static int determine_backup_region(struct resource *res, void *arg)
+ {
+ struct kimage *image = arg;
+
+- image->arch.backup_src_start = start;
+- image->arch.backup_src_sz = end - start + 1;
++ image->arch.backup_src_start = res->start;
++ image->arch.backup_src_sz = res->end - res->start + 1;
+
+ /* Expecting only one range for backup region */
+ return 1;
+diff --git a/arch/x86/kernel/pmem.c b/arch/x86/kernel/pmem.c
+index 3fe6900..6b07faa 100644
+--- a/arch/x86/kernel/pmem.c
++++ b/arch/x86/kernel/pmem.c
+@@ -7,7 +7,7 @@
+ #include <linux/init.h>
+ #include <linux/ioport.h>
+
+-static int found(u64 start, u64 end, void *data)
++static int found(struct resource *res, void *data)
+ {
+ return 1;
+ }
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index 83c8d65..c0070d7 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -272,10 +272,10 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *));
+ extern int
+ walk_system_ram_res(u64 start, u64 end, void *arg,
+- int (*func)(u64, u64, void *));
++ int (*func)(struct resource *, void *));
+ extern int
+ walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
+- void *arg, int (*func)(u64, u64, void *));
++ void *arg, int (*func)(struct resource *, void *));
+
+ /* True if any part of r1 overlaps r2 */
+ static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index 1c08c92..f16f6ce 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -160,7 +160,7 @@ struct kexec_buf {
+ };
+
+ int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+- int (*func)(u64, u64, void *));
++ int (*func)(struct resource *, void *));
+ extern int kexec_add_buffer(struct kexec_buf *kbuf);
+ int kexec_locate_mem_hole(struct kexec_buf *kbuf);
+ #endif /* CONFIG_KEXEC_FILE */
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index 9f48f44..e5bcd94 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -406,9 +406,10 @@ static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
+ return 1;
+ }
+
+-static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
++static int locate_mem_hole_callback(struct resource *res, void *arg)
+ {
+ struct kexec_buf *kbuf = (struct kexec_buf *)arg;
++ u64 start = res->start, end = res->end;
+ unsigned long sz = end - start + 1;
+
+ /* Returning 0 will take to next memory range */
+@@ -437,7 +438,7 @@ static int locate_mem_hole_callback(u64 start, u64 end, void *arg)
+ * func returning non-zero, then zero will be returned.
+ */
+ int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
+- int (*func)(u64, u64, void *))
++ int (*func)(struct resource *, void *))
+ {
+ if (kbuf->image->type == KEXEC_TYPE_CRASH)
+ return walk_iomem_res_desc(crashk_res.desc,
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 1260f30..78cae82 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -402,14 +402,15 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
+
+ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
+ bool first_level_children_only,
+- void *arg, int (*func)(u64, u64, void *))
++ void *arg,
++ int (*func)(struct resource *, void *))
+ {
+ u64 orig_end = res->end;
+ int ret = -1;
+
+ while ((res->start < res->end) &&
+ !find_next_iomem_res(res, desc, first_level_children_only)) {
+- ret = (*func)(res->start, res->end, arg);
++ ret = (*func)(res, arg);
+ if (ret)
+ break;
+
+@@ -435,7 +436,7 @@ static int __walk_iomem_res_desc(struct resource *res, unsigned long desc,
+ * <linux/ioport.h> and set it in 'desc' of a target resource entry.
+ */
+ int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
+- u64 end, void *arg, int (*func)(u64, u64, void *))
++ u64 end, void *arg, int (*func)(struct resource *, void *))
+ {
+ struct resource res;
+
+@@ -454,7 +455,7 @@ int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
+ * ranges.
+ */
+ int walk_system_ram_res(u64 start, u64 end, void *arg,
+- int (*func)(u64, u64, void *))
++ int (*func)(struct resource *, void *))
+ {
+ struct resource res;
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch
new file mode 100644
index 00000000..99776243
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch
@@ -0,0 +1,218 @@
+From 8a49eeabdeed5b414efeb82c97ba44278e591819 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:52 -0500
+Subject: [PATCH 51/95] x86/mm, resource: Use PAGE_KERNEL protection for
+ ioremap of memory pages
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In order for memory pages to be properly mapped when SEV is active, it's
+necessary to use the PAGE_KERNEL protection attribute as the base
+protection. This ensures that memory mapping of, e.g. ACPI tables,
+receives the proper mapping attributes.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kvm@vger.kernel.org
+Cc: Jérôme Glisse <jglisse@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-11-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/mm/ioremap.c | 79 ++++++++++++++++++++++++++++++++++++++++++--------
+ include/linux/ioport.h | 3 ++
+ kernel/resource.c | 19 ++++++++++++
+ 3 files changed, 89 insertions(+), 12 deletions(-)
+
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index f0b91a2..e2db83b 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -27,6 +27,11 @@
+
+ #include "physaddr.h"
+
++struct ioremap_mem_flags {
++ bool system_ram;
++ bool desc_other;
++};
++
+ /*
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+ return err;
+ }
+
+-static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+- void *arg)
++static bool __ioremap_check_ram(struct resource *res)
+ {
++ unsigned long start_pfn, stop_pfn;
+ unsigned long i;
+
+- for (i = 0; i < nr_pages; ++i)
+- if (pfn_valid(start_pfn + i) &&
+- !PageReserved(pfn_to_page(start_pfn + i)))
+- return 1;
++ if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
++ return false;
+
+- return 0;
++ start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ stop_pfn = (res->end + 1) >> PAGE_SHIFT;
++ if (stop_pfn > start_pfn) {
++ for (i = 0; i < (stop_pfn - start_pfn); ++i)
++ if (pfn_valid(start_pfn + i) &&
++ !PageReserved(pfn_to_page(start_pfn + i)))
++ return true;
++ }
++
++ return false;
++}
++
++static int __ioremap_check_desc_other(struct resource *res)
++{
++ return (res->desc != IORES_DESC_NONE);
++}
++
++static int __ioremap_res_check(struct resource *res, void *arg)
++{
++ struct ioremap_mem_flags *flags = arg;
++
++ if (!flags->system_ram)
++ flags->system_ram = __ioremap_check_ram(res);
++
++ if (!flags->desc_other)
++ flags->desc_other = __ioremap_check_desc_other(res);
++
++ return flags->system_ram && flags->desc_other;
++}
++
++/*
++ * To avoid multiple resource walks, this function walks resources marked as
++ * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
++ * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
++ */
++static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
++ struct ioremap_mem_flags *flags)
++{
++ u64 start, end;
++
++ start = (u64)addr;
++ end = start + size - 1;
++ memset(flags, 0, sizeof(*flags));
++
++ walk_mem_res(start, end, flags, __ioremap_res_check);
+ }
+
+ /*
+@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ unsigned long size, enum page_cache_mode pcm, void *caller)
+ {
+ unsigned long offset, vaddr;
+- resource_size_t pfn, last_pfn, last_addr;
++ resource_size_t last_addr;
+ const resource_size_t unaligned_phys_addr = phys_addr;
+ const unsigned long unaligned_size = size;
++ struct ioremap_mem_flags mem_flags;
+ struct vm_struct *area;
+ enum page_cache_mode new_pcm;
+ pgprot_t prot;
+@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ return NULL;
+ }
+
++ __ioremap_check_mem(phys_addr, size, &mem_flags);
++
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+- pfn = phys_addr >> PAGE_SHIFT;
+- last_pfn = last_addr >> PAGE_SHIFT;
+- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+- __ioremap_check_ram) == 1) {
++ if (mem_flags.system_ram) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
+ return NULL;
+@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ pcm = new_pcm;
+ }
+
++ /*
++ * If the page being mapped is in memory and SEV is active then
++ * make sure the memory encryption attribute is enabled in the
++ * resulting mapping.
++ */
+ prot = PAGE_KERNEL_IO;
++ if (sev_active() && mem_flags.desc_other)
++ prot = pgprot_encrypted(prot);
++
+ switch (pcm) {
+ case _PAGE_CACHE_MODE_UC:
+ default:
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index c0070d7..93b4183 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -271,6 +271,9 @@ extern int
+ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *));
+ extern int
++walk_mem_res(u64 start, u64 end, void *arg,
++ int (*func)(struct resource *, void *));
++extern int
+ walk_system_ram_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+ extern int
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 78cae82..790f1b8 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -397,6 +397,8 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
+ res->start = p->start;
+ if (res->end > p->end)
+ res->end = p->end;
++ res->flags = p->flags;
++ res->desc = p->desc;
+ return 0;
+ }
+
+@@ -467,6 +469,23 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
+ arg, func);
+ }
+
++/*
++ * This function calls the @func callback against all memory ranges, which
++ * are ranges marked as IORESOURCE_MEM and IORESOUCE_BUSY.
++ */
++int walk_mem_res(u64 start, u64 end, void *arg,
++ int (*func)(struct resource *, void *))
++{
++ struct resource res;
++
++ res.start = start;
++ res.end = end;
++ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++
++ return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
++ arg, func);
++}
++
+ #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
+
+ /*
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch
new file mode 100644
index 00000000..611d718b
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch
@@ -0,0 +1,154 @@
+From 45f7a023504c57c0820c1d816a7298bf35ba0134 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:53 -0500
+Subject: [PATCH 52/95] x86/mm: Add DMA support for SEV memory encryption
+
+DMA access to encrypted memory cannot be performed when SEV is active.
+In order for DMA to properly work when SEV is active, the SWIOTLB bounce
+buffers must be used.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Link: https://lkml.kernel.org/r/20171020143059.3291-12-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/mm/mem_encrypt.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++
+ lib/swiotlb.c | 5 +--
+ 2 files changed, 89 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 4e4a304..3c82d64 100755
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -192,6 +192,70 @@ void __init sme_early_init(void)
+ /* Update the protection map with memory encryption mask */
+ for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+ protection_map[i] = pgprot_encrypted(protection_map[i]);
++
++ if (sev_active())
++ swiotlb_force = SWIOTLB_FORCE;
++}
++
++static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
++ gfp_t gfp, unsigned long attrs)
++{
++ unsigned long dma_mask;
++ unsigned int order;
++ struct page *page;
++ void *vaddr = NULL;
++
++ dma_mask = dma_alloc_coherent_mask(dev, gfp);
++ order = get_order(size);
++
++ /*
++ * Memory will be memset to zero after marking decrypted, so don't
++ * bother clearing it before.
++ */
++ gfp &= ~__GFP_ZERO;
++
++ page = alloc_pages_node(dev_to_node(dev), gfp, order);
++ if (page) {
++ dma_addr_t addr;
++
++ /*
++ * Since we will be clearing the encryption bit, check the
++ * mask with it already cleared.
++ */
++ addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
++ if ((addr + size) > dma_mask) {
++ __free_pages(page, get_order(size));
++ } else {
++ vaddr = page_address(page);
++ *dma_handle = addr;
++ }
++ }
++
++ if (!vaddr)
++ vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
++
++ if (!vaddr)
++ return NULL;
++
++ /* Clear the SME encryption bit for DMA use if not swiotlb area */
++ if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
++ set_memory_decrypted((unsigned long)vaddr, 1 << order);
++ memset(vaddr, 0, PAGE_SIZE << order);
++ *dma_handle = __sme_clr(*dma_handle);
++ }
++
++ return vaddr;
++}
++
++static void sev_free(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle, unsigned long attrs)
++{
++ /* Set the SME encryption bit for re-use if not swiotlb area */
++ if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
++ set_memory_encrypted((unsigned long)vaddr,
++ 1 << get_order(size));
++
++ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ }
+
+ /*
+@@ -218,6 +282,20 @@ bool sev_active(void)
+ }
+ EXPORT_SYMBOL_GPL(sev_active);
+
++static const struct dma_map_ops sev_dma_ops = {
++ .alloc = sev_alloc,
++ .free = sev_free,
++ .map_page = swiotlb_map_page,
++ .unmap_page = swiotlb_unmap_page,
++ .map_sg = swiotlb_map_sg_attrs,
++ .unmap_sg = swiotlb_unmap_sg_attrs,
++ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
++ .sync_single_for_device = swiotlb_sync_single_for_device,
++ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
++ .sync_sg_for_device = swiotlb_sync_sg_for_device,
++ .mapping_error = swiotlb_dma_mapping_error,
++};
++
+ /* Architecture __weak replacement functions */
+ void __init mem_encrypt_init(void)
+ {
+@@ -227,6 +305,14 @@ void __init mem_encrypt_init(void)
+ /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
+ swiotlb_update_mem_attributes();
+
++ /*
++ * With SEV, DMA operations cannot use encryption. New DMA ops
++ * are required in order to mark the DMA areas as decrypted or
++ * to use bounce buffers.
++ */
++ if (sev_active())
++ dma_ops = &sev_dma_ops;
++
+ pr_info("AMD Secure Memory Encryption (SME) active\n");
+ }
+
+diff --git a/lib/swiotlb.c b/lib/swiotlb.c
+index 20df2fd..0d7f46f 100644
+--- a/lib/swiotlb.c
++++ b/lib/swiotlb.c
+@@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
+ if (no_iotlb_memory)
+ panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+
+- if (sme_active())
+- pr_warn_once("SME is active and system is using DMA bounce buffers\n");
++ if (mem_encrypt_active())
++ pr_warn_once("%s is active and system is using DMA bounce buffers\n",
++ sme_active() ? "SME" : "SEV");
+
+ mask = dma_get_seg_boundary(hwdev);
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0053-x86-boot-Add-early-boot-support-when-running-with-SE.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0053-x86-boot-Add-early-boot-support-when-running-with-SE.patch
new file mode 100644
index 00000000..d7cbd3ef
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0053-x86-boot-Add-early-boot-support-when-running-with-SE.patch
@@ -0,0 +1,399 @@
+From 0fe83f5f2a10a2f54dbfcaf26859c434c4034dc9 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 12:48:16 +0530
+Subject: [PATCH 53/95] x86/boot: Add early boot support when running with SEV
+ active
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From 1958b5fc401067662ec11a6fcbe0daa26c813603
+Early in the boot process, add checks to determine if the kernel is
+running with Secure Encrypted Virtualization (SEV) active.
+
+Checking for SEV requires checking that the kernel is running under a
+hypervisor (CPUID 0x00000001, bit 31), that the SEV feature is available
+(CPUID 0x8000001f, bit 1) and then checking a non-interceptable SEV MSR
+(0xc0010131, bit 0).
+
+This check is required so that during early compressed kernel booting the
+pagetables (both the boot pagetables and KASLR pagetables (if enabled) are
+updated to include the encryption mask so that when the kernel is
+decompressed into encrypted memory, it can boot properly.
+
+After the kernel is decompressed and continues booting the same logic is
+used to check if SEV is active and set a flag indicating so. This allows
+to distinguish between SME and SEV, each of which have unique differences
+in how certain things are handled: e.g. DMA (always bounce buffered with
+SEV) or EFI tables (always access decrypted with SME).
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kvm@vger.kernel.org
+Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-13-brijesh.singh@amd.com
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/boot/compressed/Makefile | 1 +
+ arch/x86/boot/compressed/head_64.S | 16 +++++
+ arch/x86/boot/compressed/mem_encrypt.S | 120 +++++++++++++++++++++++++++++++++
+ arch/x86/boot/compressed/misc.h | 2 +
+ arch/x86/boot/compressed/pagetable.c | 8 ++-
+ arch/x86/include/asm/msr-index.h | 3 +
+ arch/x86/include/uapi/asm/kvm_para.h | 1 -
+ arch/x86/mm/mem_encrypt.c | 50 +++++++++++---
+ 8 files changed, 186 insertions(+), 15 deletions(-)
+ mode change 100644 => 100755 arch/x86/boot/compressed/Makefile
+ create mode 100644 arch/x86/boot/compressed/mem_encrypt.S
+
+diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
+old mode 100644
+new mode 100755
+index 3a250ca..32559aa
+--- a/arch/x86/boot/compressed/Makefile
++++ b/arch/x86/boot/compressed/Makefile
+@@ -79,6 +79,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o
+ ifdef CONFIG_X86_64
+ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o
+ vmlinux-objs-y += $(obj)/pgtable_64.o
++ vmlinux-objs-y += $(obj)/mem_encrypt.o
+ endif
+
+ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
+diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
+index 4b3d92a..fc313e2 100644
+--- a/arch/x86/boot/compressed/head_64.S
++++ b/arch/x86/boot/compressed/head_64.S
+@@ -131,6 +131,19 @@ ENTRY(startup_32)
+ /*
+ * Build early 4G boot pagetable
+ */
++ /*
++ * If SEV is active then set the encryption mask in the page tables.
++ * This will insure that when the kernel is copied and decompressed
++ * it will be done so encrypted.
++ */
++ call get_sev_encryption_bit
++ xorl %edx, %edx
++ testl %eax, %eax
++ jz 1f
++ subl $32, %eax /* Encryption bit is always above bit 31 */
++ bts %eax, %edx /* Set encryption mask for page tables */
++1:
++
+ /* Initialize Page tables to 0 */
+ leal pgtable(%ebx), %edi
+ xorl %eax, %eax
+@@ -141,12 +154,14 @@ ENTRY(startup_32)
+ leal pgtable + 0(%ebx), %edi
+ leal 0x1007 (%edi), %eax
+ movl %eax, 0(%edi)
++ addl %edx, 4(%edi)
+
+ /* Build Level 3 */
+ leal pgtable + 0x1000(%ebx), %edi
+ leal 0x1007(%edi), %eax
+ movl $4, %ecx
+ 1: movl %eax, 0x00(%edi)
++ addl %edx, 0x04(%edi)
+ addl $0x00001000, %eax
+ addl $8, %edi
+ decl %ecx
+@@ -157,6 +172,7 @@ ENTRY(startup_32)
+ movl $0x00000183, %eax
+ movl $2048, %ecx
+ 1: movl %eax, 0(%edi)
++ addl %edx, 4(%edi)
+ addl $0x00200000, %eax
+ addl $8, %edi
+ decl %ecx
+diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
+new file mode 100644
+index 0000000..54f5f66
+--- /dev/null
++++ b/arch/x86/boot/compressed/mem_encrypt.S
+@@ -0,0 +1,120 @@
++/*
++ * AMD Memory Encryption Support
++ *
++ * Copyright (C) 2017 Advanced Micro Devices, Inc.
++ *
++ * Author: Tom Lendacky <thomas.lendacky@amd.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/linkage.h>
++
++#include <asm/processor-flags.h>
++#include <asm/msr.h>
++#include <asm/asm-offsets.h>
++
++ .text
++ .code32
++ENTRY(get_sev_encryption_bit)
++ xor %eax, %eax
++
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++ push %ebx
++ push %ecx
++ push %edx
++ push %edi
++
++ /*
++ * RIP-relative addressing is needed to access the encryption bit
++ * variable. Since we are running in 32-bit mode we need this call/pop
++ * sequence to get the proper relative addressing.
++ */
++ call 1f
++1: popl %edi
++ subl $1b, %edi
++
++ movl enc_bit(%edi), %eax
++ cmpl $0, %eax
++ jge .Lsev_exit
++
++ /* Check if running under a hypervisor */
++ movl $1, %eax
++ cpuid
++ bt $31, %ecx /* Check the hypervisor bit */
++ jnc .Lno_sev
++
++ movl $0x80000000, %eax /* CPUID to check the highest leaf */
++ cpuid
++ cmpl $0x8000001f, %eax /* See if 0x8000001f is available */
++ jb .Lno_sev
++
++ /*
++ * Check for the SEV feature:
++ * CPUID Fn8000_001F[EAX] - Bit 1
++ * CPUID Fn8000_001F[EBX] - Bits 5:0
++ * Pagetable bit position used to indicate encryption
++ */
++ movl $0x8000001f, %eax
++ cpuid
++ bt $1, %eax /* Check if SEV is available */
++ jnc .Lno_sev
++
++ movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */
++ rdmsr
++ bt $MSR_AMD64_SEV_ENABLED_BIT, %eax /* Check if SEV is active */
++ jnc .Lno_sev
++
++ movl %ebx, %eax
++ andl $0x3f, %eax /* Return the encryption bit location */
++ movl %eax, enc_bit(%edi)
++ jmp .Lsev_exit
++
++.Lno_sev:
++ xor %eax, %eax
++ movl %eax, enc_bit(%edi)
++
++.Lsev_exit:
++ pop %edi
++ pop %edx
++ pop %ecx
++ pop %ebx
++
++#endif /* CONFIG_AMD_MEM_ENCRYPT */
++
++ ret
++ENDPROC(get_sev_encryption_bit)
++
++ .code64
++ENTRY(get_sev_encryption_mask)
++ xor %rax, %rax
++
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++ push %rbp
++ push %rdx
++
++ movq %rsp, %rbp /* Save current stack pointer */
++
++ call get_sev_encryption_bit /* Get the encryption bit position */
++ testl %eax, %eax
++ jz .Lno_sev_mask
++
++ xor %rdx, %rdx
++ bts %rax, %rdx /* Create the encryption mask */
++ mov %rdx, %rax /* ... and return it */
++
++.Lno_sev_mask:
++ movq %rbp, %rsp /* Restore original stack pointer */
++
++ pop %rdx
++ pop %rbp
++#endif
++
++ ret
++ENDPROC(get_sev_encryption_mask)
++
++ .data
++enc_bit:
++ .int 0xffffffff
+diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
+index 32d4ec2..9d323dc 100644
+--- a/arch/x86/boot/compressed/misc.h
++++ b/arch/x86/boot/compressed/misc.h
+@@ -109,4 +109,6 @@ static inline void console_init(void)
+ { }
+ #endif
+
++unsigned long get_sev_encryption_mask(void);
++
+ #endif
+diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
+index e691ff7..b5e5e02 100644
+--- a/arch/x86/boot/compressed/pagetable.c
++++ b/arch/x86/boot/compressed/pagetable.c
+@@ -80,16 +80,18 @@ static unsigned long top_level_pgt;
+ * Mapping information structure passed to kernel_ident_mapping_init().
+ * Due to relocation, pointers must be assigned at run time not build time.
+ */
+-static struct x86_mapping_info mapping_info = {
+- .page_flag = __PAGE_KERNEL_LARGE_EXEC,
+-};
++static struct x86_mapping_info mapping_info;
+
+ /* Locates and clears a region for a new top level page table. */
+ void initialize_identity_maps(void)
+ {
++ unsigned long sev_me_mask = get_sev_encryption_mask();
++
+ /* Init mapping_info with run-time function/buffer pointers. */
+ mapping_info.alloc_pgt_page = alloc_pgt_page;
+ mapping_info.context = &pgt_data;
++ mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask;
++ mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask;
+
+ /*
+ * It should be impossible for this not to already be true,
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+index ef7eec6..42b18cc 100644
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -350,6 +350,9 @@
+ #define MSR_AMD64_IBSBRTARGET 0xc001103b
+ #define MSR_AMD64_IBSOPDATA4 0xc001103d
+ #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
++#define MSR_AMD64_SEV 0xc0010131
++#define MSR_AMD64_SEV_ENABLED_BIT 0
++#define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT)
+
+ #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f
+
+diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
+index 341db04..989db88 100644
+--- a/arch/x86/include/uapi/asm/kvm_para.h
++++ b/arch/x86/include/uapi/asm/kvm_para.h
+@@ -111,5 +111,4 @@ struct kvm_vcpu_pv_apf_data {
+ #define KVM_PV_EOI_ENABLED KVM_PV_EOI_MASK
+ #define KVM_PV_EOI_DISABLED 0x0
+
+-
+ #endif /* _UAPI_ASM_X86_KVM_PARA_H */
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 3c82d64..94fc818 100755
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -313,7 +313,9 @@ void __init mem_encrypt_init(void)
+ if (sev_active())
+ dma_ops = &sev_dma_ops;
+
+- pr_info("AMD Secure Memory Encryption (SME) active\n");
++ pr_info("AMD %s active\n",
++ sev_active() ? "Secure Encrypted Virtualization (SEV)"
++ : "Secure Memory Encryption (SME)");
+ }
+
+ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
+@@ -805,37 +807,63 @@ void __init __nostackprotector sme_enable(struct boot_params *bp)
+ {
+ const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
+ unsigned int eax, ebx, ecx, edx;
++ unsigned long feature_mask;
+ bool active_by_default;
+ unsigned long me_mask;
+ char buffer[16];
+ u64 msr;
+
+- /* Check for the SME support leaf */
++ /* Check for the SME/SEV support leaf */
+ eax = 0x80000000;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+ if (eax < 0x8000001f)
+ return;
+
++#define AMD_SME_BIT BIT(0)
++#define AMD_SEV_BIT BIT(1)
+ /*
+- * Check for the SME feature:
+- * CPUID Fn8000_001F[EAX] - Bit 0
+- * Secure Memory Encryption support
+- * CPUID Fn8000_001F[EBX] - Bits 5:0
+- * Pagetable bit position used to indicate encryption
++ * Set the feature mask (SME or SEV) based on whether we are
++ * running under a hypervisor.
++ */
++ eax = 1;
++ ecx = 0;
++ native_cpuid(&eax, &ebx, &ecx, &edx);
++ feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
++
++ /*
++ * Check for the SME/SEV feature:
++ * CPUID Fn8000_001F[EAX]
++ * - Bit 0 - Secure Memory Encryption support
++ * - Bit 1 - Secure Encrypted Virtualization support
++ * CPUID Fn8000_001F[EBX]
++ * - Bits 5:0 - Pagetable bit position used to indicate encryption
+ */
+ eax = 0x8000001f;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
+- if (!(eax & 1))
++ if (!(eax & feature_mask))
+ return;
+
+ me_mask = 1UL << (ebx & 0x3f);
+
+- /* Check if SME is enabled */
+- msr = __rdmsr(MSR_K8_SYSCFG);
+- if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++ /* Check if memory encryption is enabled */
++ if (feature_mask == AMD_SME_BIT) {
++ /* For SME, check the SYSCFG MSR */
++ msr = __rdmsr(MSR_K8_SYSCFG);
++ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++ return;
++ } else {
++ /* For SEV, check the SEV MSR */
++ msr = __rdmsr(MSR_AMD64_SEV);
++ if (!(msr & MSR_AMD64_SEV_ENABLED))
++ return;
++
++ /* SEV state cannot be controlled by a command line option */
++ sme_me_mask = me_mask;
++ sev_enabled = true;
+ return;
++ }
+
+ /*
+ * Fixups have not been applied to phys_base yet and we're running
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0054-x86-io-Unroll-string-I-O-when-SEV-is-active.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0054-x86-io-Unroll-string-I-O-when-SEV-is-active.patch
new file mode 100644
index 00000000..96d88245
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0054-x86-io-Unroll-string-I-O-when-SEV-is-active.patch
@@ -0,0 +1,124 @@
+From 295d8a0fc04f5f07a892766fc646680c124dc423 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 13:29:25 +0530
+Subject: [PATCH 54/95] x86/io: Unroll string I/O when SEV is active
+
+From 606b21d4a6498c23632a4693c81b7b24feedd038
+Secure Encrypted Virtualization (SEV) does not support string I/O, so
+unroll the string I/O operation into a loop operating on one element at
+a time.
+
+[ tglx: Gave the static key a real name instead of the obscure __sev ]
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: kvm@vger.kernel.org
+Cc: David Laight <David.Laight@ACULAB.COM>
+Cc: Borislav Petkov <bp@alien8.de>
+Link: https://lkml.kernel.org/r/20171020143059.3291-14-brijesh.singh@amd.com
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/io.h | 43 +++++++++++++++++++++++++++++++++++++++----
+ arch/x86/mm/mem_encrypt.c | 8 ++++++++
+ 2 files changed, 47 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
+index 11398d5..93ae8ae 100644
+--- a/arch/x86/include/asm/io.h
++++ b/arch/x86/include/asm/io.h
+@@ -266,6 +266,21 @@ static inline void slow_down_io(void)
+
+ #endif
+
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++#include <linux/jump_label.h>
++
++extern struct static_key_false sev_enable_key;
++static inline bool sev_key_active(void)
++{
++ return static_branch_unlikely(&sev_enable_key);
++}
++
++#else /* !CONFIG_AMD_MEM_ENCRYPT */
++
++static inline bool sev_key_active(void) { return false; }
++
++#endif /* CONFIG_AMD_MEM_ENCRYPT */
++
+ #define BUILDIO(bwl, bw, type) \
+ static inline void out##bwl(unsigned type value, int port) \
+ { \
+@@ -296,14 +311,34 @@ static inline unsigned type in##bwl##_p(int port) \
+ \
+ static inline void outs##bwl(int port, const void *addr, unsigned long count) \
+ { \
+- asm volatile("rep; outs" #bwl \
+- : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
++ if (sev_key_active()) { \
++ unsigned type *value = (unsigned type *)addr; \
++ while (count) { \
++ out##bwl(*value, port); \
++ value++; \
++ count--; \
++ } \
++ } else { \
++ asm volatile("rep; outs" #bwl \
++ : "+S"(addr), "+c"(count) \
++ : "d"(port) : "memory"); \
++ } \
+ } \
+ \
+ static inline void ins##bwl(int port, void *addr, unsigned long count) \
+ { \
+- asm volatile("rep; ins" #bwl \
+- : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
++ if (sev_key_active()) { \
++ unsigned type *value = (unsigned type *)addr; \
++ while (count) { \
++ *value = in##bwl(port); \
++ value++; \
++ count--; \
++ } \
++ } else { \
++ asm volatile("rep; ins" #bwl \
++ : "+D"(addr), "+c"(count) \
++ : "d"(port) : "memory"); \
++ } \
+ }
+
+ BUILDIO(b, b, char)
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 94fc818..81f50fa 100755
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -41,6 +41,8 @@ static char sme_cmdline_off[] __initdata = "off";
+ */
+ u64 sme_me_mask __section(.data) = 0;
+ EXPORT_SYMBOL(sme_me_mask);
++DEFINE_STATIC_KEY_FALSE(sev_enable_key);
++EXPORT_SYMBOL_GPL(sev_enable_key);
+
+ static bool sev_enabled __section(.data);
+
+@@ -313,6 +315,12 @@ void __init mem_encrypt_init(void)
+ if (sev_active())
+ dma_ops = &sev_dma_ops;
+
++ /*
++ * With SEV, we need to unroll the rep string I/O instructions.
++ */
++ if (sev_active())
++ static_branch_enable(&sev_enable_key);
++
+ pr_info("AMD %s active\n",
+ sev_active() ? "Secure Encrypted Virtualization (SEV)"
+ : "Secure Memory Encryption (SME)");
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0055-x86-Add-support-for-changing-memory-encryption-attri.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0055-x86-Add-support-for-changing-memory-encryption-attri.patch
new file mode 100644
index 00000000..05c3ea97
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0055-x86-Add-support-for-changing-memory-encryption-attri.patch
@@ -0,0 +1,207 @@
+From ec33712d9ef583c4e7d2ff016faadc2e5de94f9b Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 13:47:45 +0530
+Subject: [PATCH 55/95] x86: Add support for changing memory encryption
+ attribute in early boot
+
+From dfaaec9033b80d71056e21cda920752e55f2c514
+
+Some KVM-specific custom MSRs share the guest physical address with the
+hypervisor in early boot. When SEV is active, the shared physical address
+must be mapped with memory encryption attribute cleared so that both
+hypervisor and guest can access the data.
+
+Add APIs to change the memory encryption attribute in early boot code.
+
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: Borislav Petkov <bp@alien8.de>
+Link: https://lkml.kernel.org/r/20171020143059.3291-15-brijesh.singh@amd.com
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/mem_encrypt.h | 8 +++
+ arch/x86/mm/mem_encrypt.c | 130 +++++++++++++++++++++++++++++++++++++
+ 2 files changed, 138 insertions(+)
+
+diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
+index ad91ab5..22c5f3e 100644
+--- a/arch/x86/include/asm/mem_encrypt.h
++++ b/arch/x86/include/asm/mem_encrypt.h
+@@ -42,6 +42,9 @@ void __init sme_early_init(void);
+ void __init sme_encrypt_kernel(struct boot_params *bp);
+ void __init sme_enable(struct boot_params *bp);
+
++int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
++int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
++
+ /* Architecture __weak replacement functions */
+ void __init mem_encrypt_init(void);
+
+@@ -70,6 +73,11 @@ static inline void __init sme_enable(struct boot_params *bp) { }
+ static inline bool sme_active(void) { return false; }
+ static inline bool sev_active(void) { return false; }
+
++static inline int __init
++early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
++static inline int __init
++early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
++
+ #endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+ /*
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 81f50fa..6d59032 100755
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -30,6 +30,8 @@
+ #include <asm/msr.h>
+ #include <asm/cmdline.h>
+
++#include "mm_internal.h"
++
+ static char sme_cmdline_arg[] __initdata = "mem_encrypt";
+ static char sme_cmdline_on[] __initdata = "on";
+ static char sme_cmdline_off[] __initdata = "off";
+@@ -260,6 +262,134 @@ static void sev_free(struct device *dev, size_t size, void *vaddr,
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ }
+
++static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
++{
++ pgprot_t old_prot, new_prot;
++ unsigned long pfn, pa, size;
++ pte_t new_pte;
++
++ switch (level) {
++ case PG_LEVEL_4K:
++ pfn = pte_pfn(*kpte);
++ old_prot = pte_pgprot(*kpte);
++ break;
++ case PG_LEVEL_2M:
++ pfn = pmd_pfn(*(pmd_t *)kpte);
++ old_prot = pmd_pgprot(*(pmd_t *)kpte);
++ break;
++ case PG_LEVEL_1G:
++ pfn = pud_pfn(*(pud_t *)kpte);
++ old_prot = pud_pgprot(*(pud_t *)kpte);
++ break;
++ default:
++ return;
++ }
++
++ new_prot = old_prot;
++ if (enc)
++ pgprot_val(new_prot) |= _PAGE_ENC;
++ else
++ pgprot_val(new_prot) &= ~_PAGE_ENC;
++
++ /* If prot is same then do nothing. */
++ if (pgprot_val(old_prot) == pgprot_val(new_prot))
++ return;
++
++ pa = pfn << page_level_shift(level);
++ size = page_level_size(level);
++
++ /*
++ * We are going to perform in-place en-/decryption and change the
++ * physical page attribute from C=1 to C=0 or vice versa. Flush the
++ * caches to ensure that data gets accessed with the correct C-bit.
++ */
++ clflush_cache_range(__va(pa), size);
++
++ /* Encrypt/decrypt the contents in-place */
++ if (enc)
++ sme_early_encrypt(pa, size);
++ else
++ sme_early_decrypt(pa, size);
++
++ /* Change the page encryption mask. */
++ new_pte = pfn_pte(pfn, new_prot);
++ set_pte_atomic(kpte, new_pte);
++}
++
++static int __init early_set_memory_enc_dec(unsigned long vaddr,
++ unsigned long size, bool enc)
++{
++ unsigned long vaddr_end, vaddr_next;
++ unsigned long psize, pmask;
++ int split_page_size_mask;
++ int level, ret;
++ pte_t *kpte;
++
++ vaddr_next = vaddr;
++ vaddr_end = vaddr + size;
++
++ for (; vaddr < vaddr_end; vaddr = vaddr_next) {
++ kpte = lookup_address(vaddr, &level);
++ if (!kpte || pte_none(*kpte)) {
++ ret = 1;
++ goto out;
++ }
++
++ if (level == PG_LEVEL_4K) {
++ __set_clr_pte_enc(kpte, level, enc);
++ vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
++ continue;
++ }
++
++ psize = page_level_size(level);
++ pmask = page_level_mask(level);
++
++ /*
++ * Check whether we can change the large page in one go.
++ * We request a split when the address is not aligned and
++ * the number of pages to set/clear encryption bit is smaller
++ * than the number of pages in the large page.
++ */
++ if (vaddr == (vaddr & pmask) &&
++ ((vaddr_end - vaddr) >= psize)) {
++ __set_clr_pte_enc(kpte, level, enc);
++ vaddr_next = (vaddr & pmask) + psize;
++ continue;
++ }
++
++ /*
++ * The virtual address is part of a larger page, create the next
++ * level page table mapping (4K or 2M). If it is part of a 2M
++ * page then we request a split of the large page into 4K
++ * chunks. A 1GB large page is split into 2M pages, resp.
++ */
++ if (level == PG_LEVEL_2M)
++ split_page_size_mask = 0;
++ else
++ split_page_size_mask = 1 << PG_LEVEL_2M;
++
++ kernel_physical_mapping_init(__pa(vaddr & pmask),
++ __pa((vaddr_end & pmask) + psize),
++ split_page_size_mask);
++ }
++
++ ret = 0;
++
++out:
++ __flush_tlb_all();
++ return ret;
++}
++
++int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
++{
++ return early_set_memory_enc_dec(vaddr, size, false);
++}
++
++int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
++{
++ return early_set_memory_enc_dec(vaddr, size, true);
++}
++
+ /*
+ * SME and SEV are very similar but they are not the same, so there are
+ * times that the kernel will need to distinguish between SME and SEV. The
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0056-percpu-Introduce-DEFINE_PER_CPU_DECRYPTED.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0056-percpu-Introduce-DEFINE_PER_CPU_DECRYPTED.patch
new file mode 100644
index 00000000..326fb11c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0056-percpu-Introduce-DEFINE_PER_CPU_DECRYPTED.patch
@@ -0,0 +1,99 @@
+From eb07f7d793db030645abf9bad60dc9d1f40870a7 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Fri, 20 Oct 2017 09:30:57 -0500
+Subject: [PATCH 56/95] percpu: Introduce DEFINE_PER_CPU_DECRYPTED
+
+KVM guest defines three per-CPU variables (steal-time, apf_reason, and
+kvm_pic_eoi) which are shared between a guest and a hypervisor.
+
+When SEV is active, memory is encrypted with a guest-specific key, and if
+the guest OS wants to share the memory region with the hypervisor then it
+must clear the C-bit (i.e set decrypted) before sharing it.
+
+DEFINE_PER_CPU_DECRYPTED can be used to define the per-CPU variables
+which will be shared between a guest and a hypervisor.
+
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Acked-by: Tejun Heo <tj@kernel.org>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: linux-arch@vger.kernel.org
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Christoph Lameter <cl@linux.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-16-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/asm-generic/vmlinux.lds.h | 19 +++++++++++++++++++
+ include/linux/percpu-defs.h | 15 +++++++++++++++
+ 2 files changed, 34 insertions(+)
+
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index fcec26d..873d2e70 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -779,6 +779,24 @@
+ #endif
+
+ /*
++ * Memory encryption operates on a page basis. Since we need to clear
++ * the memory encryption mask for this section, it needs to be aligned
++ * on a page boundary and be a page-size multiple in length.
++ *
++ * Note: We use a separate section so that only this section gets
++ * decrypted to avoid exposing more than we wish.
++ */
++#ifdef CONFIG_AMD_MEM_ENCRYPT
++#define PERCPU_DECRYPTED_SECTION \
++ . = ALIGN(PAGE_SIZE); \
++ *(.data..percpu..decrypted) \
++ . = ALIGN(PAGE_SIZE);
++#else
++#define PERCPU_DECRYPTED_SECTION
++#endif
++
++
++/*
+ * Default discarded sections.
+ *
+ * Some archs want to discard exit text/data at runtime rather than
+@@ -816,6 +834,7 @@
+ . = ALIGN(cacheline); \
+ *(.data..percpu) \
+ *(.data..percpu..shared_aligned) \
++ PERCPU_DECRYPTED_SECTION \
+ VMLINUX_SYMBOL(__per_cpu_end) = .;
+
+ /**
+diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
+index 8f16299..2d2096b 100644
+--- a/include/linux/percpu-defs.h
++++ b/include/linux/percpu-defs.h
+@@ -173,6 +173,21 @@
+ DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
+
+ /*
++ * Declaration/definition used for per-CPU variables that should be accessed
++ * as decrypted when memory encryption is enabled in the guest.
++ */
++#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT)
++
++#define DECLARE_PER_CPU_DECRYPTED(type, name) \
++ DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
++
++#define DEFINE_PER_CPU_DECRYPTED(type, name) \
++ DEFINE_PER_CPU_SECTION(type, name, "..decrypted")
++#else
++#define DEFINE_PER_CPU_DECRYPTED(type, name) DEFINE_PER_CPU(type, name)
++#endif
++
++/*
+ * Intermodule exports for per-CPU variables. sparse forgets about
+ * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to
+ * noop if __CHECKER__.
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0057-X86-KVM-Decrypt-shared-per-cpu-variables-when-SEV-is.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0057-X86-KVM-Decrypt-shared-per-cpu-variables-when-SEV-is.patch
new file mode 100644
index 00000000..98bdb0fe
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0057-X86-KVM-Decrypt-shared-per-cpu-variables-when-SEV-is.patch
@@ -0,0 +1,106 @@
+From 1b6f44106316b90f38138efe684016e46fcd8cbd Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Fri, 20 Oct 2017 09:30:58 -0500
+Subject: [PATCH 57/95] X86/KVM: Decrypt shared per-cpu variables when SEV is
+ active
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When SEV is active, guest memory is encrypted with a guest-specific key, a
+guest memory region shared with the hypervisor must be mapped as decrypted
+before it can be shared.
+
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-17-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kernel/kvm.c | 40 +++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 37 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 652bdd8..ead1a22 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -75,8 +75,8 @@ static int parse_no_kvmclock_vsyscall(char *arg)
+
+ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
+
+-static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
+-static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
++static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
++static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
+ static int has_steal_clock = 0;
+
+ /*
+@@ -312,7 +312,7 @@ static void kvm_register_steal_time(void)
+ cpu, (unsigned long long) slow_virt_to_phys(st));
+ }
+
+-static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
++static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;
+
+ static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
+ {
+@@ -426,9 +426,42 @@ void kvm_disable_steal_time(void)
+ wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+ }
+
++static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
++{
++ early_set_memory_decrypted((unsigned long) ptr, size);
++}
++
++/*
++ * Iterate through all possible CPUs and map the memory region pointed
++ * by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
++ *
++ * Note: we iterate through all possible CPUs to ensure that CPUs
++ * hotplugged will have their per-cpu variable already mapped as
++ * decrypted.
++ */
++static void __init sev_map_percpu_data(void)
++{
++ int cpu;
++
++ if (!sev_active())
++ return;
++
++ for_each_possible_cpu(cpu) {
++ __set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
++ __set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
++ __set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
++ }
++}
++
+ #ifdef CONFIG_SMP
+ static void __init kvm_smp_prepare_boot_cpu(void)
+ {
++ /*
++ * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
++ * shares the guest physical address with the hypervisor.
++ */
++ sev_map_percpu_data();
++
+ kvm_guest_cpu_init();
+ native_smp_prepare_boot_cpu();
+ kvm_spinlock_init();
+@@ -496,6 +529,7 @@ void __init kvm_guest_init(void)
+ kvm_cpu_online, kvm_cpu_down_prepare) < 0)
+ pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
+ #else
++ sev_map_percpu_data();
+ kvm_guest_cpu_init();
+ #endif
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0058-X86-KVM-Clear-encryption-attribute-when-SEV-is-activ.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0058-X86-KVM-Clear-encryption-attribute-when-SEV-is-activ.patch
new file mode 100644
index 00000000..3b62cacf
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0058-X86-KVM-Clear-encryption-attribute-when-SEV-is-activ.patch
@@ -0,0 +1,175 @@
+From d66120a2576395bea6260dfa0d42004b5c5d1fca Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Fri, 20 Oct 2017 09:30:59 -0500
+Subject: [PATCH 58/95] X86/KVM: Clear encryption attribute when SEV is active
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The guest physical memory area holding the struct pvclock_wall_clock and
+struct pvclock_vcpu_time_info are shared with the hypervisor. It
+periodically updates the contents of the memory.
+
+When SEV is active, the encryption attributes from the shared memory pages
+must be cleared so that both hypervisor and guest can access the data.
+
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: kvm@vger.kernel.org
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-18-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/entry/vdso/vma.c | 5 ++--
+ arch/x86/kernel/kvmclock.c | 65 ++++++++++++++++++++++++++++++++++++++--------
+ 2 files changed, 57 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
+index 1911310..d630531 100644
+--- a/arch/x86/entry/vdso/vma.c
++++ b/arch/x86/entry/vdso/vma.c
+@@ -114,10 +114,11 @@ static int vvar_fault(const struct vm_special_mapping *sm,
+ struct pvclock_vsyscall_time_info *pvti =
+ pvclock_pvti_cpu0_va();
+ if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+- ret = vm_insert_pfn(
++ ret = vm_insert_pfn_prot(
+ vma,
+ vmf->address,
+- __pa(pvti) >> PAGE_SHIFT);
++ __pa(pvti) >> PAGE_SHIFT,
++ pgprot_decrypted(vma->vm_page_prot));
+ }
+ } else if (sym_offset == image->sym_hvclock_page) {
+ struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 48703d4..42153ef 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -27,6 +27,7 @@
+ #include <linux/sched.h>
+ #include <linux/sched/clock.h>
+
++#include <asm/mem_encrypt.h>
+ #include <asm/x86_init.h>
+ #include <asm/reboot.h>
+ #include <asm/kvmclock.h>
+@@ -45,7 +46,7 @@ early_param("no-kvmclock", parse_no_kvmclock);
+
+ /* The hypervisor will put information about time periodically here */
+ static struct pvclock_vsyscall_time_info *hv_clock;
+-static struct pvclock_wall_clock wall_clock;
++static struct pvclock_wall_clock *wall_clock;
+
+ struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
+ {
+@@ -64,15 +65,15 @@ static void kvm_get_wallclock(struct timespec *now)
+ int low, high;
+ int cpu;
+
+- low = (int)__pa_symbol(&wall_clock);
+- high = ((u64)__pa_symbol(&wall_clock) >> 32);
++ low = (int)slow_virt_to_phys(wall_clock);
++ high = ((u64)slow_virt_to_phys(wall_clock) >> 32);
+
+ native_write_msr(msr_kvm_wall_clock, low, high);
+
+ cpu = get_cpu();
+
+ vcpu_time = &hv_clock[cpu].pvti;
+- pvclock_read_wallclock(&wall_clock, vcpu_time, now);
++ pvclock_read_wallclock(wall_clock, vcpu_time, now);
+
+ put_cpu();
+ }
+@@ -250,11 +251,39 @@ static void kvm_shutdown(void)
+ native_machine_shutdown();
+ }
+
++static phys_addr_t __init kvm_memblock_alloc(phys_addr_t size,
++ phys_addr_t align)
++{
++ phys_addr_t mem;
++
++ mem = memblock_alloc(size, align);
++ if (!mem)
++ return 0;
++
++ if (sev_active()) {
++ if (early_set_memory_decrypted((unsigned long)__va(mem), size))
++ goto e_free;
++ }
++
++ return mem;
++e_free:
++ memblock_free(mem, size);
++ return 0;
++}
++
++static void __init kvm_memblock_free(phys_addr_t addr, phys_addr_t size)
++{
++ if (sev_active())
++ early_set_memory_encrypted((unsigned long)__va(addr), size);
++
++ memblock_free(addr, size);
++}
++
+ void __init kvmclock_init(void)
+ {
+ struct pvclock_vcpu_time_info *vcpu_time;
+- unsigned long mem;
+- int size, cpu;
++ unsigned long mem, mem_wall_clock;
++ int size, cpu, wall_clock_size;
+ u8 flags;
+
+ size = PAGE_ALIGN(sizeof(struct pvclock_vsyscall_time_info)*NR_CPUS);
+@@ -268,21 +297,35 @@ void __init kvmclock_init(void)
+ } else if (!(kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)))
+ return;
+
+- printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
+- msr_kvm_system_time, msr_kvm_wall_clock);
++ wall_clock_size = PAGE_ALIGN(sizeof(struct pvclock_wall_clock));
++ mem_wall_clock = kvm_memblock_alloc(wall_clock_size, PAGE_SIZE);
++ if (!mem_wall_clock)
++ return;
+
+- mem = memblock_alloc(size, PAGE_SIZE);
+- if (!mem)
++ wall_clock = __va(mem_wall_clock);
++ memset(wall_clock, 0, wall_clock_size);
++
++ mem = kvm_memblock_alloc(size, PAGE_SIZE);
++ if (!mem) {
++ kvm_memblock_free(mem_wall_clock, wall_clock_size);
++ wall_clock = NULL;
+ return;
++ }
++
+ hv_clock = __va(mem);
+ memset(hv_clock, 0, size);
+
+ if (kvm_register_clock("primary cpu clock")) {
+ hv_clock = NULL;
+- memblock_free(mem, size);
++ kvm_memblock_free(mem, size);
++ kvm_memblock_free(mem_wall_clock, wall_clock_size);
++ wall_clock = NULL;
+ return;
+ }
+
++ printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
++ msr_kvm_system_time, msr_kvm_wall_clock);
++
+ if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
+ pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch
new file mode 100644
index 00000000..a9ca3a12
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0059-x86-CPU-AMD-Add-the-Secure-Encrypted-Virtualization-.patch
@@ -0,0 +1,188 @@
+From 194c3226782279235d34de040e318d30665b7b5b Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 22 Oct 2018 13:28:34 +0530
+Subject: [PATCH 59/95] x86/CPU/AMD: Add the Secure Encrypted Virtualization
+ CPU feature
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Update the CPU features to include identifying and reporting on the
+Secure Encrypted Virtualization (SEV) feature. SEV is identified by
+CPUID 0x8000001f, but requires BIOS support to enable it (set bit 23 of
+MSR_K8_SYSCFG and set bit 0 of MSR_K7_HWCR). Only show the SEV feature
+as available if reported by CPUID and enabled by BIOS.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: kvm@vger.kernel.org
+Cc: x86@kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/cpufeatures.h | 2 +-
+ arch/x86/include/asm/msr-index.h | 2 +
+ arch/x86/kernel/cpu/amd.c | 82 +++++++++++++++++++++-----------------
+ arch/x86/kernel/cpu/scattered.c | 1 +
+ 4 files changed, 50 insertions(+), 37 deletions(-)
+ mode change 100644 => 100755 arch/x86/include/asm/cpufeatures.h
+ mode change 100644 => 100755 arch/x86/include/asm/msr-index.h
+ mode change 100644 => 100755 arch/x86/kernel/cpu/amd.c
+ mode change 100644 => 100755 arch/x86/kernel/cpu/scattered.c
+
+diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
+old mode 100644
+new mode 100755
+index 8418462..76a0ba0
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -210,7 +210,7 @@
+ #define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+ #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+ #define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+-
++#define X86_FEATURE_SEV ( 7*32+20) /* AMD Secure Encrypted Virtualization */
+ #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+ #define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+ #define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
+old mode 100644
+new mode 100755
+index 42b18cc..55d802c
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -413,6 +413,8 @@
+ #define MSR_K7_PERFCTR3 0xc0010007
+ #define MSR_K7_CLK_CTL 0xc001001b
+ #define MSR_K7_HWCR 0xc0010015
++#define MSR_K7_HWCR_SMMLOCK_BIT 0
++#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+ #define MSR_K7_FID_VID_CTL 0xc0010041
+ #define MSR_K7_FID_VID_STATUS 0xc0010042
+
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+old mode 100644
+new mode 100755
+index dda741b..c6333e7
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -575,6 +575,51 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
+ }
+ }
+
++static void early_detect_mem_encrypt(struct cpuinfo_x86 *c)
++{
++ u64 msr;
++
++ /*
++ * BIOS support is required for SME and SEV.
++ * For SME: If BIOS has enabled SME then adjust x86_phys_bits by
++ * the SME physical address space reduction value.
++ * If BIOS has not enabled SME then don't advertise the
++ * SME feature (set in scattered.c).
++ * For SEV: If BIOS has not enabled SEV then don't advertise the
++ * SEV feature (set in scattered.c).
++ *
++ * In all cases, since support for SME and SEV requires long mode,
++ * don't advertise the feature under CONFIG_X86_32.
++ */
++ if (cpu_has(c, X86_FEATURE_SME) || cpu_has(c, X86_FEATURE_SEV)) {
++ /* Check if memory encryption is enabled */
++ rdmsrl(MSR_K8_SYSCFG, msr);
++ if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
++ goto clear_all;
++
++ /*
++ * Always adjust physical address bits. Even though this
++ * will be a value above 32-bits this is still done for
++ * CONFIG_X86_32 so that accurate values are reported.
++ */
++ c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
++
++ if (IS_ENABLED(CONFIG_X86_32))
++ goto clear_all;
++
++ rdmsrl(MSR_K7_HWCR, msr);
++ if (!(msr & MSR_K7_HWCR_SMMLOCK))
++ goto clear_sev;
++
++ return;
++
++clear_all:
++ clear_cpu_cap(c, X86_FEATURE_SME);
++clear_sev:
++ clear_cpu_cap(c, X86_FEATURE_SEV);
++ }
++}
++
+ static void early_init_amd(struct cpuinfo_x86 *c)
+ {
+ u64 value;
+@@ -647,42 +692,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
+ set_cpu_bug(c, X86_BUG_AMD_E400);
+
+- /*
+- * BIOS support is required for SME. If BIOS has enabled SME then
+- * adjust x86_phys_bits by the SME physical address space reduction
+- * value. If BIOS has not enabled SME then don't advertise the
+- * feature (set in scattered.c). Also, since the SME support requires
+- * long mode, don't advertise the feature under CONFIG_X86_32.
+- */
+- if (cpu_has(c, X86_FEATURE_SME)) {
+- u64 msr;
+-
+- /* Check if SME is enabled */
+- rdmsrl(MSR_K8_SYSCFG, msr);
+- if (msr & MSR_K8_SYSCFG_MEM_ENCRYPT) {
+- c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f;
+- if (IS_ENABLED(CONFIG_X86_32))
+- clear_cpu_cap(c, X86_FEATURE_SME);
+- } else {
+- clear_cpu_cap(c, X86_FEATURE_SME);
+- }
+- }
+-
+- /* Re-enable TopologyExtensions if switched off by BIOS */
+- if (c->x86 == 0x15 &&
+- (c->x86_model >= 0x10 && c->x86_model <= 0x6f) &&
+- !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+-
+- if (msr_set_bit(0xc0011005, 54) > 0) {
+- rdmsrl(0xc0011005, value);
+- if (value & BIT_64(54)) {
+- set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+- pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n");
+- }
+- }
+- }
+-
+- amd_get_topology_early(c);
++ early_detect_mem_encrypt(c);
+ }
+
+ static void init_amd_k8(struct cpuinfo_x86 *c)
+diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
+old mode 100644
+new mode 100755
+index df11f5d..3320773
+--- a/arch/x86/kernel/cpu/scattered.c
++++ b/arch/x86/kernel/cpu/scattered.c
+@@ -29,6 +29,7 @@ static const struct cpuid_bit cpuid_bits[] = {
+ { X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
+ { X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
+ { X86_FEATURE_SME, CPUID_EAX, 0, 0x8000001f, 0 },
++ { X86_FEATURE_SEV, CPUID_EAX, 1, 0x8000001f, 0 },
+ { 0, 0, 0, 0, 0 }
+ };
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0060-kvm-svm-prepare-for-new-bit-definition-in-nested_ctl.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0060-kvm-svm-prepare-for-new-bit-definition-in-nested_ctl.patch
new file mode 100644
index 00000000..1c80a72d
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0060-kvm-svm-prepare-for-new-bit-definition-in-nested_ctl.patch
@@ -0,0 +1,81 @@
+From 99e4847c8110fd6e3f654cf07ea0e71964760bed Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 4 Dec 2017 10:57:24 -0600
+Subject: [PATCH 60/95] kvm: svm: prepare for new bit definition in nested_ctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently the nested_ctl variable in the vmcb_control_area structure is
+used to indicate nested paging support. The nested paging support field
+is actually defined as bit 0 of the field. In order to support a new
+feature flag the usage of the nested_ctl and nested paging support must
+be converted to operate on a single bit.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/svm.h | 2 ++
+ arch/x86/kvm/svm.c | 7 ++++---
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index 78dd9df..c936c98 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -146,6 +146,8 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
+ #define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+ #define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
++#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
++
+ struct __attribute__ ((__packed__)) vmcb_seg {
+ u16 selector;
+ u16 attrib;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index f6bebce..3c637b97 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1330,7 +1330,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+
+ if (npt_enabled) {
+ /* Setup VMCB for Nested Paging */
+- control->nested_ctl = 1;
++ control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
+ clr_intercept(svm, INTERCEPT_INVLPG);
+ clr_exception_intercept(svm, PF_VECTOR);
+ clr_cr_intercept(svm, INTERCEPT_CR3_READ);
+@@ -2960,7 +2960,8 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
+ if (vmcb->control.asid == 0)
+ return false;
+
+- if (vmcb->control.nested_ctl && !npt_enabled)
++ if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
++ !npt_enabled)
+ return false;
+
+ return true;
+@@ -3035,7 +3036,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
+ else
+ svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
+- if (nested_vmcb->control.nested_ctl) {
++ if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
+ kvm_mmu_unload(&svm->vcpu);
+ svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
+ nested_svm_init_mmu_context(&svm->vcpu);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0061-kvm-svm-Add-SEV-feature-definitions-to-KVM.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0061-kvm-svm-Add-SEV-feature-definitions-to-KVM.patch
new file mode 100644
index 00000000..63dc7c88
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0061-kvm-svm-Add-SEV-feature-definitions-to-KVM.patch
@@ -0,0 +1,44 @@
+From aec912bb8da476c00b624cf3d92d0aadea7c7954 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 4 Dec 2017 10:57:24 -0600
+Subject: [PATCH 61/95] kvm: svm: Add SEV feature definitions to KVM
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Define the SEV enable bit for the VMCB control structure. The hypervisor
+will use this bit to enable SEV in the guest.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/svm.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
+index c936c98..0487ac0 100644
+--- a/arch/x86/include/asm/svm.h
++++ b/arch/x86/include/asm/svm.h
+@@ -147,6 +147,7 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
+ #define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
+ #define SVM_NESTED_CTL_NP_ENABLE BIT(0)
++#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
+
+ struct __attribute__ ((__packed__)) vmcb_seg {
+ u16 selector;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0062-KVM-SVM-Prepare-to-reserve-asid-for-SEV-guest.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0062-KVM-SVM-Prepare-to-reserve-asid-for-SEV-guest.patch
new file mode 100644
index 00000000..c1501a41
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0062-KVM-SVM-Prepare-to-reserve-asid-for-SEV-guest.patch
@@ -0,0 +1,62 @@
+From 0b7dc18d9169d47409271489c9fab44495ccf63d Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:25 -0600
+Subject: [PATCH 62/95] KVM: SVM: Prepare to reserve asid for SEV guest
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Currently, ASID allocation start at 1. Add a svm_vcpu_data.min_asid
+which allows supplying a dynamic start ASID.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 3c637b97..abc6cd8 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -539,6 +539,7 @@ struct svm_cpu_data {
+ u64 asid_generation;
+ u32 max_asid;
+ u32 next_asid;
++ u32 min_asid;
+ struct kvm_ldttss_desc *tss_desc;
+
+ struct page *save_area;
+@@ -797,6 +798,7 @@ static int svm_hardware_enable(void)
+ sd->asid_generation = 1;
+ sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+ sd->next_asid = sd->max_asid + 1;
++ sd->min_asid = 1;
+
+ gdt = get_current_gdt_rw();
+ sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+@@ -2139,7 +2141,7 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
+ {
+ if (sd->next_asid > sd->max_asid) {
+ ++sd->asid_generation;
+- sd->next_asid = 1;
++ sd->next_asid = sd->min_asid;
+ svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
+ }
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0063-KVM-X86-Extend-CPUID-range-to-include-new-leaf.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0063-KVM-X86-Extend-CPUID-range-to-include-new-leaf.patch
new file mode 100644
index 00000000..88ac9265
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0063-KVM-X86-Extend-CPUID-range-to-include-new-leaf.patch
@@ -0,0 +1,64 @@
+From b2bf70eb427d51d7126d7f7ce5354653dc1069a2 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:25 -0600
+Subject: [PATCH 63/95] KVM: X86: Extend CPUID range to include new leaf
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+This CPUID leaf provides the memory encryption support information on
+AMD Platform. Its complete description is available in APM volume 2,
+Section 15.34
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/cpuid.c | 2 +-
+ arch/x86/kvm/svm.c | 6 ++++++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index d1f5c74..7d87dda 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -613,7 +613,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
+ entry->edx = 0;
+ break;
+ case 0x80000000:
+- entry->eax = min(entry->eax, 0x8000001a);
++ entry->eax = min(entry->eax, 0x8000001f);
+ break;
+ case 0x80000001:
+ entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index abc6cd8..69b5ce2 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -5356,6 +5356,12 @@ static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
+ entry->edx |= SVM_FEATURE_NPT;
+
+ break;
++ case 0x8000001F:
++ /* Support memory encryption cpuid if host supports it */
++ if (boot_cpu_has(X86_FEATURE_SEV))
++ cpuid(0x8000001f, &entry->eax, &entry->ebx,
++ &entry->ecx, &entry->edx);
++
+ }
+ }
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0064-KVM-Introduce-KVM_MEMORY_ENCRYPT_OP-ioctl.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0064-KVM-Introduce-KVM_MEMORY_ENCRYPT_OP-ioctl.patch
new file mode 100644
index 00000000..7f1238cf
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0064-KVM-Introduce-KVM_MEMORY_ENCRYPT_OP-ioctl.patch
@@ -0,0 +1,111 @@
+From 13dfbee9fe761b68fbcee7546723a171892f09c1 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 22 Oct 2018 14:09:53 +0530
+Subject: [PATCH 64/95] KVM: Introduce KVM_MEMORY_ENCRYPT_OP ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From 5acc5c063196b4a531a761a954023c1848ec832b
+
+If the hardware supports memory encryption then the
+KVM_MEMORY_ENCRYPT_OP ioctl can be used by qemu to issue a platform
+specific memory encryption commands.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ Documentation/virtual/kvm/api.txt | 16 ++++++++++++++++
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/x86.c | 6 ++++++
+ include/uapi/linux/kvm.h | 2 ++
+ 4 files changed, 25 insertions(+)
+ mode change 100644 => 100755 arch/x86/include/asm/kvm_host.h
+
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 5d12166..8e11bb6 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -3414,6 +3414,22 @@ invalid, if invalid pages are written to (e.g. after the end of memory)
+ or if no page table is present for the addresses (e.g. when using
+ hugepages).
+
++4.109 KVM_MEMORY_ENCRYPT_OP
++
++Capability: basic
++Architectures: x86
++Type: system
++Parameters: an opaque platform specific structure (in/out)
++Returns: 0 on success; -1 on error
++
++If the platform supports creating encrypted VMs then this ioctl can be used
++for issuing platform-specific memory encryption commands to manage those
++encrypted VMs.
++
++Currently, this ioctl is used for issuing Secure Encrypted Virtualization
++(SEV) commands on AMD Processors. The SEV commands are defined in
++Documentation/virtual/kvm/amd-memory-encryption.txt.
++
+ 5. The kvm_run structure
+ ------------------------
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+old mode 100644
+new mode 100755
+index 4015b88..7595643
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1067,6 +1067,7 @@ struct kvm_x86_ops {
+ void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
+
+ void (*setup_mce)(struct kvm_vcpu *vcpu);
++ int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
+
+ int (*get_msr_feature)(struct kvm_msr_entry *entry);
+ };
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 3856828..5243482 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4384,6 +4384,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ r = kvm_vm_ioctl_enable_cap(kvm, &cap);
+ break;
+ }
++ case KVM_MEMORY_ENCRYPT_OP: {
++ r = -ENOTTY;
++ if (kvm_x86_ops->mem_enc_op)
++ r = kvm_x86_ops->mem_enc_op(kvm, argp);
++ break;
++ }
+ default:
+ r = -ENOTTY;
+ }
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 27c62ab..409f266 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -1360,6 +1360,8 @@ struct kvm_s390_ucas_mapping {
+ /* Available with KVM_CAP_S390_CMMA_MIGRATION */
+ #define KVM_S390_GET_CMMA_BITS _IOWR(KVMIO, 0xb8, struct kvm_s390_cmma_log)
+ #define KVM_S390_SET_CMMA_BITS _IOW(KVMIO, 0xb9, struct kvm_s390_cmma_log)
++/* Memory Encryption Commands */
++#define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
+
+ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+ #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0065-KVM-Introduce-KVM_MEMORY_ENCRYPT_-UN-REG_REGION.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0065-KVM-Introduce-KVM_MEMORY_ENCRYPT_-UN-REG_REGION.patch
new file mode 100644
index 00000000..df8994c8
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0065-KVM-Introduce-KVM_MEMORY_ENCRYPT_-UN-REG_REGION.patch
@@ -0,0 +1,151 @@
+From 6a4347f1283b37a8367df7774fb4d8375e67cc1c Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Mon, 22 Oct 2018 14:13:40 +0530
+Subject: [PATCH 65/95] KVM: Introduce KVM_MEMORY_ENCRYPT_{UN,}REG_REGION
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From 69eaedee411c1fc1cf123520897a96b7cf04d8a0
+If hardware supports memory encryption then KVM_MEMORY_ENCRYPT_REG_REGION
+and KVM_MEMORY_ENCRYPT_UNREG_REGION ioctl's can be used by userspace to
+register/unregister the guest memory regions which may contain the encrypted
+data (e.g guest RAM, PCI BAR, SMRAM etc).
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ Documentation/virtual/kvm/api.txt | 34 ++++++++++++++++++++++++++++++++++
+ arch/x86/include/asm/kvm_host.h | 2 ++
+ arch/x86/kvm/x86.c | 24 ++++++++++++++++++++++++
+ include/uapi/linux/kvm.h | 8 ++++++++
+ 4 files changed, 68 insertions(+)
+
+diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
+index 8e11bb6..6c96d44 100644
+--- a/Documentation/virtual/kvm/api.txt
++++ b/Documentation/virtual/kvm/api.txt
+@@ -3430,6 +3430,40 @@ Currently, this ioctl is used for issuing Secure Encrypted Virtualization
+ (SEV) commands on AMD Processors. The SEV commands are defined in
+ Documentation/virtual/kvm/amd-memory-encryption.txt.
+
++4.110 KVM_MEMORY_ENCRYPT_REG_REGION
++
++Capability: basic
++Architectures: x86
++Type: system
++Parameters: struct kvm_enc_region (in)
++Returns: 0 on success; -1 on error
++
++This ioctl can be used to register a guest memory region which may
++contain encrypted data (e.g. guest RAM, SMRAM etc).
++
++It is used in the SEV-enabled guest. When encryption is enabled, a guest
++memory region may contain encrypted data. The SEV memory encryption
++engine uses a tweak such that two identical plaintext pages, each at
++different locations will have differing ciphertexts. So swapping or
++moving ciphertext of those pages will not result in plaintext being
++swapped. So relocating (or migrating) physical backing pages for the SEV
++guest will require some additional steps.
++
++Note: The current SEV key management spec does not provide commands to
++swap or migrate (move) ciphertext pages. Hence, for now we pin the guest
++memory region registered with the ioctl.
++
++4.111 KVM_MEMORY_ENCRYPT_UNREG_REGION
++
++Capability: basic
++Architectures: x86
++Type: system
++Parameters: struct kvm_enc_region (in)
++Returns: 0 on success; -1 on error
++
++This ioctl can be used to unregister the guest memory region registered
++with KVM_MEMORY_ENCRYPT_REG_REGION ioctl above.
++
+ 5. The kvm_run structure
+ ------------------------
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 7595643..430eeb3 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1068,6 +1068,8 @@ struct kvm_x86_ops {
+
+ void (*setup_mce)(struct kvm_vcpu *vcpu);
+ int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
++ int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
++ int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
+
+ int (*get_msr_feature)(struct kvm_msr_entry *entry);
+ };
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5243482..25af617 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4390,6 +4390,30 @@ long kvm_arch_vm_ioctl(struct file *filp,
+ r = kvm_x86_ops->mem_enc_op(kvm, argp);
+ break;
+ }
++ case KVM_MEMORY_ENCRYPT_REG_REGION: {
++ struct kvm_enc_region region;
++
++ r = -EFAULT;
++ if (copy_from_user(&region, argp, sizeof(region)))
++ goto out;
++
++ r = -ENOTTY;
++ if (kvm_x86_ops->mem_enc_reg_region)
++ r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
++ break;
++ }
++ case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
++ struct kvm_enc_region region;
++
++ r = -EFAULT;
++ if (copy_from_user(&region, argp, sizeof(region)))
++ goto out;
++
++ r = -ENOTTY;
++ if (kvm_x86_ops->mem_enc_unreg_region)
++ r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
++ break;
++ }
+ default:
+ r = -ENOTTY;
+ }
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 409f266..24f9ae2 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -1363,6 +1363,14 @@ struct kvm_s390_ucas_mapping {
+ /* Memory Encryption Commands */
+ #define KVM_MEMORY_ENCRYPT_OP _IOWR(KVMIO, 0xba, unsigned long)
+
++struct kvm_enc_region {
++ __u64 addr;
++ __u64 size;
++};
++
++#define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
++#define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
++
+ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+ #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
+ #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0066-KVM-X86-Add-CONFIG_KVM_AMD_SEV.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0066-KVM-X86-Add-CONFIG_KVM_AMD_SEV.patch
new file mode 100644
index 00000000..cbc2d8c8
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0066-KVM-X86-Add-CONFIG_KVM_AMD_SEV.patch
@@ -0,0 +1,52 @@
+From 2ba55248ca7e3996b5c3758ae7f56b396bca7b49 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:32 -0600
+Subject: [PATCH 66/95] KVM: X86: Add CONFIG_KVM_AMD_SEV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The config option can be used to enable SEV support on AMD Processors.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/Kconfig | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
+index 3df51c2..148ea32 100644
+--- a/arch/x86/kvm/Kconfig
++++ b/arch/x86/kvm/Kconfig
+@@ -81,6 +81,16 @@ config KVM_AMD
+ To compile this as a module, choose M here: the module
+ will be called kvm-amd.
+
++config KVM_AMD_SEV
++ def_bool y
++ bool "AMD Secure Encrypted Virtualization (SEV) support"
++ depends on KVM_AMD && X86_64
++ select CRYPTO_DEV_CCP
++ select CRYPTO_DEV_CCP_DD
++ select CRYPTO_DEV_SP_PSP
++ ---help---
++ Provides support for launching Encrypted VMs on AMD processors.
++
+ config KVM_MMU_AUDIT
+ bool "Audit KVM MMU"
+ depends on KVM && TRACEPOINTS
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0067-KVM-SVM-Reserve-ASID-range-for-SEV-guest.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0067-KVM-SVM-Reserve-ASID-range-for-SEV-guest.patch
new file mode 100644
index 00000000..906fdc79
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0067-KVM-SVM-Reserve-ASID-range-for-SEV-guest.patch
@@ -0,0 +1,56 @@
+From 30b685840d3edf4a1e0750c0f0f88acf0d2e1629 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:32 -0600
+Subject: [PATCH 67/95] KVM: SVM: Reserve ASID range for SEV guest
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+A SEV-enabled guest must use ASIDs from the defined subset, while non-SEV
+guests can use the remaining ASID range. The range of allowed SEV guest
+ASIDs is [1 - CPUID_8000_001F[ECX][31:0]].
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 69b5ce2..77914701 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -333,6 +333,8 @@ enum {
+
+ #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
++static unsigned int max_sev_asid;
++
+ static inline void mark_all_dirty(struct vmcb *vmcb)
+ {
+ vmcb->control.clean = 0;
+@@ -798,7 +800,7 @@ static int svm_hardware_enable(void)
+ sd->asid_generation = 1;
+ sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
+ sd->next_asid = sd->max_asid + 1;
+- sd->min_asid = 1;
++ sd->min_asid = max_sev_asid + 1;
+
+ gdt = get_current_gdt_rw();
+ sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0068-KVM-SVM-Add-sev-module_param.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0068-KVM-SVM-Add-sev-module_param.patch
new file mode 100644
index 00000000..ad28d312
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0068-KVM-SVM-Add-sev-module_param.patch
@@ -0,0 +1,112 @@
+From c4db443adf12808cf037cfe61c08dedc360aacee Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:33 -0600
+Subject: [PATCH 68/95] KVM: SVM: Add sev module_param
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The module parameter can be used to control the SEV feature support.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 77914701..df8e1e3 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -37,6 +37,7 @@
+ #include <linux/amd-iommu.h>
+ #include <linux/hashtable.h>
+ #include <linux/frame.h>
++#include <linux/psp-sev.h>
+
+ #include <asm/apic.h>
+ #include <asm/perf_event.h>
+@@ -298,6 +299,10 @@ module_param(vls, int, 0444);
+ static int vgif = true;
+ module_param(vgif, int, 0444);
+
++/* enable/disable SEV support */
++static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
++module_param(sev, int, 0444);
++
+ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+ static void svm_complete_interrupts(struct vcpu_svm *svm);
+@@ -1086,6 +1091,39 @@ static int avic_ga_log_notifier(u32 ga_tag)
+ return 0;
+ }
+
++static __init int sev_hardware_setup(void)
++{
++ struct sev_user_data_status *status;
++ int rc;
++
++ /* Maximum number of encrypted guests supported simultaneously */
++ max_sev_asid = cpuid_ecx(0x8000001F);
++
++ if (!max_sev_asid)
++ return 1;
++
++ status = kmalloc(sizeof(*status), GFP_KERNEL);
++ if (!status)
++ return 1;
++
++ /*
++ * Check SEV platform status.
++ *
++ * PLATFORM_STATUS can be called in any state, if we failed to query
++ * the PLATFORM status then either PSP firmware does not support SEV
++ * feature or SEV firmware is dead.
++ */
++ rc = sev_platform_status(status, NULL);
++ if (rc)
++ goto err;
++
++ pr_info("SEV supported\n");
++
++err:
++ kfree(status);
++ return rc;
++}
++
+ static __init int svm_hardware_setup(void)
+ {
+ int cpu;
+@@ -1121,6 +1159,17 @@ static __init int svm_hardware_setup(void)
+ kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
+ }
+
++ if (sev) {
++ if (boot_cpu_has(X86_FEATURE_SEV) &&
++ IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
++ r = sev_hardware_setup();
++ if (r)
++ sev = false;
++ } else {
++ sev = false;
++ }
++ }
++
+ for_each_possible_cpu(cpu) {
+ r = svm_cpu_init(cpu);
+ if (r)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0069-KVM-Define-SEV-key-management-command-id.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0069-KVM-Define-SEV-key-management-command-id.patch
new file mode 100644
index 00000000..ce56e286
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0069-KVM-Define-SEV-key-management-command-id.patch
@@ -0,0 +1,338 @@
+From eeedcb731ae3b3ede9ba37634a0ae021e6d6c6ad Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:33 -0600
+Subject: [PATCH 69/95] KVM: Define SEV key management command id
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Define Secure Encrypted Virtualization (SEV) key management command id
+and structure. The command definition is available in SEV KM spec
+0.14 (http://support.amd.com/TechDocs/55766_SEV-KM API_Specification.pdf)
+and Documentation/virtual/kvm/amd-memory-encryption.txt.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ .../virtual/kvm/amd-memory-encryption.rst | 202 +++++++++++++++++++++
+ include/uapi/linux/kvm.h | 80 ++++++++
+ 2 files changed, 282 insertions(+)
+
+diff --git a/Documentation/virtual/kvm/amd-memory-encryption.rst b/Documentation/virtual/kvm/amd-memory-encryption.rst
+index a8ef21e..71d6d25 100644
+--- a/Documentation/virtual/kvm/amd-memory-encryption.rst
++++ b/Documentation/virtual/kvm/amd-memory-encryption.rst
+@@ -43,3 +43,205 @@ setting the SEV bit before executing VMRUN.::
+ SEV hardware uses ASIDs to associate a memory encryption key with a VM.
+ Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value
+ defined in the CPUID 0x8000001f[ecx] field.
++
++SEV Key Management
++==================
++
++The SEV guest key management is handled by a separate processor called the AMD
++Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
++key management interface to perform common hypervisor activities such as
++encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
++information, see the SEV Key Management spec [api-spec]_
++
++KVM implements the following commands to support common lifecycle events of SEV
++guests, such as launching, running, snapshotting, migrating and decommissioning.
++
++1. KVM_SEV_INIT
++---------------
++
++The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform
++context. In a typical workflow, this command should be the first command issued.
++
++Returns: 0 on success, -negative on error
++
++2. KVM_SEV_LAUNCH_START
++-----------------------
++
++The KVM_SEV_LAUNCH_START command is used for creating the memory encryption
++context. To create the encryption context, user must provide a guest policy,
++the owner's public Diffie-Hellman (PDH) key and session information.
++
++Parameters: struct kvm_sev_launch_start (in/out)
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_launch_start {
++ __u32 handle; /* if zero then firmware creates a new handle */
++ __u32 policy; /* guest's policy */
++
++ __u64 dh_uaddr; /* userspace address pointing to the guest owner's PDH key */
++ __u32 dh_len;
++
++ __u64 session_addr; /* userspace address which points to the guest session information */
++ __u32 session_len;
++ };
++
++On success, the 'handle' field contains a new handle and on error, a negative value.
++
++For more details, see SEV spec Section 6.2.
++
++3. KVM_SEV_LAUNCH_UPDATE_DATA
++-----------------------------
++
++The KVM_SEV_LAUNCH_UPDATE_DATA is used for encrypting a memory region. It also
++calculates a measurement of the memory contents. The measurement is a signature
++of the memory contents that can be sent to the guest owner as an attestation
++that the memory was encrypted correctly by the firmware.
++
++Parameters (in): struct kvm_sev_launch_update_data
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_launch_update {
++ __u64 uaddr; /* userspace address to be encrypted (must be 16-byte aligned) */
++ __u32 len; /* length of the data to be encrypted (must be 16-byte aligned) */
++ };
++
++For more details, see SEV spec Section 6.3.
++
++4. KVM_SEV_LAUNCH_MEASURE
++-------------------------
++
++The KVM_SEV_LAUNCH_MEASURE command is used to retrieve the measurement of the
++data encrypted by the KVM_SEV_LAUNCH_UPDATE_DATA command. The guest owner may
++wait to provide the guest with confidential information until it can verify the
++measurement. Since the guest owner knows the initial contents of the guest at
++boot, the measurement can be verified by comparing it to what the guest owner
++expects.
++
++Parameters (in): struct kvm_sev_launch_measure
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_launch_measure {
++ __u64 uaddr; /* where to copy the measurement */
++ __u32 len; /* length of measurement blob */
++ };
++
++For more details on the measurement verification flow, see SEV spec Section 6.4.
++
++5. KVM_SEV_LAUNCH_FINISH
++------------------------
++
++After completion of the launch flow, the KVM_SEV_LAUNCH_FINISH command can be
++issued to make the guest ready for the execution.
++
++Returns: 0 on success, -negative on error
++
++6. KVM_SEV_GUEST_STATUS
++-----------------------
++
++The KVM_SEV_GUEST_STATUS command is used to retrieve status information about a
++SEV-enabled guest.
++
++Parameters (out): struct kvm_sev_guest_status
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_guest_status {
++ __u32 handle; /* guest handle */
++ __u32 policy; /* guest policy */
++ __u8 state; /* guest state (see enum below) */
++ };
++
++SEV guest state:
++
++::
++
++ enum {
++ SEV_STATE_INVALID = 0;
++ SEV_STATE_LAUNCHING, /* guest is currently being launched */
++ SEV_STATE_SECRET, /* guest is being launched and ready to accept the ciphertext data */
++ SEV_STATE_RUNNING, /* guest is fully launched and running */
++ SEV_STATE_RECEIVING, /* guest is being migrated in from another SEV machine */
++ SEV_STATE_SENDING /* guest is getting migrated out to another SEV machine */
++ };
++
++7. KVM_SEV_DBG_DECRYPT
++----------------------
++
++The KVM_SEV_DEBUG_DECRYPT command can be used by the hypervisor to request the
++firmware to decrypt the data at the given memory region.
++
++Parameters (in): struct kvm_sev_dbg
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_dbg {
++ __u64 src_uaddr; /* userspace address of data to decrypt */
++ __u64 dst_uaddr; /* userspace address of destination */
++ __u32 len; /* length of memory region to decrypt */
++ };
++
++The command returns an error if the guest policy does not allow debugging.
++
++8. KVM_SEV_DBG_ENCRYPT
++----------------------
++
++The KVM_SEV_DEBUG_ENCRYPT command can be used by the hypervisor to request the
++firmware to encrypt the data at the given memory region.
++
++Parameters (in): struct kvm_sev_dbg
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_dbg {
++ __u64 src_uaddr; /* userspace address of data to encrypt */
++ __u64 dst_uaddr; /* userspace address of destination */
++ __u32 len; /* length of memory region to encrypt */
++ };
++
++The command returns an error if the guest policy does not allow debugging.
++
++9. KVM_SEV_LAUNCH_SECRET
++------------------------
++
++The KVM_SEV_LAUNCH_SECRET command can be used by the hypervisor to inject secret
++data after the measurement has been validated by the guest owner.
++
++Parameters (in): struct kvm_sev_launch_secret
++
++Returns: 0 on success, -negative on error
++
++::
++
++ struct kvm_sev_launch_secret {
++ __u64 hdr_uaddr; /* userspace address containing the packet header */
++ __u32 hdr_len;
++
++ __u64 guest_uaddr; /* the guest memory region where the secret should be injected */
++ __u32 guest_len;
++
++ __u64 trans_uaddr; /* the hypervisor memory region which contains the secret */
++ __u32 trans_len;
++ };
++
++References
++==========
++
++.. [white-paper] http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2013/12/AMD_Memory_Encryption_Whitepaper_v7-Public.pdf
++.. [api-spec] http://support.amd.com/TechDocs/55766_SEV-KM%20API_Specification.pdf
++.. [amd-apm] http://support.amd.com/TechDocs/24593.pdf (section 15.34)
++.. [kvm-forum] http://www.linux-kvm.org/images/7/74/02x08A-Thomas_Lendacky-AMDs_Virtualizatoin_Memory_Encryption_Technology.pdf
+diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
+index 24f9ae2..ef6d9da 100644
+--- a/include/uapi/linux/kvm.h
++++ b/include/uapi/linux/kvm.h
+@@ -1371,6 +1371,86 @@ struct kvm_enc_region {
+ #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region)
+ #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region)
+
++/* Secure Encrypted Virtualization command */
++enum sev_cmd_id {
++ /* Guest initialization commands */
++ KVM_SEV_INIT = 0,
++ KVM_SEV_ES_INIT,
++ /* Guest launch commands */
++ KVM_SEV_LAUNCH_START,
++ KVM_SEV_LAUNCH_UPDATE_DATA,
++ KVM_SEV_LAUNCH_UPDATE_VMSA,
++ KVM_SEV_LAUNCH_SECRET,
++ KVM_SEV_LAUNCH_MEASURE,
++ KVM_SEV_LAUNCH_FINISH,
++ /* Guest migration commands (outgoing) */
++ KVM_SEV_SEND_START,
++ KVM_SEV_SEND_UPDATE_DATA,
++ KVM_SEV_SEND_UPDATE_VMSA,
++ KVM_SEV_SEND_FINISH,
++ /* Guest migration commands (incoming) */
++ KVM_SEV_RECEIVE_START,
++ KVM_SEV_RECEIVE_UPDATE_DATA,
++ KVM_SEV_RECEIVE_UPDATE_VMSA,
++ KVM_SEV_RECEIVE_FINISH,
++ /* Guest status and debug commands */
++ KVM_SEV_GUEST_STATUS,
++ KVM_SEV_DBG_DECRYPT,
++ KVM_SEV_DBG_ENCRYPT,
++ /* Guest certificates commands */
++ KVM_SEV_CERT_EXPORT,
++
++ KVM_SEV_NR_MAX,
++};
++
++struct kvm_sev_cmd {
++ __u32 id;
++ __u64 data;
++ __u32 error;
++ __u32 sev_fd;
++};
++
++struct kvm_sev_launch_start {
++ __u32 handle;
++ __u32 policy;
++ __u64 dh_uaddr;
++ __u32 dh_len;
++ __u64 session_uaddr;
++ __u32 session_len;
++};
++
++struct kvm_sev_launch_update_data {
++ __u64 uaddr;
++ __u32 len;
++};
++
++
++struct kvm_sev_launch_secret {
++ __u64 hdr_uaddr;
++ __u32 hdr_len;
++ __u64 guest_uaddr;
++ __u32 guest_len;
++ __u64 trans_uaddr;
++ __u32 trans_len;
++};
++
++struct kvm_sev_launch_measure {
++ __u64 uaddr;
++ __u32 len;
++};
++
++struct kvm_sev_guest_status {
++ __u32 handle;
++ __u32 policy;
++ __u32 state;
++};
++
++struct kvm_sev_dbg {
++ __u64 src_uaddr;
++ __u64 dst_uaddr;
++ __u32 len;
++};
++
+ #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+ #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
+ #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0070-KVM-SVM-Add-KVM_SEV_INIT-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0070-KVM-SVM-Add-KVM_SEV_INIT-command.patch
new file mode 100644
index 00000000..8b1a0147
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0070-KVM-SVM-Add-KVM_SEV_INIT-command.patch
@@ -0,0 +1,270 @@
+From f942889123c1393e5c89196bdc4e9cdee99f3aae Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 21:40:46 +0530
+Subject: [PATCH 70/95] KVM: SVM: Add KVM_SEV_INIT command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From 1654efcbc431a369397a20bf85e45870d15c8689
+The command initializes the SEV platform context and allocates a new ASID
+for this guest from the SEV ASID pool. The firmware must be initialized
+before we issue any guest launch commands to create a new memory encryption
+context.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 7 +++
+ arch/x86/kvm/svm.c | 131 +++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 137 insertions(+), 1 deletion(-)
+ mode change 100644 => 100755 arch/x86/kvm/svm.c
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 430eeb3..9cc7c30 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -752,6 +752,11 @@ enum kvm_irqchip_mode {
+ KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+
++struct kvm_sev_info {
++ bool active; /* SEV enabled guest */
++ unsigned int asid; /* ASID used for this guest */
++};
++
+ struct kvm_arch {
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+@@ -839,6 +844,8 @@ struct kvm_arch {
+
+ bool x2apic_format;
+ bool x2apic_broadcast_quirk_disabled;
++
++ struct kvm_sev_info sev_info;
+ };
+
+ struct kvm_vm_stat {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+old mode 100644
+new mode 100755
+index df8e1e3..c41635b
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -38,6 +38,7 @@
+ #include <linux/hashtable.h>
+ #include <linux/frame.h>
+ #include <linux/psp-sev.h>
++#include <linux/file.h>
+
+ #include <asm/apic.h>
+ #include <asm/perf_event.h>
+@@ -339,6 +340,20 @@ enum {
+ #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
+ static unsigned int max_sev_asid;
++static unsigned int min_sev_asid;
++static unsigned long *sev_asid_bitmap;
++
++static inline bool svm_sev_enabled(void)
++{
++ return max_sev_asid;
++}
++
++static inline bool sev_guest(struct kvm *kvm)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++
++ return sev->active;
++}
+
+ static inline void mark_all_dirty(struct vmcb *vmcb)
+ {
+@@ -1102,6 +1117,15 @@ static __init int sev_hardware_setup(void)
+ if (!max_sev_asid)
+ return 1;
+
++ /* Minimum ASID value that should be used for SEV guest */
++ min_sev_asid = cpuid_edx(0x8000001F);
++
++ /* Initialize SEV ASID bitmap */
++ sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
++ sizeof(unsigned long), GFP_KERNEL);
++ if (!sev_asid_bitmap)
++ return 1;
++
+ status = kmalloc(sizeof(*status), GFP_KERNEL);
+ if (!status)
+ return 1;
+@@ -1231,6 +1255,9 @@ static __exit void svm_hardware_unsetup(void)
+ {
+ int cpu;
+
++ if (svm_sev_enabled())
++ kfree(sev_asid_bitmap);
++
+ for_each_possible_cpu(cpu)
+ svm_cpu_uninit(cpu);
+
+@@ -1421,6 +1448,9 @@ static void init_vmcb(struct vcpu_svm *svm)
+ svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
+ }
+
++ if (sev_guest(svm->vcpu.kvm))
++ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
++
+ mark_all_dirty(svm->vmcb);
+
+ enable_gif(svm);
+@@ -1503,6 +1533,29 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
+ return 0;
+ }
+
++static void __sev_asid_free(int asid)
++{
++ int pos;
++
++ pos = asid - 1;
++ clear_bit(pos, sev_asid_bitmap);
++}
++
++static void sev_asid_free(struct kvm *kvm)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++
++ __sev_asid_free(sev->asid);
++}
++
++static void sev_vm_destroy(struct kvm *kvm)
++{
++ if (!sev_guest(kvm))
++ return;
++
++ sev_asid_free(kvm);
++}
++
+ static void avic_vm_destroy(struct kvm *kvm)
+ {
+ unsigned long flags;
+@@ -1521,6 +1574,12 @@ static void avic_vm_destroy(struct kvm *kvm)
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+ }
+
++static void svm_vm_destroy(struct kvm *kvm)
++{
++ avic_vm_destroy(kvm);
++ sev_vm_destroy(kvm);
++}
++
+ static int avic_vm_init(struct kvm *kvm)
+ {
+ unsigned long flags;
+@@ -5660,6 +5719,75 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
+ vcpu->arch.mcg_cap &= 0x1ff;
+ }
+
++static int sev_asid_new(void)
++{
++ int pos;
++
++ /*
++ * SEV-enabled guest must use asid from min_sev_asid to max_sev_asid.
++ */
++ pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
++ if (pos >= max_sev_asid)
++ return -EBUSY;
++
++ set_bit(pos, sev_asid_bitmap);
++ return pos + 1;
++}
++
++static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ int asid, ret;
++
++ ret = -EBUSY;
++ asid = sev_asid_new();
++ if (asid < 0)
++ return ret;
++
++ ret = sev_platform_init(&argp->error);
++ if (ret)
++ goto e_free;
++
++ sev->active = true;
++ sev->asid = asid;
++
++ return 0;
++
++e_free:
++ __sev_asid_free(asid);
++ return ret;
++}
++
++static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
++{
++ struct kvm_sev_cmd sev_cmd;
++ int r;
++
++ if (!svm_sev_enabled())
++ return -ENOTTY;
++
++ if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
++ return -EFAULT;
++
++ mutex_lock(&kvm->lock);
++
++ switch (sev_cmd.id) {
++ case KVM_SEV_INIT:
++ r = sev_guest_init(kvm, &sev_cmd);
++ break;
++ default:
++ r = -EINVAL;
++ goto out;
++ }
++
++ if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
++ r = -EFAULT;
++
++out:
++ mutex_unlock(&kvm->lock);
++ return r;
++}
++
+ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+@@ -5676,7 +5804,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .vcpu_reset = svm_vcpu_reset,
+
+ .vm_init = avic_vm_init,
+- .vm_destroy = avic_vm_destroy,
++ .vm_destroy = svm_vm_destroy,
+
+ .prepare_guest_switch = svm_prepare_guest_switch,
+ .vcpu_load = svm_vcpu_load,
+@@ -5771,6 +5899,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
++ .mem_enc_op = svm_mem_enc_op,
+ };
+
+ static int __init svm_init(void)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0071-KVM-SVM-VMRUN-should-use-associated-ASID-when-SEV-is.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0071-KVM-SVM-VMRUN-should-use-associated-ASID-when-SEV-is.patch
new file mode 100644
index 00000000..5cf128d8
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0071-KVM-SVM-VMRUN-should-use-associated-ASID-when-SEV-is.patch
@@ -0,0 +1,162 @@
+From 6d4852ea9d190fbb0607d7b067662b30dac7983e Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 22:13:30 +0530
+Subject: [PATCH 71/95] KVM: SVM: VMRUN should use associated ASID when SEV is
+ enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+ commit 70cd94e60c733e3afc18b0e6aab789c13b5571da
+
+ SEV hardware uses ASIDs to associate a memory encryption key with a
+ guest VM. During guest creation, a SEV VM uses the SEV_CMD_ACTIVATE
+ command to bind a particular ASID to the guest. Lets make sure that the
+ VMCB is programmed with the bound ASID before a VMRUN.
+
+ Cc: Thomas Gleixner <tglx@linutronix.de>
+ Cc: Ingo Molnar <mingo@redhat.com>
+ Cc: "H. Peter Anvin" <hpa@zytor.com>
+ Cc: Paolo Bonzini <pbonzini@redhat.com>
+ Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+ Cc: Joerg Roedel <joro@8bytes.org>
+ Cc: Borislav Petkov <bp@suse.de>
+ Cc: Tom Lendacky <thomas.lendacky@amd.com>
+ Cc: x86@kernel.org
+ Cc: kvm@vger.kernel.org
+ Cc: linux-kernel@vger.kernel.org
+ Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+ Reviewed-by: Borislav Petkov <bp@suse.de>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 56 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index c41635b..f4fc0b2 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -225,6 +225,9 @@ struct vcpu_svm {
+ */
+ struct list_head ir_list;
+ spinlock_t ir_list_lock;
++
++ /* which host CPU was used for running this vcpu */
++ unsigned int last_cpu;
+ };
+
+ /*
+@@ -355,6 +358,13 @@ static inline bool sev_guest(struct kvm *kvm)
+ return sev->active;
+ }
+
++static inline int sev_get_asid(struct kvm *kvm)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++
++ return sev->asid;
++}
++
+ static inline void mark_all_dirty(struct vmcb *vmcb)
+ {
+ vmcb->control.clean = 0;
+@@ -566,6 +576,9 @@ struct svm_cpu_data {
+
+ struct page *save_area;
+ struct vmcb *current_vmcb;
++
++ /* index = sev_asid, value = vmcb pointer */
++ struct vmcb **sev_vmcbs;
+ };
+
+ static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
+@@ -879,6 +892,7 @@ static void svm_cpu_uninit(int cpu)
+ return;
+
+ per_cpu(svm_data, raw_smp_processor_id()) = NULL;
++ kfree(sd->sev_vmcbs);
+ __free_page(sd->save_area);
+ kfree(sd);
+ }
+@@ -892,11 +906,18 @@ static int svm_cpu_init(int cpu)
+ if (!sd)
+ return -ENOMEM;
+ sd->cpu = cpu;
+- sd->save_area = alloc_page(GFP_KERNEL);
+ r = -ENOMEM;
++ sd->save_area = alloc_page(GFP_KERNEL);
+ if (!sd->save_area)
+ goto err_1;
+
++ if (svm_sev_enabled()) {
++ r = -ENOMEM;
++ sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
++ if (!sd->sev_vmcbs)
++ goto err_1;
++ }
++
+ per_cpu(svm_data, cpu) = sd;
+
+ return 0;
+@@ -1535,10 +1556,16 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
+
+ static void __sev_asid_free(int asid)
+ {
+- int pos;
++ struct svm_cpu_data *sd;
++ int cpu, pos;
+
+ pos = asid - 1;
+ clear_bit(pos, sev_asid_bitmap);
++
++ for_each_possible_cpu(cpu) {
++ sd = per_cpu(svm_data, cpu);
++ sd->sev_vmcbs[pos] = NULL;
++ }
+ }
+
+ static void sev_asid_free(struct kvm *kvm)
+@@ -4603,12 +4630,39 @@ static void reload_tss(struct kvm_vcpu *vcpu)
+ load_TR_desc();
+ }
+
++static void pre_sev_run(struct vcpu_svm *svm, int cpu)
++{
++ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
++ int asid = sev_get_asid(svm->vcpu.kvm);
++
++ /* Assign the asid allocated with this SEV guest */
++ svm->vmcb->control.asid = asid;
++
++ /*
++ * Flush guest TLB:
++ *
++ * 1) when different VMCB for the same ASID is to be run on the same host CPU.
++ * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
++ */
++ if (sd->sev_vmcbs[asid] == svm->vmcb &&
++ svm->last_cpu == cpu)
++ return;
++
++ svm->last_cpu = cpu;
++ sd->sev_vmcbs[asid] = svm->vmcb;
++ svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
++ mark_dirty(svm->vmcb, VMCB_ASID);
++}
++
+ static void pre_svm_run(struct vcpu_svm *svm)
+ {
+ int cpu = raw_smp_processor_id();
+
+ struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
+
++ if (sev_guest(svm->vcpu.kvm))
++ return pre_sev_run(svm, cpu);
++
+ /* FIXME: handle wraparound of asid_generation */
+ if (svm->asid_generation != sd->asid_generation)
+ new_asid(svm, sd);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0072-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_START-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0072-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_START-command.patch
new file mode 100644
index 00000000..1e7c7735
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0072-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_START-command.patch
@@ -0,0 +1,248 @@
+From fbb9ae1da5aef85415efc29ae3a90d3662cb937b Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 22:14:32 +0530
+Subject: [PATCH 72/95] KVM: SVM: Add support for KVM_SEV_LAUNCH_START command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+ commit 59414c989220825f970f38dbcbf11f18e817d73c
+
+ The KVM_SEV_LAUNCH_START command is used to create a memory encryption
+ context within the SEV firmware. In order to do so, the guest owner
+ should provide the guest's policy, its public Diffie-Hellman (PDH) key
+ and session information. The command implements the LAUNCH_START flow
+ defined in SEV spec Section 6.2.
+
+ Cc: Thomas Gleixner <tglx@linutronix.de>
+ Cc: Ingo Molnar <mingo@redhat.com>
+ Cc: "H. Peter Anvin" <hpa@zytor.com>
+ Cc: Paolo Bonzini <pbonzini@redhat.com>
+ Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+ Cc: Joerg Roedel <joro@8bytes.org>
+ Cc: Borislav Petkov <bp@suse.de>
+ Cc: Tom Lendacky <thomas.lendacky@amd.com>
+ Cc: x86@kernel.org
+ Cc: kvm@vger.kernel.org
+ Cc: linux-kernel@vger.kernel.org
+ Improvements-by: Borislav Petkov <bp@suse.de>
+ Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+ Reviewed-by: Borislav Petkov <bp@suse.de>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 2 +
+ arch/x86/kvm/svm.c | 157 +++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 158 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 9cc7c30..03ba288 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -755,6 +755,8 @@ enum kvm_irqchip_mode {
+ struct kvm_sev_info {
+ bool active; /* SEV enabled guest */
+ unsigned int asid; /* ASID used for this guest */
++ unsigned int handle; /* SEV firmware handle */
++ int fd; /* SEV device fd */
+ };
+
+ struct kvm_arch {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index f4fc0b2..2f6aa95 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1469,8 +1469,10 @@ static void init_vmcb(struct vcpu_svm *svm)
+ svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
+ }
+
+- if (sev_guest(svm->vcpu.kvm))
++ if (sev_guest(svm->vcpu.kvm)) {
+ svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
++ clr_exception_intercept(svm, UD_VECTOR);
++ }
+
+ mark_all_dirty(svm->vmcb);
+
+@@ -1575,11 +1577,45 @@ static void sev_asid_free(struct kvm *kvm)
+ __sev_asid_free(sev->asid);
+ }
+
++static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
++{
++ struct sev_data_decommission *decommission;
++ struct sev_data_deactivate *data;
++
++ if (!handle)
++ return;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return;
++
++ /* deactivate handle */
++ data->handle = handle;
++ sev_guest_deactivate(data, NULL);
++
++ wbinvd_on_all_cpus();
++ sev_guest_df_flush(NULL);
++ kfree(data);
++
++ decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
++ if (!decommission)
++ return;
++
++ /* decommission handle */
++ decommission->handle = handle;
++ sev_guest_decommission(decommission, NULL);
++
++ kfree(decommission);
++}
++
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++
+ if (!sev_guest(kvm))
+ return;
+
++ sev_unbind_asid(kvm, sev->handle);
+ sev_asid_free(kvm);
+ }
+
+@@ -5812,6 +5848,122 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return ret;
+ }
+
++static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
++{
++ struct sev_data_activate *data;
++ int asid = sev_get_asid(kvm);
++ int ret;
++
++ wbinvd_on_all_cpus();
++
++ ret = sev_guest_df_flush(error);
++ if (ret)
++ return ret;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ /* activate ASID on the given handle */
++ data->handle = handle;
++ data->asid = asid;
++ ret = sev_guest_activate(data, error);
++ kfree(data);
++
++ return ret;
++}
++
++static int sev_issue_cmd(int fd, int id, void *data, int *error)
++{
++ struct fd f;
++ int ret;
++
++ f = fdget(fd);
++ if (!f.file)
++ return -EBADF;
++
++ ret = sev_issue_cmd_external_user(f.file, id, data, error);
++
++ fdput(f);
++ return ret;
++}
++
++static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct sev_data_launch_start *start;
++ struct kvm_sev_launch_start params;
++ void *dh_blob, *session_blob;
++ int *error = &argp->error;
++ int ret;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
++ return -EFAULT;
++
++ start = kzalloc(sizeof(*start), GFP_KERNEL);
++ if (!start)
++ return -ENOMEM;
++
++ dh_blob = NULL;
++ if (params.dh_uaddr) {
++ dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
++ if (IS_ERR(dh_blob)) {
++ ret = PTR_ERR(dh_blob);
++ goto e_free;
++ }
++
++ start->dh_cert_address = __sme_set(__pa(dh_blob));
++ start->dh_cert_len = params.dh_len;
++ }
++
++ session_blob = NULL;
++ if (params.session_uaddr) {
++ session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
++ if (IS_ERR(session_blob)) {
++ ret = PTR_ERR(session_blob);
++ goto e_free_dh;
++ }
++
++ start->session_address = __sme_set(__pa(session_blob));
++ start->session_len = params.session_len;
++ }
++
++ start->handle = params.handle;
++ start->policy = params.policy;
++
++ /* create memory encryption context */
++ ret = sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
++ if (ret)
++ goto e_free_session;
++
++ /* Bind ASID to this guest */
++ ret = sev_bind_asid(kvm, start->handle, error);
++ if (ret)
++ goto e_free_session;
++
++ /* return handle to userspace */
++ params.handle = start->handle;
++ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
++ sev_unbind_asid(kvm, start->handle);
++ ret = -EFAULT;
++ goto e_free_session;
++ }
++
++ sev->handle = start->handle;
++ sev->fd = argp->sev_fd;
++
++e_free_session:
++ kfree(session_blob);
++e_free_dh:
++ kfree(dh_blob);
++e_free:
++ kfree(start);
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -5829,6 +5981,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_INIT:
+ r = sev_guest_init(kvm, &sev_cmd);
+ break;
++ case KVM_SEV_LAUNCH_START:
++ r = sev_launch_start(kvm, &sev_cmd);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0073-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_UPDATE_DATA-c.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0073-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_UPDATE_DATA-c.patch
new file mode 100644
index 00000000..e9bbfd97
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0073-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_UPDATE_DATA-c.patch
@@ -0,0 +1,298 @@
+From ff28163f1bd733eaa7f189136c21d87c60499f66 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:35 -0600
+Subject: [PATCH 73/95] KVM: SVM: Add support for KVM_SEV_LAUNCH_UPDATE_DATA
+ command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is used for encrypting the guest memory region using the VM
+encryption key (VEK) created during KVM_SEV_LAUNCH_START.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Improvements-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/svm.c | 191 +++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 190 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 03ba288..8493c469c 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -757,6 +757,7 @@ struct kvm_sev_info {
+ unsigned int asid; /* ASID used for this guest */
+ unsigned int handle; /* SEV firmware handle */
+ int fd; /* SEV device fd */
++ unsigned long pages_locked; /* Number of pages locked */
+ };
+
+ struct kvm_arch {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2f6aa95..6e9ddde 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -39,6 +39,8 @@
+ #include <linux/frame.h>
+ #include <linux/psp-sev.h>
+ #include <linux/file.h>
++#include <linux/pagemap.h>
++#include <linux/swap.h>
+
+ #include <asm/apic.h>
+ #include <asm/perf_event.h>
+@@ -345,6 +347,7 @@ enum {
+ static unsigned int max_sev_asid;
+ static unsigned int min_sev_asid;
+ static unsigned long *sev_asid_bitmap;
++#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+
+ static inline bool svm_sev_enabled(void)
+ {
+@@ -1608,6 +1611,83 @@ static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
+ kfree(decommission);
+ }
+
++static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
++ unsigned long ulen, unsigned long *n,
++ int write)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ unsigned long npages, npinned, size;
++ unsigned long locked, lock_limit;
++ struct page **pages;
++ int first, last;
++
++ /* Calculate number of pages. */
++ first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
++ last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
++ npages = (last - first + 1);
++
++ locked = sev->pages_locked + npages;
++ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
++ if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
++ pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
++ return NULL;
++ }
++
++ /* Avoid using vmalloc for smaller buffers. */
++ size = npages * sizeof(struct page *);
++ if (size > PAGE_SIZE)
++ pages = vmalloc(size);
++ else
++ pages = kmalloc(size, GFP_KERNEL);
++
++ if (!pages)
++ return NULL;
++
++ /* Pin the user virtual address. */
++ npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
++ if (npinned != npages) {
++ pr_err("SEV: Failure locking %lu pages.\n", npages);
++ goto err;
++ }
++
++ *n = npages;
++ sev->pages_locked = locked;
++
++ return pages;
++
++err:
++ if (npinned > 0)
++ release_pages(pages, npinned);
++
++ kvfree(pages);
++ return NULL;
++}
++
++static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
++ unsigned long npages)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++
++ release_pages(pages, npages);
++ kvfree(pages);
++ sev->pages_locked -= npages;
++}
++
++static void sev_clflush_pages(struct page *pages[], unsigned long npages)
++{
++ uint8_t *page_virtual;
++ unsigned long i;
++
++ if (npages == 0 || pages == NULL)
++ return;
++
++ for (i = 0; i < npages; i++) {
++ page_virtual = kmap_atomic(pages[i]);
++ clflush_cache_range(page_virtual, PAGE_SIZE);
++ kunmap_atomic(page_virtual);
++ }
++}
++
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+@@ -5873,7 +5953,7 @@ static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
+ return ret;
+ }
+
+-static int sev_issue_cmd(int fd, int id, void *data, int *error)
++static int __sev_issue_cmd(int fd, int id, void *data, int *error)
+ {
+ struct fd f;
+ int ret;
+@@ -5888,6 +5968,13 @@ static int sev_issue_cmd(int fd, int id, void *data, int *error)
+ return ret;
+ }
+
++static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++
++ return __sev_issue_cmd(sev->fd, id, data, error);
++}
++
+ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+@@ -5935,7 +6022,7 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ start->policy = params.policy;
+
+ /* create memory encryption context */
+- ret = sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
++ ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
+ if (ret)
+ goto e_free_session;
+
+@@ -5964,6 +6051,103 @@ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return ret;
+ }
+
++static int get_num_contig_pages(int idx, struct page **inpages,
++ unsigned long npages)
++{
++ unsigned long paddr, next_paddr;
++ int i = idx + 1, pages = 1;
++
++ /* find the number of contiguous pages starting from idx */
++ paddr = __sme_page_pa(inpages[idx]);
++ while (i < npages) {
++ next_paddr = __sme_page_pa(inpages[i++]);
++ if ((paddr + PAGE_SIZE) == next_paddr) {
++ pages++;
++ paddr = next_paddr;
++ continue;
++ }
++ break;
++ }
++
++ return pages;
++}
++
++static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_launch_update_data params;
++ struct sev_data_launch_update_data *data;
++ struct page **inpages;
++ int i, ret, pages;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
++ return -EFAULT;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ vaddr = params.uaddr;
++ size = params.len;
++ vaddr_end = vaddr + size;
++
++ /* Lock the user memory. */
++ inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
++ if (!inpages) {
++ ret = -ENOMEM;
++ goto e_free;
++ }
++
++ /*
++ * The LAUNCH_UPDATE command will perform in-place encryption of the
++ * memory content (i.e it will write the same memory region with C=1).
++ * It's possible that the cache may contain the data with C=0, i.e.,
++ * unencrypted so invalidate it first.
++ */
++ sev_clflush_pages(inpages, npages);
++
++ for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
++ int offset, len;
++
++ /*
++ * If the user buffer is not page-aligned, calculate the offset
++ * within the page.
++ */
++ offset = vaddr & (PAGE_SIZE - 1);
++
++ /* Calculate the number of pages that can be encrypted in one go. */
++ pages = get_num_contig_pages(i, inpages, npages);
++
++ len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
++
++ data->handle = sev->handle;
++ data->len = len;
++ data->address = __sme_page_pa(inpages[i]) + offset;
++ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
++ if (ret)
++ goto e_unpin;
++
++ size -= len;
++ next_vaddr = vaddr + len;
++ }
++
++e_unpin:
++ /* content of memory is updated, mark pages dirty */
++ for (i = 0; i < npages; i++) {
++ set_page_dirty_lock(inpages[i]);
++ mark_page_accessed(inpages[i]);
++ }
++ /* unlock the user pages */
++ sev_unpin_memory(kvm, inpages, npages);
++e_free:
++ kfree(data);
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -5984,6 +6168,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_LAUNCH_START:
+ r = sev_launch_start(kvm, &sev_cmd);
+ break;
++ case KVM_SEV_LAUNCH_UPDATE_DATA:
++ r = sev_launch_update_data(kvm, &sev_cmd);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0074-SEV-error-too-few-arguments-to-function-release_page.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0074-SEV-error-too-few-arguments-to-function-release_page.patch
new file mode 100644
index 00000000..3f1787b1
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0074-SEV-error-too-few-arguments-to-function-release_page.patch
@@ -0,0 +1,39 @@
+From 8e2188a2d2a2d01b1f5e7ad9052e8fc91f7080a7 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 22:35:18 +0530
+Subject: [PATCH 74/95] =?UTF-8?q?SEV=20error:=20too=20few=20arguments=20to?=
+ =?UTF-8?q?=20function=20=E2=80=98release=5Fpages=E2=80=99=20is=20fixed?=
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 6e9ddde..bdc08e2 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1657,7 +1657,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+
+ err:
+ if (npinned > 0)
+- release_pages(pages, npinned);
++ release_pages(pages, npinned, 0);
+
+ kvfree(pages);
+ return NULL;
+@@ -1668,7 +1668,7 @@ static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+ {
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+
+- release_pages(pages, npages);
++ release_pages(pages, npages, 0);
+ kvfree(pages);
+ sev->pages_locked -= npages;
+ }
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0075-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_MEASURE-comma.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0075-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_MEASURE-comma.patch
new file mode 100644
index 00000000..2a9ccbd7
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0075-KVM-SVM-Add-support-for-KVM_SEV_LAUNCH_MEASURE-comma.patch
@@ -0,0 +1,125 @@
+From ff79313c254df3bc3ca79168e30a75d15e39439a Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:36 -0600
+Subject: [PATCH 75/95] KVM: SVM: Add support for KVM_SEV_LAUNCH_MEASURE
+ command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is used to retrieve the measurement of contents encrypted
+through the KVM_SEV_LAUNCH_UPDATE_DATA command.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 74 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index bdc08e2..86e255b 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6148,6 +6148,77 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return ret;
+ }
+
++static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct sev_data_launch_measure *data;
++ struct kvm_sev_launch_measure params;
++ void *blob = NULL;
++ int ret;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
++ return -EFAULT;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ /* User wants to query the blob length */
++ if (!params.len)
++ goto cmd;
++
++ if (params.uaddr) {
++ if (params.len > SEV_FW_BLOB_MAX_SIZE) {
++ ret = -EINVAL;
++ goto e_free;
++ }
++
++ if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
++ ret = -EFAULT;
++ goto e_free;
++ }
++
++ ret = -ENOMEM;
++ blob = kmalloc(params.len, GFP_KERNEL);
++ if (!blob)
++ goto e_free;
++
++ data->address = __psp_pa(blob);
++ data->len = params.len;
++ }
++
++cmd:
++ data->handle = sev->handle;
++ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
++
++ /*
++ * If we query the session length, FW responded with expected data.
++ */
++ if (!params.len)
++ goto done;
++
++ if (ret)
++ goto e_free_blob;
++
++ if (blob) {
++ if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
++ ret = -EFAULT;
++ }
++
++done:
++ params.len = data->len;
++ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
++ ret = -EFAULT;
++e_free_blob:
++ kfree(blob);
++e_free:
++ kfree(data);
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -6171,6 +6242,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_LAUNCH_UPDATE_DATA:
+ r = sev_launch_update_data(kvm, &sev_cmd);
+ break;
++ case KVM_SEV_LAUNCH_MEASURE:
++ r = sev_launch_measure(kvm, &sev_cmd);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0076-KVM-SVM-Add-support-for-SEV-LAUNCH_FINISH-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0076-KVM-SVM-Add-support-for-SEV-LAUNCH_FINISH-command.patch
new file mode 100644
index 00000000..e1aeebcc
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0076-KVM-SVM-Add-support-for-SEV-LAUNCH_FINISH-command.patch
@@ -0,0 +1,72 @@
+From 523e467ebe09e8f940644928873a251e2fcc97bc Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:36 -0600
+Subject: [PATCH 76/95] KVM: SVM: Add support for SEV LAUNCH_FINISH command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is used for finalizing the SEV guest launch process.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 86e255b..a597efc 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6219,6 +6219,26 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return ret;
+ }
+
++static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct sev_data_launch_finish *data;
++ int ret;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->handle = sev->handle;
++ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
++
++ kfree(data);
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -6245,6 +6265,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_LAUNCH_MEASURE:
+ r = sev_launch_measure(kvm, &sev_cmd);
+ break;
++ case KVM_SEV_LAUNCH_FINISH:
++ r = sev_launch_finish(kvm, &sev_cmd);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0077-KVM-SVM-Add-support-for-SEV-GUEST_STATUS-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0077-KVM-SVM-Add-support-for-SEV-GUEST_STATUS-command.patch
new file mode 100644
index 00000000..740567b2
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0077-KVM-SVM-Add-support-for-SEV-GUEST_STATUS-command.patch
@@ -0,0 +1,82 @@
+From 9211cc7af177176a5e4f9779098eef8e063152fa Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:37 -0600
+Subject: [PATCH 77/95] KVM: SVM: Add support for SEV GUEST_STATUS command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is used for querying the SEV guest information.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 33 +++++++++++++++++++++++++++++++++
+ 1 file changed, 33 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index a597efc..f9eb8fa 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6239,6 +6239,36 @@ static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return ret;
+ }
+
++static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_guest_status params;
++ struct sev_data_guest_status *data;
++ int ret;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->handle = sev->handle;
++ ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
++ if (ret)
++ goto e_free;
++
++ params.policy = data->policy;
++ params.state = data->state;
++ params.handle = data->handle;
++
++ if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
++ ret = -EFAULT;
++e_free:
++ kfree(data);
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -6268,6 +6298,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_LAUNCH_FINISH:
+ r = sev_launch_finish(kvm, &sev_cmd);
+ break;
++ case KVM_SEV_GUEST_STATUS:
++ r = sev_guest_status(kvm, &sev_cmd);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0078-KVM-SVM-Add-support-for-SEV-DEBUG_DECRYPT-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0078-KVM-SVM-Add-support-for-SEV-DEBUG_DECRYPT-command.patch
new file mode 100644
index 00000000..5f7419db
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0078-KVM-SVM-Add-support-for-SEV-DEBUG_DECRYPT-command.patch
@@ -0,0 +1,202 @@
+From b087a322574fc94f760c1150b344191996543733 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:37 -0600
+Subject: [PATCH 78/95] KVM: SVM: Add support for SEV DEBUG_DECRYPT command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is used for decrypting a guest memory region for debug
+purposes.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 152 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 152 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index f9eb8fa..9c5962a 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6269,6 +6269,155 @@ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ return ret;
+ }
+
++static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
++ unsigned long dst, int size,
++ int *error, bool enc)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct sev_data_dbg *data;
++ int ret;
++
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ return -ENOMEM;
++
++ data->handle = sev->handle;
++ data->dst_addr = dst;
++ data->src_addr = src;
++ data->len = size;
++
++ ret = sev_issue_cmd(kvm,
++ enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
++ data, error);
++ kfree(data);
++ return ret;
++}
++
++static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
++ unsigned long dst_paddr, int sz, int *err)
++{
++ int offset;
++
++ /*
++ * Its safe to read more than we are asked, caller should ensure that
++ * destination has enough space.
++ */
++ src_paddr = round_down(src_paddr, 16);
++ offset = src_paddr & 15;
++ sz = round_up(sz + offset, 16);
++
++ return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
++}
++
++static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
++ unsigned long __user dst_uaddr,
++ unsigned long dst_paddr,
++ int size, int *err)
++{
++ struct page *tpage = NULL;
++ int ret, offset;
++
++ /* if inputs are not 16-byte then use intermediate buffer */
++ if (!IS_ALIGNED(dst_paddr, 16) ||
++ !IS_ALIGNED(paddr, 16) ||
++ !IS_ALIGNED(size, 16)) {
++ tpage = (void *)alloc_page(GFP_KERNEL);
++ if (!tpage)
++ return -ENOMEM;
++
++ dst_paddr = __sme_page_pa(tpage);
++ }
++
++ ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
++ if (ret)
++ goto e_free;
++
++ if (tpage) {
++ offset = paddr & 15;
++ if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
++ page_address(tpage) + offset, size))
++ ret = -EFAULT;
++ }
++
++e_free:
++ if (tpage)
++ __free_page(tpage);
++
++ return ret;
++}
++
++static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
++{
++ unsigned long vaddr, vaddr_end, next_vaddr;
++ unsigned long dst_vaddr, dst_vaddr_end;
++ struct page **src_p, **dst_p;
++ struct kvm_sev_dbg debug;
++ unsigned long n;
++ int ret, size;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
++ return -EFAULT;
++
++ vaddr = debug.src_uaddr;
++ size = debug.len;
++ vaddr_end = vaddr + size;
++ dst_vaddr = debug.dst_uaddr;
++ dst_vaddr_end = dst_vaddr + size;
++
++ for (; vaddr < vaddr_end; vaddr = next_vaddr) {
++ int len, s_off, d_off;
++
++ /* lock userspace source and destination page */
++ src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
++ if (!src_p)
++ return -EFAULT;
++
++ dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
++ if (!dst_p) {
++ sev_unpin_memory(kvm, src_p, n);
++ return -EFAULT;
++ }
++
++ /*
++ * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
++ * memory content (i.e it will write the same memory region with C=1).
++ * It's possible that the cache may contain the data with C=0, i.e.,
++ * unencrypted so invalidate it first.
++ */
++ sev_clflush_pages(src_p, 1);
++ sev_clflush_pages(dst_p, 1);
++
++ /*
++ * Since user buffer may not be page aligned, calculate the
++ * offset within the page.
++ */
++ s_off = vaddr & ~PAGE_MASK;
++ d_off = dst_vaddr & ~PAGE_MASK;
++ len = min_t(size_t, (PAGE_SIZE - s_off), size);
++
++ ret = __sev_dbg_decrypt_user(kvm,
++ __sme_page_pa(src_p[0]) + s_off,
++ dst_vaddr,
++ __sme_page_pa(dst_p[0]) + d_off,
++ len, &argp->error);
++
++ sev_unpin_memory(kvm, src_p, 1);
++ sev_unpin_memory(kvm, dst_p, 1);
++
++ if (ret)
++ goto err;
++
++ next_vaddr = vaddr + len;
++ dst_vaddr = dst_vaddr + len;
++ size -= len;
++ }
++err:
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -6301,6 +6450,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_GUEST_STATUS:
+ r = sev_guest_status(kvm, &sev_cmd);
+ break;
++ case KVM_SEV_DBG_DECRYPT:
++ r = sev_dbg_crypt(kvm, &sev_cmd, true);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0079-KVM-SVM-Add-support-for-SEV-DEBUG_ENCRYPT-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0079-KVM-SVM-Add-support-for-SEV-DEBUG_ENCRYPT-command.patch
new file mode 100644
index 00000000..6a73e408
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0079-KVM-SVM-Add-support-for-SEV-DEBUG_ENCRYPT-command.patch
@@ -0,0 +1,155 @@
+From 1c2d47539406957fffda1c33e8ab4108bcda2e69 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:37 -0600
+Subject: [PATCH 79/95] KVM: SVM: Add support for SEV DEBUG_ENCRYPT command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command copies a plaintext into guest memory and encrypts it using
+the VM encryption key. The command will be used for debug purposes
+(e.g. setting breakpoints through gdbserver)
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 98 +++++++++++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 93 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 9c5962a..94461e3 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6346,6 +6346,83 @@ static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
+ return ret;
+ }
+
++static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
++ unsigned long __user vaddr,
++ unsigned long dst_paddr,
++ unsigned long __user dst_vaddr,
++ int size, int *error)
++{
++ struct page *src_tpage = NULL;
++ struct page *dst_tpage = NULL;
++ int ret, len = size;
++
++ /* If source buffer is not aligned then use an intermediate buffer */
++ if (!IS_ALIGNED(vaddr, 16)) {
++ src_tpage = alloc_page(GFP_KERNEL);
++ if (!src_tpage)
++ return -ENOMEM;
++
++ if (copy_from_user(page_address(src_tpage),
++ (void __user *)(uintptr_t)vaddr, size)) {
++ __free_page(src_tpage);
++ return -EFAULT;
++ }
++
++ paddr = __sme_page_pa(src_tpage);
++ }
++
++ /*
++ * If destination buffer or length is not aligned then do read-modify-write:
++ * - decrypt destination in an intermediate buffer
++ * - copy the source buffer in an intermediate buffer
++ * - use the intermediate buffer as source buffer
++ */
++ if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
++ int dst_offset;
++
++ dst_tpage = alloc_page(GFP_KERNEL);
++ if (!dst_tpage) {
++ ret = -ENOMEM;
++ goto e_free;
++ }
++
++ ret = __sev_dbg_decrypt(kvm, dst_paddr,
++ __sme_page_pa(dst_tpage), size, error);
++ if (ret)
++ goto e_free;
++
++ /*
++ * If source is kernel buffer then use memcpy() otherwise
++ * copy_from_user().
++ */
++ dst_offset = dst_paddr & 15;
++
++ if (src_tpage)
++ memcpy(page_address(dst_tpage) + dst_offset,
++ page_address(src_tpage), size);
++ else {
++ if (copy_from_user(page_address(dst_tpage) + dst_offset,
++ (void __user *)(uintptr_t)vaddr, size)) {
++ ret = -EFAULT;
++ goto e_free;
++ }
++ }
++
++ paddr = __sme_page_pa(dst_tpage);
++ dst_paddr = round_down(dst_paddr, 16);
++ len = round_up(size, 16);
++ }
++
++ ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
++
++e_free:
++ if (src_tpage)
++ __free_page(src_tpage);
++ if (dst_tpage)
++ __free_page(dst_tpage);
++ return ret;
++}
++
+ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ {
+ unsigned long vaddr, vaddr_end, next_vaddr;
+@@ -6398,11 +6475,19 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ d_off = dst_vaddr & ~PAGE_MASK;
+ len = min_t(size_t, (PAGE_SIZE - s_off), size);
+
+- ret = __sev_dbg_decrypt_user(kvm,
+- __sme_page_pa(src_p[0]) + s_off,
+- dst_vaddr,
+- __sme_page_pa(dst_p[0]) + d_off,
+- len, &argp->error);
++ if (dec)
++ ret = __sev_dbg_decrypt_user(kvm,
++ __sme_page_pa(src_p[0]) + s_off,
++ dst_vaddr,
++ __sme_page_pa(dst_p[0]) + d_off,
++ len, &argp->error);
++ else
++ ret = __sev_dbg_encrypt_user(kvm,
++ __sme_page_pa(src_p[0]) + s_off,
++ vaddr,
++ __sme_page_pa(dst_p[0]) + d_off,
++ dst_vaddr,
++ len, &argp->error);
+
+ sev_unpin_memory(kvm, src_p, 1);
+ sev_unpin_memory(kvm, dst_p, 1);
+@@ -6453,6 +6538,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_DBG_DECRYPT:
+ r = sev_dbg_crypt(kvm, &sev_cmd, true);
+ break;
++ case KVM_SEV_DBG_ENCRYPT:
++ r = sev_dbg_crypt(kvm, &sev_cmd, false);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0080-KVM-SVM-Add-support-for-SEV-LAUNCH_SECRET-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0080-KVM-SVM-Add-support-for-SEV-LAUNCH_SECRET-command.patch
new file mode 100644
index 00000000..fa17dc72
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0080-KVM-SVM-Add-support-for-SEV-LAUNCH_SECRET-command.patch
@@ -0,0 +1,117 @@
+From df27d409eaaf9275190905f8714ca2c1ae2c19b5 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:38 -0600
+Subject: [PATCH 80/95] KVM: SVM: Add support for SEV LAUNCH_SECRET command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The command is used for injecting a secret into the guest memory region.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 68 insertions(+)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 94461e3..83a4dfe 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6503,6 +6503,71 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+ return ret;
+ }
+
++static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct sev_data_launch_secret *data;
++ struct kvm_sev_launch_secret params;
++ struct page **pages;
++ void *blob, *hdr;
++ unsigned long n;
++ int ret;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
++ return -EFAULT;
++
++ pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
++ if (!pages)
++ return -ENOMEM;
++
++ /*
++ * The secret must be copied into contiguous memory region, lets verify
++ * that userspace memory pages are contiguous before we issue command.
++ */
++ if (get_num_contig_pages(0, pages, n) != n) {
++ ret = -EINVAL;
++ goto e_unpin_memory;
++ }
++
++ ret = -ENOMEM;
++ data = kzalloc(sizeof(*data), GFP_KERNEL);
++ if (!data)
++ goto e_unpin_memory;
++
++ blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
++ if (IS_ERR(blob)) {
++ ret = PTR_ERR(blob);
++ goto e_free;
++ }
++
++ data->trans_address = __psp_pa(blob);
++ data->trans_len = params.trans_len;
++
++ hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
++ if (IS_ERR(hdr)) {
++ ret = PTR_ERR(hdr);
++ goto e_free_blob;
++ }
++	data->hdr_address = __psp_pa(hdr);
++	data->hdr_len = params.hdr_len;
++
++ data->handle = sev->handle;
++ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
++
++ kfree(hdr);
++
++e_free_blob:
++ kfree(blob);
++e_free:
++ kfree(data);
++e_unpin_memory:
++ sev_unpin_memory(kvm, pages, n);
++ return ret;
++}
++
+ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ {
+ struct kvm_sev_cmd sev_cmd;
+@@ -6541,6 +6606,9 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ case KVM_SEV_DBG_ENCRYPT:
+ r = sev_dbg_crypt(kvm, &sev_cmd, false);
+ break;
++ case KVM_SEV_LAUNCH_SECRET:
++ r = sev_launch_secret(kvm, &sev_cmd);
++ break;
+ default:
+ r = -EINVAL;
+ goto out;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch
new file mode 100644
index 00000000..0f0cdf18
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0081-KVM-SVM-Pin-guest-memory-when-SEV-is-active.patch
@@ -0,0 +1,231 @@
+From c8d6a6308c0d23867fa09ab68bf79dc06f4e92f5 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 22:39:29 +0530
+Subject: [PATCH 81/95] KVM: SVM: Pin guest memory when SEV is active
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From 1e80fdc09d121d8327cdf62eefbb5abadddca792
+
+The SEV memory encryption engine uses a tweak such that two identical
+plaintext pages at different location will have different ciphertext.
+So swapping or moving ciphertext of two pages will not result in
+plaintext being swapped. Relocating (or migrating) physical backing
+pages for a SEV guest will require some additional steps. The current SEV
+key management spec does not provide commands to swap or migrate (move)
+ciphertext pages. For now, we pin the guest memory registered through
+KVM_MEMORY_ENCRYPT_REG_REGION ioctl.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/svm.c | 132 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 133 insertions(+)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 8493c469c..13894c0 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -758,6 +758,7 @@ struct kvm_sev_info {
+ unsigned int handle; /* SEV firmware handle */
+ int fd; /* SEV device fd */
+ unsigned long pages_locked; /* Number of pages locked */
++ struct list_head regions_list; /* List of registered regions */
+ };
+
+ struct kvm_arch {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 83a4dfe..2dcfd9f 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -349,6 +349,14 @@ static unsigned int min_sev_asid;
+ static unsigned long *sev_asid_bitmap;
+ #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
+
++struct enc_region {
++ struct list_head list;
++ unsigned long npages;
++ struct page **pages;
++ unsigned long uaddr;
++ unsigned long size;
++};
++
+ static inline bool svm_sev_enabled(void)
+ {
+ return max_sev_asid;
+@@ -1688,13 +1696,46 @@ static void sev_clflush_pages(struct page *pages[], unsigned long npages)
+ }
+ }
+
++static void __unregister_enc_region_locked(struct kvm *kvm,
++ struct enc_region *region)
++{
++ /*
++ * The guest may change the memory encryption attribute from C=0 -> C=1
++ * or vice versa for this memory range. Lets make sure caches are
++ * flushed to ensure that guest data gets written into memory with
++ * correct C-bit.
++ */
++ sev_clflush_pages(region->pages, region->npages);
++
++ sev_unpin_memory(kvm, region->pages, region->npages);
++ list_del(&region->list);
++ kfree(region);
++}
++
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct list_head *head = &sev->regions_list;
++ struct list_head *pos, *q;
+
+ if (!sev_guest(kvm))
+ return;
+
++ mutex_lock(&kvm->lock);
++
++ /*
++ * if userspace was terminated before unregistering the memory regions
++ * then lets unpin all the registered memory.
++ */
++ if (!list_empty(head)) {
++ list_for_each_safe(pos, q, head) {
++ __unregister_enc_region_locked(kvm,
++ list_entry(pos, struct enc_region, list));
++ }
++ }
++
++ mutex_unlock(&kvm->lock);
++
+ sev_unbind_asid(kvm, sev->handle);
+ sev_asid_free(kvm);
+ }
+@@ -5920,6 +5961,7 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ sev->active = true;
+ sev->asid = asid;
++ INIT_LIST_HEAD(&sev->regions_list);
+
+ return 0;
+
+@@ -6622,6 +6664,94 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ return r;
+ }
+
++static int svm_register_enc_region(struct kvm *kvm,
++ struct kvm_enc_region *range)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct enc_region *region;
++ int ret = 0;
++
++ if (!sev_guest(kvm))
++ return -ENOTTY;
++
++ region = kzalloc(sizeof(*region), GFP_KERNEL);
++ if (!region)
++ return -ENOMEM;
++
++ region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
++ if (!region->pages) {
++ ret = -ENOMEM;
++ goto e_free;
++ }
++
++ /*
++ * The guest may change the memory encryption attribute from C=0 -> C=1
++ * or vice versa for this memory range. Lets make sure caches are
++ * flushed to ensure that guest data gets written into memory with
++ * correct C-bit.
++ */
++ sev_clflush_pages(region->pages, region->npages);
++
++ region->uaddr = range->addr;
++ region->size = range->size;
++
++ mutex_lock(&kvm->lock);
++ list_add_tail(&region->list, &sev->regions_list);
++ mutex_unlock(&kvm->lock);
++
++ return ret;
++
++e_free:
++ kfree(region);
++ return ret;
++}
++
++static struct enc_region *
++find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
++{
++ struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct list_head *head = &sev->regions_list;
++ struct enc_region *i;
++
++ list_for_each_entry(i, head, list) {
++ if (i->uaddr == range->addr &&
++ i->size == range->size)
++ return i;
++ }
++
++ return NULL;
++}
++
++
++static int svm_unregister_enc_region(struct kvm *kvm,
++ struct kvm_enc_region *range)
++{
++ struct enc_region *region;
++ int ret;
++
++ mutex_lock(&kvm->lock);
++
++ if (!sev_guest(kvm)) {
++ ret = -ENOTTY;
++ goto failed;
++ }
++
++ region = find_enc_region(kvm, range);
++ if (!region) {
++ ret = -EINVAL;
++ goto failed;
++ }
++
++ __unregister_enc_region_locked(kvm, region);
++
++ mutex_unlock(&kvm->lock);
++ return 0;
++
++failed:
++ mutex_unlock(&kvm->lock);
++ return ret;
++}
++
+ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .cpu_has_kvm_support = has_svm,
+ .disabled_by_bios = is_disabled,
+@@ -6734,6 +6864,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
+ .mem_enc_op = svm_mem_enc_op,
++ .mem_enc_reg_region = svm_register_enc_region,
++ .mem_enc_unreg_region = svm_unregister_enc_region,
+ };
+
+ static int __init svm_init(void)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0082-KVM-SVM-limit-kvm_handle_page_fault-to-PF-handling.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0082-KVM-SVM-limit-kvm_handle_page_fault-to-PF-handling.patch
new file mode 100644
index 00000000..f3775839
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0082-KVM-SVM-limit-kvm_handle_page_fault-to-PF-handling.patch
@@ -0,0 +1,111 @@
+From 656ae55f24476e152600b6109dbdb517f883dc77 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 22:48:00 +0530
+Subject: [PATCH 82/95] KVM: SVM: limit kvm_handle_page_fault to #PF handling
+
+ commit d0006530576f1c7a49b2010eac7afdcb5a3613ae
+
+ It has always annoyed me a bit how SVM_EXIT_NPF is handled by
+ pf_interception. This is also the only reason behind the
+ under-documented need_unprotect argument to kvm_handle_page_fault.
+ Let NPF go straight to kvm_mmu_page_fault, just like VMX
+ does in handle_ept_violation and handle_ept_misconfig.
+
+ Reviewed-by: Brijesh Singh <brijesh.singh@amd.com>
+ Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/mmu.c | 5 ++---
+ arch/x86/kvm/mmu.h | 3 +--
+ arch/x86/kvm/svm.c | 15 +++++++++++++--
+ arch/x86/kvm/vmx.c | 3 +--
+ 4 files changed, 17 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 1dfb808..1408d79 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -3853,8 +3853,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+ }
+
+ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+- u64 fault_address, char *insn, int insn_len,
+- bool need_unprotect)
++ u64 fault_address, char *insn, int insn_len)
+ {
+ int r = 1;
+
+@@ -3863,7 +3862,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+ default:
+ trace_kvm_page_fault(fault_address, error_code);
+
+- if (need_unprotect && kvm_event_needs_reinjection(vcpu))
++ if (kvm_event_needs_reinjection(vcpu))
+ kvm_mmu_unprotect_page_virt(vcpu, fault_address);
+ r = kvm_mmu_page_fault(vcpu, fault_address, error_code, insn,
+ insn_len);
+diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
+index efc8576..5b408c0 100644
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -66,8 +66,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
+ bool accessed_dirty);
+ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
+ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
+- u64 fault_address, char *insn, int insn_len,
+- bool need_unprotect);
++ u64 fault_address, char *insn, int insn_len);
+
+ static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+ {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 2dcfd9f..364531e 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2488,7 +2488,18 @@ static int pf_interception(struct vcpu_svm *svm)
+
+ return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+ svm->vmcb->control.insn_bytes,
+- svm->vmcb->control.insn_len, !npt_enabled);
++ svm->vmcb->control.insn_len);
++}
++
++static int npf_interception(struct vcpu_svm *svm)
++{
++ u64 fault_address = svm->vmcb->control.exit_info_2;
++ u64 error_code = svm->vmcb->control.exit_info_1;
++
++ trace_kvm_page_fault(fault_address, error_code);
++ return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
++ svm->vmcb->control.insn_bytes,
++ svm->vmcb->control.insn_len);
+ }
+
+ static int db_interception(struct vcpu_svm *svm)
+@@ -4586,7 +4597,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ [SVM_EXIT_MONITOR] = monitor_interception,
+ [SVM_EXIT_MWAIT] = mwait_interception,
+ [SVM_EXIT_XSETBV] = xsetbv_interception,
+- [SVM_EXIT_NPF] = pf_interception,
++ [SVM_EXIT_NPF] = npf_interception,
+ [SVM_EXIT_RSM] = emulate_on_interception,
+ [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
+ [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index fd46d89..815ff09 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -6344,8 +6344,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
+ cr2 = vmcs_readl(EXIT_QUALIFICATION);
+ /* EPT won't cause page fault directly */
+ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+- return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
+- true);
++ return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
+ }
+
+ ex_no = intr_info & INTR_INFO_VECTOR_MASK;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0083-KVM-SVM-Clear-C-bit-from-the-page-fault-address.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0083-KVM-SVM-Clear-C-bit-from-the-page-fault-address.patch
new file mode 100644
index 00000000..39c4234e
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0083-KVM-SVM-Clear-C-bit-from-the-page-fault-address.patch
@@ -0,0 +1,54 @@
+From 02447b560c6a421194c98f9528c61b563925f9c0 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:39 -0600
+Subject: [PATCH 83/95] KVM: SVM: Clear C-bit from the page fault address
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+When SEV is active, on #VMEXIT the page fault address will contain the
+C-bit. We must clear the C-bit before handling the fault.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 364531e..331b127 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2483,7 +2483,7 @@ static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
+
+ static int pf_interception(struct vcpu_svm *svm)
+ {
+- u64 fault_address = svm->vmcb->control.exit_info_2;
++ u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+ u64 error_code = svm->vmcb->control.exit_info_1;
+
+ return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+@@ -2493,7 +2493,7 @@ static int pf_interception(struct vcpu_svm *svm)
+
+ static int npf_interception(struct vcpu_svm *svm)
+ {
+- u64 fault_address = svm->vmcb->control.exit_info_2;
++ u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
+ u64 error_code = svm->vmcb->control.exit_info_1;
+
+ trace_kvm_page_fault(fault_address, error_code);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0084-KVM-X86-Restart-the-guest-when-insn_len-is-zero-and-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0084-KVM-X86-Restart-the-guest-when-insn_len-is-zero-and-.patch
new file mode 100644
index 00000000..984f10e2
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0084-KVM-X86-Restart-the-guest-when-insn_len-is-zero-and-.patch
@@ -0,0 +1,88 @@
+From cc62d922be657a89f3c8afbfd97f5cfc37a5a036 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 4 Dec 2017 10:57:40 -0600
+Subject: [PATCH 84/95] KVM: X86: Restart the guest when insn_len is zero and
+ SEV is enabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+On AMD platforms, under certain conditions insn_len may be zero on #NPF.
+This can happen if a guest gets a page-fault on data access but the HW
+table walker is not able to read the instruction page (e.g instruction
+page is not present in memory).
+
+Typically, when insn_len is zero, x86_emulate_instruction() walks the
+guest page table and fetches the instruction bytes from guest memory.
+When SEV is enabled, the guest memory is encrypted with guest-specific
+key hence hypervisor will not able to fetch the instruction bytes.
+In those cases we simply restart the guest.
+
+I have encountered this issue when running kernbench inside the guest.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: "Radim Krčmář" <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: x86@kernel.org
+Cc: kvm@vger.kernel.org
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/mmu.c | 10 ++++++++++
+ arch/x86/kvm/svm.c | 6 ++++--
+ 2 files changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
+index 1408d79..fcab730 100644
+--- a/arch/x86/kvm/mmu.c
++++ b/arch/x86/kvm/mmu.c
+@@ -4987,6 +4987,16 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code,
+ if (mmio_info_in_cache(vcpu, cr2, direct))
+ emulation_type = 0;
+ emulate:
++ /*
++ * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
++ * This can happen if a guest gets a page-fault on data access but the HW
++ * table walker is not able to read the instruction page (e.g instruction
++ * page is not present in memory). In those cases we simply restart the
++ * guest.
++ */
++ if (unlikely(insn && !insn_len))
++ return 1;
++
+ er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
+
+ switch (er) {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 331b127..399ad5e 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -2487,7 +2487,8 @@ static int pf_interception(struct vcpu_svm *svm)
+ u64 error_code = svm->vmcb->control.exit_info_1;
+
+ return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
+- svm->vmcb->control.insn_bytes,
++ static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
++ svm->vmcb->control.insn_bytes : NULL,
+ svm->vmcb->control.insn_len);
+ }
+
+@@ -2498,7 +2499,8 @@ static int npf_interception(struct vcpu_svm *svm)
+
+ trace_kvm_page_fault(fault_address, error_code);
+ return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+- svm->vmcb->control.insn_bytes,
++ static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
++ svm->vmcb->control.insn_bytes : NULL,
+ svm->vmcb->control.insn_len);
+ }
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0085-x86-mm-Unbreak-modules-that-use-the-DMA-API.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0085-x86-mm-Unbreak-modules-that-use-the-DMA-API.patch
new file mode 100644
index 00000000..f6d1f9ff
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0085-x86-mm-Unbreak-modules-that-use-the-DMA-API.patch
@@ -0,0 +1,56 @@
+From dcd7c1da7989f960deccc1b1a0e503de1070c324 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 15 Dec 2017 10:20:12 -0600
+Subject: [PATCH 85/95] x86/mm: Unbreak modules that use the DMA API
+
+Commit d8aa7eea78a1 ("x86/mm: Add Secure Encrypted Virtualization (SEV)
+support") changed sme_active() from an inline function that referenced
+sme_me_mask to a non-inlined function in order to make the sev_enabled
+variable a static variable. This function was marked EXPORT_SYMBOL_GPL
+because at the time the patch was submitted, sme_me_mask was marked
+EXPORT_SYMBOL_GPL.
+
+Commit 87df26175e67 ("x86/mm: Unbreak modules that rely on external
+PAGE_KERNEL availability") changed sme_me_mask variable from
+EXPORT_SYMBOL_GPL to EXPORT_SYMBOL, allowing external modules the ability
+to build with CONFIG_AMD_MEM_ENCRYPT=y. Now, however, with sev_active()
+no longer an inline function and marked as EXPORT_SYMBOL_GPL, external
+modules that use the DMA API are once again broken in 4.15. Since the DMA
+API is meant to be used by external modules, this needs to be changed.
+
+Change the sme_active() and sev_active() functions from EXPORT_SYMBOL_GPL
+to EXPORT_SYMBOL.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Link: https://lkml.kernel.org/r/20171215162011.14125.7113.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/mm/mem_encrypt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
+index 6d59032..5b023f2 100755
+--- a/arch/x86/mm/mem_encrypt.c
++++ b/arch/x86/mm/mem_encrypt.c
+@@ -406,13 +406,13 @@ bool sme_active(void)
+ {
+ return sme_me_mask && !sev_enabled;
+ }
+-EXPORT_SYMBOL_GPL(sme_active);
++EXPORT_SYMBOL(sme_active);
+
+ bool sev_active(void)
+ {
+ return sme_me_mask && sev_enabled;
+ }
+-EXPORT_SYMBOL_GPL(sev_active);
++EXPORT_SYMBOL(sev_active);
+
+ static const struct dma_map_ops sev_dma_ops = {
+ .alloc = sev_alloc,
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0086-x86-mm-Encrypt-the-initrd-earlier-for-BSP-microcode-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0086-x86-mm-Encrypt-the-initrd-earlier-for-BSP-microcode-.patch
new file mode 100644
index 00000000..4217bf56
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0086-x86-mm-Encrypt-the-initrd-earlier-for-BSP-microcode-.patch
@@ -0,0 +1,54 @@
+From 853fcc15e4523f42f229d49844c11f5ad1faaa8b Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 14 Aug 2018 23:20:36 +0530
+Subject: [PATCH 86/95] x86/mm: Encrypt the initrd earlier for BSP microcode
+ update
+
+ From 107cd2532181b96c549e8f224cdcca8631c3076b
+
+Currently the BSP microcode update code examines the initrd very early
+in the boot process. If SME is active, the initrd is treated as being
+encrypted but it has not been encrypted (in place) yet. Update the
+early boot code that encrypts the kernel to also encrypt the initrd so
+that early BSP microcode updates work.
+
+Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/20180110192634.6026.10452.stgit@tlendack-t1.amdoffice.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kernel/setup.c | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index aa23f8c..dcb00ac 100755
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -376,16 +376,6 @@ static void __init reserve_initrd(void)
+ !ramdisk_image || !ramdisk_size)
+ return; /* No initrd provided by bootloader */
+
+- /*
+- * If SME is active, this memory will be marked encrypted by the
+- * kernel when it is accessed (including relocation). However, the
+- * ramdisk image was loaded decrypted by the bootloader, so make
+- * sure that it is encrypted before accessing it. For SEV the
+- * ramdisk will already be encrypted, so only do this for SME.
+- */
+- if (sme_active())
+- sme_early_encrypt(ramdisk_image, ramdisk_end - ramdisk_image);
+-
+ initrd_start = 0;
+
+ mapped_size = memblock_mem_size(max_pfn_mapped);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0087-KVM-x86-prefer-depends-on-to-select-for-SEV.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0087-KVM-x86-prefer-depends-on-to-select-for-SEV.patch
new file mode 100644
index 00000000..f1f75416
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0087-KVM-x86-prefer-depends-on-to-select-for-SEV.patch
@@ -0,0 +1,36 @@
+From e4a578de658c458c98efe088f7bb763923d6de88 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 11 Jan 2018 13:10:38 +0100
+Subject: [PATCH 87/95] KVM: x86: prefer "depends on" to "select" for SEV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Avoid reverse dependencies. Instead, SEV will only be enabled if
+the PSP driver is available.
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/Kconfig | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
+index 148ea32..92fd433 100644
+--- a/arch/x86/kvm/Kconfig
++++ b/arch/x86/kvm/Kconfig
+@@ -85,9 +85,7 @@ config KVM_AMD_SEV
+ def_bool y
+ bool "AMD Secure Encrypted Virtualization (SEV) support"
+ depends on KVM_AMD && X86_64
+- select CRYPTO_DEV_CCP
+- select CRYPTO_DEV_CCP_DD
+- select CRYPTO_DEV_SP_PSP
++ depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+ ---help---
+ Provides support for launching Encrypted VMs on AMD processors.
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0088-KVM-SVM-no-need-to-call-access_ok-in-LAUNCH_MEASURE-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0088-KVM-SVM-no-need-to-call-access_ok-in-LAUNCH_MEASURE-.patch
new file mode 100644
index 00000000..5d0f49bf
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0088-KVM-SVM-no-need-to-call-access_ok-in-LAUNCH_MEASURE-.patch
@@ -0,0 +1,92 @@
+From a63bec5a4d29dba22ee32a0f5af779e17000e415 Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Fri, 23 Feb 2018 12:36:50 -0600
+Subject: [PATCH 88/95] KVM: SVM: no need to call access_ok() in LAUNCH_MEASURE
+ command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Using the access_ok() to validate the input before issuing the SEV
+command does not buy us anything in this case. If userland is
+giving us a garbage pointer then copy_to_user() will catch it when we try
+to return the measurement.
+
+Suggested-by: Al Viro <viro@ZenIV.linux.org.uk>
+Fixes: 0d0736f76347 (KVM: SVM: Add support for KVM_SEV_LAUNCH_MEASURE ...)
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-kernel@vger.kernel.org
+Cc: Joerg Roedel <joro@8bytes.org>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 399ad5e..8e60fbc 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6205,16 +6205,18 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
++ void __user *measure = (void __user *)(uintptr_t)argp->data;
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+ struct sev_data_launch_measure *data;
+ struct kvm_sev_launch_measure params;
++ void __user *p = NULL;
+ void *blob = NULL;
+ int ret;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+
+- if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
++ if (copy_from_user(&params, measure, sizeof(params)))
+ return -EFAULT;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+@@ -6225,17 +6227,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ if (!params.len)
+ goto cmd;
+
+- if (params.uaddr) {
++ p = (void __user *)(uintptr_t)params.uaddr;
++ if (p) {
+ if (params.len > SEV_FW_BLOB_MAX_SIZE) {
+ ret = -EINVAL;
+ goto e_free;
+ }
+
+- if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) {
+- ret = -EFAULT;
+- goto e_free;
+- }
+-
+ ret = -ENOMEM;
+ blob = kmalloc(params.len, GFP_KERNEL);
+ if (!blob)
+@@ -6259,13 +6257,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ goto e_free_blob;
+
+ if (blob) {
+- if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len))
++ if (copy_to_user(p, blob, params.len))
+ ret = -EFAULT;
+ }
+
+ done:
+ params.len = data->len;
+- if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
++ if (copy_to_user(measure, &params, sizeof(params)))
+ ret = -EFAULT;
+ e_free_blob:
+ kfree(blob);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0089-KVM-SVM-install-RSM-intercept.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0089-KVM-SVM-install-RSM-intercept.patch
new file mode 100644
index 00000000..f23de0d2
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0089-KVM-SVM-install-RSM-intercept.patch
@@ -0,0 +1,78 @@
+From 2cd3e8ceaff90b03db00b7326ba63df8c64c7db8 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 15 Aug 2018 08:53:49 +0530
+Subject: [PATCH 89/95] KVM: SVM: install RSM intercept
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From 7607b7174405aec7441ff6c970833c463114040a
+
+RSM instruction is used by the SMM handler to return from SMM mode.
+Currently, rsm causes a #UD - which results in instruction fetch, decode,
+and emulate. By installing the RSM intercept we can avoid the instruction
+fetch since we know that #VMEXIT was due to rsm.
+
+The patch is required for the SEV guest, because in case of SEV guest
+memory is encrypted with guest-specific key and hypervisor will not
+able to fetch the instruction bytes from the guest memory.
+
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 8e60fbc..6352a6c 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -309,6 +309,8 @@ module_param(vgif, int, 0444);
+ static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
+ module_param(sev, int, 0444);
+
++static u8 rsm_ins_bytes[] = "\x0f\xaa";
++
+ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+ static void svm_flush_tlb(struct kvm_vcpu *vcpu);
+ static void svm_complete_interrupts(struct vcpu_svm *svm);
+@@ -1395,6 +1397,7 @@ static void init_vmcb(struct vcpu_svm *svm)
+ set_intercept(svm, INTERCEPT_SKINIT);
+ set_intercept(svm, INTERCEPT_WBINVD);
+ set_intercept(svm, INTERCEPT_XSETBV);
++ set_intercept(svm, INTERCEPT_RSM);
+
+ if (!kvm_mwait_in_guest()) {
+ set_intercept(svm, INTERCEPT_MONITOR);
+@@ -3707,6 +3710,12 @@ static int emulate_on_interception(struct vcpu_svm *svm)
+ return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
+ }
+
++static int rsm_interception(struct vcpu_svm *svm)
++{
++ return x86_emulate_instruction(&svm->vcpu, 0, 0,
++ rsm_ins_bytes, 2) == EMULATE_DONE;
++}
++
+ static int rdpmc_interception(struct vcpu_svm *svm)
+ {
+ int err;
+@@ -4600,7 +4609,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
+ [SVM_EXIT_MWAIT] = mwait_interception,
+ [SVM_EXIT_XSETBV] = xsetbv_interception,
+ [SVM_EXIT_NPF] = npf_interception,
+- [SVM_EXIT_RSM] = emulate_on_interception,
++ [SVM_EXIT_RSM] = rsm_interception,
+ [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
+ [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
+ };
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0090-KVM-SVM-Fix-SEV-LAUNCH_SECRET-command.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0090-KVM-SVM-Fix-SEV-LAUNCH_SECRET-command.patch
new file mode 100644
index 00000000..1aec08a2
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0090-KVM-SVM-Fix-SEV-LAUNCH_SECRET-command.patch
@@ -0,0 +1,64 @@
+From 0c47ce82da47eebc2e05744bcbebcc4976ea6ada Mon Sep 17 00:00:00 2001
+From: Brijesh Singh <brijesh.singh@amd.com>
+Date: Mon, 19 Feb 2018 10:13:25 -0600
+Subject: [PATCH 90/95] KVM: SVM: Fix SEV LAUNCH_SECRET command
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The SEV LAUNCH_SECRET command fails with error code 'invalid param'
+because we missed filling the guest and header system physical address
+while issuing the command.
+
+Fixes: 9f5b5b950aa9 (KVM: SVM: Add support for SEV LAUNCH_SECRET command)
+Cc: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: linux-kernel@vger.kernel.org
+Cc: Joerg Roedel <joro@8bytes.org>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/kvm/svm.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 6352a6c..e7815a3 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -6573,7 +6573,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ struct page **pages;
+ void *blob, *hdr;
+ unsigned long n;
+- int ret;
++ int ret, offset;
+
+ if (!sev_guest(kvm))
+ return -ENOTTY;
+@@ -6599,6 +6599,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ if (!data)
+ goto e_unpin_memory;
+
++ offset = params.guest_uaddr & (PAGE_SIZE - 1);
++ data->guest_address = __sme_page_pa(pages[0]) + offset;
++ data->guest_len = params.guest_len;
++
+ blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
+ if (IS_ERR(blob)) {
+ ret = PTR_ERR(blob);
+@@ -6613,8 +6617,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ ret = PTR_ERR(hdr);
+ goto e_free_blob;
+ }
+- data->trans_address = __psp_pa(blob);
+- data->trans_len = params.trans_len;
++ data->hdr_address = __psp_pa(hdr);
++ data->hdr_len = params.hdr_len;
+
+ data->handle = sev->handle;
+ ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0091-KVM-x86-define-SVM-VMX-specific-kvm_arch_-alloc-free.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0091-KVM-x86-define-SVM-VMX-specific-kvm_arch_-alloc-free.patch
new file mode 100644
index 00000000..7573bb34
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0091-KVM-x86-define-SVM-VMX-specific-kvm_arch_-alloc-free.patch
@@ -0,0 +1,123 @@
+From a91cb665ad19c0f1e7adc797137a40b2b784298f Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 23 Oct 2018 12:18:07 +0530
+Subject: [PATCH 91/95] KVM: x86: define SVM/VMX specific
+ kvm_arch_[alloc|free]_vm
+
+From 434a1e94469d3b603f1efabfb044182de4cf88ef
+
+Define kvm_arch_[alloc|free]_vm in x86 as pass through functions
+to new kvm_x86_ops vm_alloc and vm_free, and move the current
+allocation logic as-is to SVM and VMX. Vendor specific alloc/free
+functions set the stage for SVM/VMX wrappers of 'struct kvm',
+which will allow us to move the growing number of SVM/VMX specific
+member variables out of 'struct kvm_arch'.
+
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 13 +++++++++++++
+ arch/x86/kvm/svm.c | 12 ++++++++++++
+ arch/x86/kvm/vmx.c | 13 +++++++++++++
+ 3 files changed, 38 insertions(+)
+ mode change 100644 => 100755 arch/x86/kvm/vmx.c
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 13894c0..0b2bcd2 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -928,6 +928,8 @@ struct kvm_x86_ops {
+ bool (*has_emulated_msr)(int index);
+ void (*cpuid_update)(struct kvm_vcpu *vcpu);
+
++ struct kvm *(*vm_alloc)(void);
++ void (*vm_free)(struct kvm *);
+ int (*vm_init)(struct kvm *kvm);
+ void (*vm_destroy)(struct kvm *kvm);
+
+@@ -1094,6 +1096,17 @@ struct kvm_arch_async_pf {
+
+ extern struct kvm_x86_ops *kvm_x86_ops;
+
++#define __KVM_HAVE_ARCH_VM_ALLOC
++static inline struct kvm *kvm_arch_alloc_vm(void)
++{
++ return kvm_x86_ops->vm_alloc();
++}
++
++static inline void kvm_arch_free_vm(struct kvm *kvm)
++{
++ return kvm_x86_ops->vm_free(kvm);
++}
++
+ int kvm_mmu_module_init(void);
+ void kvm_mmu_module_exit(void);
+
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index e7815a3..e6c4353 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -1715,6 +1715,16 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
+ kfree(region);
+ }
+
++static struct kvm *svm_vm_alloc(void)
++{
++ return kzalloc(sizeof(struct kvm), GFP_KERNEL);
++}
++
++static void svm_vm_free(struct kvm *kvm)
++{
++ kfree(kvm);
++}
++
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
+ struct kvm_sev_info *sev = &kvm->arch.sev_info;
+@@ -6791,6 +6801,8 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+ .vcpu_free = svm_free_vcpu,
+ .vcpu_reset = svm_vcpu_reset,
+
++ .vm_alloc = svm_vm_alloc,
++ .vm_free = svm_vm_free,
+ .vm_init = avic_vm_init,
+ .vm_destroy = svm_vm_destroy,
+
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+old mode 100644
+new mode 100755
+index 815ff09..26dc4f4
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -9942,6 +9942,16 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+ }
+ STACK_FRAME_NON_STANDARD(vmx_vcpu_run);
+
++static struct kvm *vmx_vm_alloc(void)
++{
++ return kzalloc(sizeof(struct kvm), GFP_KERNEL);
++}
++
++static void vmx_vm_free(struct kvm *kvm)
++{
++ kfree(kvm);
++}
++
+ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
+ {
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+@@ -12517,6 +12527,9 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .hardware_disable = hardware_disable,
+ .cpu_has_accelerated_tpr = report_flexpriority,
+ .has_emulated_msr = vmx_has_emulated_msr,
++ .vm_init = vmx_vm_init,
++ .vm_alloc = vmx_vm_alloc,
++ .vm_free = vmx_vm_free,
+
+ .vm_init = vmx_vm_init,
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch
new file mode 100644
index 00000000..ca2a6d03
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0092-KVM-SVM-add-struct-kvm_svm-to-hold-SVM-specific-KVM-.patch
@@ -0,0 +1,467 @@
+From f9db61de70bc09e29e9fe6da88ad5becd37c8aa4 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 15 Aug 2018 12:47:42 +0530
+Subject: [PATCH 92/95] KVM: SVM: add struct kvm_svm to hold SVM specific KVM
+ vars
+
+From 81811c162d4da1ececef14a1efc9602e86d29ef5
+
+Add struct kvm_svm, which is analagous to struct vcpu_svm, along with
+a helper to_kvm_svm() to retrieve kvm_svm from a struct kvm *. Move
+the SVM specific variables and struct definitions out of kvm_arch
+and into kvm_svm.
+
+Cc: Tom Lendacky <thomas.lendacky@amd.com>
+Cc: Brijesh Singh <brijesh.singh@amd.com>
+Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/include/asm/kvm_host.h | 18 ------
+ arch/x86/kvm/svm.c | 134 +++++++++++++++++++++++-----------------
+ 2 files changed, 79 insertions(+), 73 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 0b2bcd2..ed9b0da 100755
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -752,15 +752,6 @@ enum kvm_irqchip_mode {
+ KVM_IRQCHIP_SPLIT, /* created with KVM_CAP_SPLIT_IRQCHIP */
+ };
+
+-struct kvm_sev_info {
+- bool active; /* SEV enabled guest */
+- unsigned int asid; /* ASID used for this guest */
+- unsigned int handle; /* SEV firmware handle */
+- int fd; /* SEV device fd */
+- unsigned long pages_locked; /* Number of pages locked */
+- struct list_head regions_list; /* List of registered regions */
+-};
+-
+ struct kvm_arch {
+ unsigned int n_used_mmu_pages;
+ unsigned int n_requested_mmu_pages;
+@@ -839,17 +830,8 @@ struct kvm_arch {
+
+ bool disabled_lapic_found;
+
+- /* Struct members for AVIC */
+- u32 avic_vm_id;
+- u32 ldr_mode;
+- struct page *avic_logical_id_table_page;
+- struct page *avic_physical_id_table_page;
+- struct hlist_node hnode;
+-
+ bool x2apic_format;
+ bool x2apic_broadcast_quirk_disabled;
+-
+- struct kvm_sev_info sev_info;
+ };
+
+ struct kvm_vm_stat {
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index e6c4353..9264eed 100755
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -132,6 +132,28 @@ static const u32 host_save_user_msrs[] = {
+
+ #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
+
++struct kvm_sev_info {
++ bool active; /* SEV enabled guest */
++ unsigned int asid; /* ASID used for this guest */
++ unsigned int handle; /* SEV firmware handle */
++ int fd; /* SEV device fd */
++ unsigned long pages_locked; /* Number of pages locked */
++ struct list_head regions_list; /* List of registered regions */
++};
++
++struct kvm_svm {
++ struct kvm kvm;
++
++ /* Struct members for AVIC */
++ u32 avic_vm_id;
++ u32 ldr_mode;
++ struct page *avic_logical_id_table_page;
++ struct page *avic_physical_id_table_page;
++ struct hlist_node hnode;
++
++ struct kvm_sev_info sev_info;
++};
++
+ struct kvm_vcpu;
+
+ struct nested_state {
+@@ -359,6 +381,12 @@ struct enc_region {
+ unsigned long size;
+ };
+
++
++static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
++{
++ return container_of(kvm, struct kvm_svm, kvm);
++}
++
+ static inline bool svm_sev_enabled(void)
+ {
+ return max_sev_asid;
+@@ -366,14 +394,14 @@ static inline bool svm_sev_enabled(void)
+
+ static inline bool sev_guest(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ return sev->active;
+ }
+
+ static inline int sev_get_asid(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ return sev->asid;
+ }
+@@ -1090,7 +1118,7 @@ static void disable_nmi_singlestep(struct vcpu_svm *svm)
+ }
+
+ /* Note:
+- * This hash table is used to map VM_ID to a struct kvm_arch,
++ * This hash table is used to map VM_ID to a struct kvm_svm,
+ * when handling AMD IOMMU GALOG notification to schedule in
+ * a particular vCPU.
+ */
+@@ -1107,7 +1135,7 @@ static DEFINE_SPINLOCK(svm_vm_data_hash_lock);
+ static int avic_ga_log_notifier(u32 ga_tag)
+ {
+ unsigned long flags;
+- struct kvm_arch *ka = NULL;
++ struct kvm_svm *kvm_svm;
+ struct kvm_vcpu *vcpu = NULL;
+ u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
+ u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);
+@@ -1115,13 +1143,10 @@ static int avic_ga_log_notifier(u32 ga_tag)
+ pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+- hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
+- struct kvm *kvm = container_of(ka, struct kvm, arch);
+- struct kvm_arch *vm_data = &kvm->arch;
+-
+- if (vm_data->avic_vm_id != vm_id)
++ hash_for_each_possible(svm_vm_data_hash, kvm_svm, hnode, vm_id) {
++ if (kvm_svm->avic_vm_id != vm_id)
+ continue;
+- vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
++ vcpu = kvm_get_vcpu_by_id(&kvm_svm->kvm, vcpu_id);
+ break;
+ }
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+@@ -1338,10 +1363,10 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+ static void avic_init_vmcb(struct vcpu_svm *svm)
+ {
+ struct vmcb *vmcb = svm->vmcb;
+- struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
+ phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
+- phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
+- phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));
++ phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
++ phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));
+
+ vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
+ vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
+@@ -1498,12 +1523,12 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
+ unsigned int index)
+ {
+ u64 *avic_physical_id_table;
+- struct kvm_arch *vm_data = &vcpu->kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+
+ if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
+ return NULL;
+
+- avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
++ avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
+
+ return &avic_physical_id_table[index];
+ }
+@@ -1586,7 +1611,7 @@ static void __sev_asid_free(int asid)
+
+ static void sev_asid_free(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ __sev_asid_free(sev->asid);
+ }
+@@ -1626,7 +1651,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ unsigned long ulen, unsigned long *n,
+ int write)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ unsigned long npages, npinned, size;
+ unsigned long locked, lock_limit;
+ struct page **pages;
+@@ -1677,7 +1702,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
+ static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
+ unsigned long npages)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ release_pages(pages, npages, 0);
+ kvfree(pages);
+@@ -1717,17 +1742,18 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
+
+ static struct kvm *svm_vm_alloc(void)
+ {
+- return kzalloc(sizeof(struct kvm), GFP_KERNEL);
++ struct kvm_svm *kvm_svm = kzalloc(sizeof(struct kvm_svm), GFP_KERNEL);
++ return &kvm_svm->kvm;
+ }
+
+ static void svm_vm_free(struct kvm *kvm)
+ {
+- kfree(kvm);
++ kfree(to_kvm_svm(kvm));
+ }
+
+ static void sev_vm_destroy(struct kvm *kvm)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct list_head *head = &sev->regions_list;
+ struct list_head *pos, *q;
+
+@@ -1756,18 +1782,18 @@ static void sev_vm_destroy(struct kvm *kvm)
+ static void avic_vm_destroy(struct kvm *kvm)
+ {
+ unsigned long flags;
+- struct kvm_arch *vm_data = &kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
+
+ if (!avic)
+ return;
+
+- if (vm_data->avic_logical_id_table_page)
+- __free_page(vm_data->avic_logical_id_table_page);
+- if (vm_data->avic_physical_id_table_page)
+- __free_page(vm_data->avic_physical_id_table_page);
++ if (kvm_svm->avic_logical_id_table_page)
++ __free_page(kvm_svm->avic_logical_id_table_page);
++ if (kvm_svm->avic_physical_id_table_page)
++ __free_page(kvm_svm->avic_physical_id_table_page);
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+- hash_del(&vm_data->hnode);
++ hash_del(&kvm_svm->hnode);
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+ }
+
+@@ -1781,10 +1807,10 @@ static int avic_vm_init(struct kvm *kvm)
+ {
+ unsigned long flags;
+ int err = -ENOMEM;
+- struct kvm_arch *vm_data = &kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(kvm);
++ struct kvm_svm *k2;
+ struct page *p_page;
+ struct page *l_page;
+- struct kvm_arch *ka;
+ u32 vm_id;
+
+ if (!avic)
+@@ -1795,7 +1821,7 @@ static int avic_vm_init(struct kvm *kvm)
+ if (!p_page)
+ goto free_avic;
+
+- vm_data->avic_physical_id_table_page = p_page;
++ kvm_svm->avic_physical_id_table_page = p_page;
+ clear_page(page_address(p_page));
+
+ /* Allocating logical APIC ID table (4KB) */
+@@ -1803,7 +1829,7 @@ static int avic_vm_init(struct kvm *kvm)
+ if (!l_page)
+ goto free_avic;
+
+- vm_data->avic_logical_id_table_page = l_page;
++ kvm_svm->avic_logical_id_table_page = l_page;
+ clear_page(page_address(l_page));
+
+ spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
+@@ -1815,15 +1841,13 @@ static int avic_vm_init(struct kvm *kvm)
+ }
+ /* Is it still in use? Only possible if wrapped at least once */
+ if (next_vm_id_wrapped) {
+- hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
+- struct kvm *k2 = container_of(ka, struct kvm, arch);
+- struct kvm_arch *vd2 = &k2->arch;
+- if (vd2->avic_vm_id == vm_id)
++ hash_for_each_possible(svm_vm_data_hash, k2, hnode, vm_id) {
++ if (k2->avic_vm_id == vm_id)
+ goto again;
+ }
+ }
+- vm_data->avic_vm_id = vm_id;
+- hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
++ kvm_svm->avic_vm_id = vm_id;
++ hash_add(svm_vm_data_hash, &kvm_svm->hnode, kvm_svm->avic_vm_id);
+ spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
+
+ return 0;
+@@ -4355,7 +4379,7 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
+
+ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
+ {
+- struct kvm_arch *vm_data = &vcpu->kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+ int index;
+ u32 *logical_apic_id_table;
+ int dlid = GET_APIC_LOGICAL_ID(ldr);
+@@ -4377,7 +4401,7 @@ static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
+ index = (cluster << 2) + apic;
+ }
+
+- logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);
++ logical_apic_id_table = (u32 *) page_address(kvm_svm->avic_logical_id_table_page);
+
+ return &logical_apic_id_table[index];
+ }
+@@ -4457,7 +4481,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
+ static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+- struct kvm_arch *vm_data = &vcpu->kvm->arch;
++ struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+ u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
+ u32 mod = (dfr >> 28) & 0xf;
+
+@@ -4466,11 +4490,11 @@ static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
+ * If this changes, we need to flush the AVIC logical
+ * APID id table.
+ */
+- if (vm_data->ldr_mode == mod)
++ if (kvm_svm->ldr_mode == mod)
+ return 0;
+
+- clear_page(page_address(vm_data->avic_logical_id_table_page));
+- vm_data->ldr_mode = mod;
++ clear_page(page_address(kvm_svm->avic_logical_id_table_page));
++ kvm_svm->ldr_mode = mod;
+
+ if (svm->ldr_reg)
+ avic_handle_ldr_update(vcpu);
+@@ -5105,7 +5129,7 @@ static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
+ /* Try to enable guest_mode in IRTE */
+ pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
+ AVIC_HPA_MASK);
+- pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
++ pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
+ svm->vcpu.vcpu_id);
+ pi.is_guest_mode = true;
+ pi.vcpu_data = &vcpu_info;
+@@ -5979,7 +6003,7 @@ static int sev_asid_new(void)
+
+ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ int asid, ret;
+
+ ret = -EBUSY;
+@@ -6044,14 +6068,14 @@ static int __sev_issue_cmd(int fd, int id, void *data, int *error)
+
+ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ return __sev_issue_cmd(sev->fd, id, data, error);
+ }
+
+ static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_start *start;
+ struct kvm_sev_launch_start params;
+ void *dh_blob, *session_blob;
+@@ -6149,7 +6173,7 @@ static int get_num_contig_pages(int idx, struct page **inpages,
+ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_launch_update_data params;
+ struct sev_data_launch_update_data *data;
+ struct page **inpages;
+@@ -6225,7 +6249,7 @@ static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+ void __user *measure = (void __user *)(uintptr_t)argp->data;
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_measure *data;
+ struct kvm_sev_launch_measure params;
+ void __user *p = NULL;
+@@ -6293,7 +6317,7 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_finish *data;
+ int ret;
+
+@@ -6313,7 +6337,7 @@ static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
+
+ static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_guest_status params;
+ struct sev_data_guest_status *data;
+ int ret;
+@@ -6345,7 +6369,7 @@ static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
+ unsigned long dst, int size,
+ int *error, bool enc)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_dbg *data;
+ int ret;
+
+@@ -6577,7 +6601,7 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
+
+ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct sev_data_launch_secret *data;
+ struct kvm_sev_launch_secret params;
+ struct page **pages;
+@@ -6701,7 +6725,7 @@ static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
+ static int svm_register_enc_region(struct kvm *kvm,
+ struct kvm_enc_region *range)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct enc_region *region;
+ int ret = 0;
+
+@@ -6743,7 +6767,7 @@ static int svm_register_enc_region(struct kvm *kvm,
+ static struct enc_region *
+ find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
+ {
+- struct kvm_sev_info *sev = &kvm->arch.sev_info;
++ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+ struct list_head *head = &sev->regions_list;
+ struct enc_region *i;
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0093-check-pci-dev-before-getting-pci-alias.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0093-check-pci-dev-before-getting-pci-alias.patch
new file mode 100644
index 00000000..9890a61c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0093-check-pci-dev-before-getting-pci-alias.patch
@@ -0,0 +1,56 @@
+From 1bca08eb860a16e2679655918bd36f21fa213422 Mon Sep 17 00:00:00 2001
+From: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+Date: Thu, 26 Apr 2018 23:31:29 -0500
+Subject: [PATCH 93/95] check pci dev before getting pci alias
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/iommu/amd_iommu.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 10190e3..33dd869 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -254,7 +254,9 @@ static u16 get_alias(struct device *dev)
+ /* The callers make sure that get_device_id() does not fail here */
+ devid = get_device_id(dev);
+ ivrs_alias = amd_iommu_alias_table[devid];
+- pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
++ if (dev_is_pci(dev)) {
++ pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
++ }
+
+ if (ivrs_alias == pci_alias)
+ return ivrs_alias;
+@@ -280,18 +282,20 @@ static u16 get_alias(struct device *dev)
+ return pci_alias;
+ }
+
+- pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
+- "for device %s[%04x:%04x], kernel reported alias "
+- "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
+- PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
+- PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
+- PCI_FUNC(pci_alias));
++ if (dev_is_pci(dev)) {
++ pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
++ "for device %s[%04x:%04x], kernel reported alias "
++ "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
++ PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
++ PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
++ PCI_FUNC(pci_alias));
++ }
+
+ /*
+ * If we don't have a PCI DMA alias and the IVRS alias is on the same
+ * bus, then the IVRS table may know about a quirk that we don't.
+ */
+- if (pci_alias == devid &&
++ if (dev_is_pci(dev) && pci_alias == devid &&
+ PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
+ pci_add_dma_alias(pdev, ivrs_alias & 0xff);
+ pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0094-mmc-sdhci-acpi-Add-support-for-ACPI-HID-of-AMD-Contr.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0094-mmc-sdhci-acpi-Add-support-for-ACPI-HID-of-AMD-Contr.patch
new file mode 100644
index 00000000..3cd99eaf
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0094-mmc-sdhci-acpi-Add-support-for-ACPI-HID-of-AMD-Contr.patch
@@ -0,0 +1,125 @@
+From 047b0e5412203f56a29d0c450e8e5523f566233e Mon Sep 17 00:00:00 2001
+From: Shah Nehal-Bakulchandra <Nehal-bakulchandra.Shah@amd.com>
+Date: Fri, 1 Dec 2017 15:38:52 +0530
+Subject: [PATCH 94/95] mmc: sdhci-acpi: Add support for ACPI HID of AMD
+ Controller with HS400
+
+This patch supports HS400 for AMD upcoming emmc 5.0 controller.The
+HS400 and HS200 mode requires hardware work around also. This patch
+adds the quirks for the same.
+
+Signed-off-by: Nehal-bakulchandra Shah <Nehal-bakulchandra.Shah@amd.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci-acpi.c | 79 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 79 insertions(+)
+
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+index 08ae0ff..c2e7048 100644
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -367,6 +367,83 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = {
+ .caps = MMC_CAP_NONREMOVABLE,
+ };
+
++/* AMD sdhci reset dll register. */
++#define SDHCI_AMD_RESET_DLL_REGISTER 0x908
++
++static int amd_select_drive_strength(struct mmc_card *card,
++ unsigned int max_dtr, int host_drv,
++ int card_drv, int *drv_type)
++{
++ return MMC_SET_DRIVER_TYPE_A;
++}
++
++static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
++{
++ /* AMD Platform requires dll setting */
++ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
++ usleep_range(10, 20);
++ sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
++}
++
++/*
++ * For AMD Platform it is required to disable the tuning
++ * bit first controller to bring to HS Mode from HS200
++ * mode, later enable to tune to HS400 mode.
++ */
++static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++ unsigned int old_timing = host->timing;
++
++ sdhci_set_ios(mmc, ios);
++ if (old_timing == MMC_TIMING_MMC_HS200 &&
++ ios->timing == MMC_TIMING_MMC_HS)
++ sdhci_writew(host, 0x9, SDHCI_HOST_CONTROL2);
++ if (old_timing != MMC_TIMING_MMC_HS400 &&
++ ios->timing == MMC_TIMING_MMC_HS400) {
++ sdhci_writew(host, 0x80, SDHCI_HOST_CONTROL2);
++ sdhci_acpi_amd_hs400_dll(host);
++ }
++}
++
++static const struct sdhci_ops sdhci_acpi_ops_amd = {
++ .set_clock = sdhci_set_clock,
++ .set_bus_width = sdhci_set_bus_width,
++ .reset = sdhci_reset,
++ .set_uhs_signaling = sdhci_set_uhs_signaling,
++};
++
++static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
++ .ops = &sdhci_acpi_ops_amd,
++};
++
++static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev,
++ const char *hid, const char *uid)
++{
++ struct sdhci_acpi_host *c = platform_get_drvdata(pdev);
++ struct sdhci_host *host = c->host;
++
++ sdhci_read_caps(host);
++ if (host->caps1 & SDHCI_SUPPORT_DDR50)
++ host->mmc->caps = MMC_CAP_1_8V_DDR;
++
++ if ((host->caps1 & SDHCI_SUPPORT_SDR104) &&
++ (host->mmc->caps & MMC_CAP_1_8V_DDR))
++ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
++
++ host->mmc_host_ops.select_drive_strength = amd_select_drive_strength;
++ host->mmc_host_ops.set_ios = amd_set_ios;
++ return 0;
++}
++
++static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
++ .chip = &sdhci_acpi_chip_amd,
++ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
++ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
++ SDHCI_QUIRK_32BIT_ADMA_SIZE,
++ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
++};
++
+ struct sdhci_acpi_uid_slot {
+ const char *hid;
+ const char *uid;
+@@ -390,6 +467,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
+ { "PNP0D40" },
+ { "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v },
+ { "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd },
++ { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc },
+ { },
+ };
+
+@@ -406,6 +484,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
+ { "PNP0D40" },
+ { "QCOM8051" },
+ { "QCOM8052" },
++ { "AMDI0040" },
+ { },
+ };
+ MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0095-eMMC-patch-4.14.48.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0095-eMMC-patch-4.14.48.patch
new file mode 100644
index 00000000..9278cd7e
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0095-eMMC-patch-4.14.48.patch
@@ -0,0 +1,219 @@
+From 83800ff77e4b8d8946ed9d33229e57cb3fc58cbc Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Thu, 16 Aug 2018 22:52:11 +0530
+Subject: [PATCH 95/95] eMMC patch 4.14.48
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/acpi/resource.c | 5 +++++
+ drivers/mmc/core/mmc.c | 25 +++++++++++++++----------
+ drivers/mmc/host/sdhci-acpi.c | 2 ++
+ drivers/mmc/host/sdhci.c | 21 +++++++++++++++++++++
+ drivers/mmc/host/sdhci.h | 2 ++
+ include/linux/mmc/host.h | 1 +
+ 6 files changed, 46 insertions(+), 10 deletions(-)
+ mode change 100644 => 100755 drivers/acpi/resource.c
+ mode change 100644 => 100755 drivers/mmc/core/mmc.c
+ mode change 100644 => 100755 drivers/mmc/host/sdhci-acpi.c
+ mode change 100644 => 100755 drivers/mmc/host/sdhci.c
+ mode change 100644 => 100755 drivers/mmc/host/sdhci.h
+ mode change 100644 => 100755 include/linux/mmc/host.h
+
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+old mode 100644
+new mode 100755
+index d85e010..e82b5a7
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -425,6 +425,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
+ triggering = trig;
+ polarity = pol;
+ }
++ if (gsi == 5) {
++ polarity = ACPI_ACTIVE_LOW;
++ pr_warning("ACPI: IRQ %d do not override to %s, %s\n", gsi,
++ t ? "level" : "edge", p ? "low" : "high");
++ }
+ }
+
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+old mode 100644
+new mode 100755
+index bad5c1b..29bba1e
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1161,14 +1161,14 @@ static int mmc_select_hs400(struct mmc_card *card)
+ mmc_hostname(host), err);
+ return err;
+ }
+-
+- /* Set host controller to HS timing */
+- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+-
+- /* Reduce frequency to HS frequency */
+- max_dtr = card->ext_csd.hs_max_dtr;
+- mmc_set_clock(host, max_dtr);
+-
++ /*In AMD Platform due to hardware ip issue this fails*/
++ if (!host->ops->set_hs400_dll) {
++ /* Set host controller to HS timing */
++ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
++ /* Reduce frequency to HS frequency */
++ max_dtr = card->ext_csd.hs_max_dtr;
++ mmc_set_clock(host, max_dtr);
++ }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+@@ -1204,7 +1204,8 @@ static int mmc_select_hs400(struct mmc_card *card)
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+-
++ if (host->ops->set_hs400_dll)
++ host->ops->set_hs400_dll(host);
+ return 0;
+
+ out_err:
+@@ -1227,6 +1228,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
++ if (!host->ops->set_hs400_dll)
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch HS400 to HS DDR */
+@@ -1236,12 +1238,15 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ true, false, true);
+ if (err)
+ goto out_err;
+-
++ /*In AMD Platform due to hardware ip issue this fails*/
++ if (!host->ops->set_hs400_dll)
++ {
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
++ }
+
+ /* Switch HS DDR to HS */
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+old mode 100644
+new mode 100755
+index c2e7048..33592a6
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -411,6 +411,7 @@ static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_hs400_dll = sdhci_acpi_amd_hs400_dll,
+ };
+
+ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+@@ -441,6 +442,7 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
++ .quirks2 = SDHCI_QUIRK2_BROKEN_TUNING_WA,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+ };
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+old mode 100644
+new mode 100755
+index d35deb7..8837d45
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1207,6 +1207,12 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ flags |= SDHCI_CMD_DATA;
+
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
++
++ if (cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 && (host->quirks2 & SDHCI_QUIRK2_BROKEN_TUNING_WA)) {
++ mdelay(10);
++ sdhci_writel(host, 0x8803040a, 0x8b8);
++ mdelay(10);
++ }
+ }
+ EXPORT_SYMBOL_GPL(sdhci_send_command);
+
+@@ -1873,6 +1879,14 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
+ host->ops->hw_reset(host);
+ }
+
++static void sdhci_set_hs400_dll(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++
++ if (host->ops && host->ops->set_hs400_dll)
++ host->ops->set_hs400_dll(host);
++}
++
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+@@ -2356,6 +2370,7 @@ static const struct mmc_host_ops sdhci_ops = {
+ .get_cd = sdhci_get_cd,
+ .get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
++ .set_hs400_dll = sdhci_set_hs400_dll,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
+@@ -3300,6 +3315,12 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+ host->caps1 &= ~upper_32_bits(dt_caps_mask);
+ host->caps1 |= upper_32_bits(dt_caps);
+ }
++
++ if ((host->caps1 & SDHCI_SUPPORT_SDR104) && (host->caps1 & SDHCI_SUPPORT_DDR50) &&
++ (host->quirks2 & SDHCI_QUIRK2_BROKEN_TUNING_WA))
++ {
++ host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
++ }
+ }
+ EXPORT_SYMBOL_GPL(__sdhci_read_caps);
+
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+old mode 100644
+new mode 100755
+index 1d7d61e..b5fd294
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -438,6 +438,7 @@ struct sdhci_host {
+ /* Controller has CRC in 136 bit Command Response */
+ #define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16)
+
++#define SDHCI_QUIRK2_BROKEN_TUNING_WA (1<<17)
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+ char *bounce_buffer; /* For packing SDMA reads/writes */
+@@ -584,6 +585,7 @@ struct sdhci_ops {
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
++ void (*set_hs400_dll)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+old mode 100644
+new mode 100755
+index 9a43763..b7d5611
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -152,6 +152,7 @@ struct mmc_host_ops {
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+ void (*hw_reset)(struct mmc_host *host);
++ void (*set_hs400_dll)(struct mmc_host *host);
+ void (*card_event)(struct mmc_host *host);
+
+ /*
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0096-Revert-eMMC-patch-4.14.48.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0096-Revert-eMMC-patch-4.14.48.patch
new file mode 100644
index 00000000..6957eef8
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0096-Revert-eMMC-patch-4.14.48.patch
@@ -0,0 +1,221 @@
+From d433eff73ff15a790c2f6872d57ecd54f8694f7a Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 22 Jan 2019 16:17:11 +0530
+Subject: [PATCH 096/131] Revert "eMMC patch 4.14.48"
+
+This reverts commit 925f75997af85adaea92b1771b24764787e4d4ce.
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/acpi/resource.c | 5 -----
+ drivers/mmc/core/mmc.c | 25 ++++++++++---------------
+ drivers/mmc/host/sdhci-acpi.c | 2 --
+ drivers/mmc/host/sdhci.c | 21 ---------------------
+ drivers/mmc/host/sdhci.h | 2 --
+ include/linux/mmc/host.h | 1 -
+ 6 files changed, 10 insertions(+), 46 deletions(-)
+ mode change 100755 => 100644 drivers/acpi/resource.c
+ mode change 100755 => 100644 drivers/mmc/core/mmc.c
+ mode change 100755 => 100644 drivers/mmc/host/sdhci-acpi.c
+ mode change 100755 => 100644 drivers/mmc/host/sdhci.c
+ mode change 100755 => 100644 drivers/mmc/host/sdhci.h
+ mode change 100755 => 100644 include/linux/mmc/host.h
+
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+old mode 100755
+new mode 100644
+index e82b5a7..d85e010
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -425,11 +425,6 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
+ triggering = trig;
+ polarity = pol;
+ }
+- if (gsi == 5) {
+- polarity = ACPI_ACTIVE_LOW;
+- pr_warning("ACPI: IRQ %d do not override to %s, %s\n", gsi,
+- t ? "level" : "edge", p ? "low" : "high");
+- }
+ }
+
+ res->flags = acpi_dev_irq_flags(triggering, polarity, shareable);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+old mode 100755
+new mode 100644
+index 29bba1e..bad5c1b
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1161,14 +1161,14 @@ static int mmc_select_hs400(struct mmc_card *card)
+ mmc_hostname(host), err);
+ return err;
+ }
+- /*In AMD Platform due to hardware ip issue this fails*/
+- if (!host->ops->set_hs400_dll) {
+- /* Set host controller to HS timing */
+- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+- /* Reduce frequency to HS frequency */
+- max_dtr = card->ext_csd.hs_max_dtr;
+- mmc_set_clock(host, max_dtr);
+- }
++
++ /* Set host controller to HS timing */
++ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
++
++ /* Reduce frequency to HS frequency */
++ max_dtr = card->ext_csd.hs_max_dtr;
++ mmc_set_clock(host, max_dtr);
++
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+@@ -1204,8 +1204,7 @@ static int mmc_select_hs400(struct mmc_card *card)
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+- if (host->ops->set_hs400_dll)
+- host->ops->set_hs400_dll(host);
++
+ return 0;
+
+ out_err:
+@@ -1228,7 +1227,6 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
+- if (!host->ops->set_hs400_dll)
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch HS400 to HS DDR */
+@@ -1238,15 +1236,12 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ true, false, true);
+ if (err)
+ goto out_err;
+- /*In AMD Platform due to hardware ip issue this fails*/
+- if (!host->ops->set_hs400_dll)
+- {
++
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+- }
+
+ /* Switch HS DDR to HS */
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+old mode 100755
+new mode 100644
+index 33592a6..c2e7048
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -411,7 +411,6 @@ static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+- .set_hs400_dll = sdhci_acpi_amd_hs400_dll,
+ };
+
+ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+@@ -442,7 +441,6 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = {
+ .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE |
+ SDHCI_QUIRK_32BIT_ADMA_SIZE,
+- .quirks2 = SDHCI_QUIRK2_BROKEN_TUNING_WA,
+ .probe_slot = sdhci_acpi_emmc_amd_probe_slot,
+ };
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+old mode 100755
+new mode 100644
+index 8837d45..d35deb7
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1207,12 +1207,6 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
+ flags |= SDHCI_CMD_DATA;
+
+ sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+-
+- if (cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 && (host->quirks2 & SDHCI_QUIRK2_BROKEN_TUNING_WA)) {
+- mdelay(10);
+- sdhci_writel(host, 0x8803040a, 0x8b8);
+- mdelay(10);
+- }
+ }
+ EXPORT_SYMBOL_GPL(sdhci_send_command);
+
+@@ -1879,14 +1873,6 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
+ host->ops->hw_reset(host);
+ }
+
+-static void sdhci_set_hs400_dll(struct mmc_host *mmc)
+-{
+- struct sdhci_host *host = mmc_priv(mmc);
+-
+- if (host->ops && host->ops->set_hs400_dll)
+- host->ops->set_hs400_dll(host);
+-}
+-
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+@@ -2370,7 +2356,6 @@ static const struct mmc_host_ops sdhci_ops = {
+ .get_cd = sdhci_get_cd,
+ .get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
+- .set_hs400_dll = sdhci_set_hs400_dll,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
+@@ -3315,12 +3300,6 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+ host->caps1 &= ~upper_32_bits(dt_caps_mask);
+ host->caps1 |= upper_32_bits(dt_caps);
+ }
+-
+- if ((host->caps1 & SDHCI_SUPPORT_SDR104) && (host->caps1 & SDHCI_SUPPORT_DDR50) &&
+- (host->quirks2 & SDHCI_QUIRK2_BROKEN_TUNING_WA))
+- {
+- host->mmc->caps2 = MMC_CAP2_HS400_1_8V;
+- }
+ }
+ EXPORT_SYMBOL_GPL(__sdhci_read_caps);
+
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+old mode 100755
+new mode 100644
+index b5fd294..1d7d61e
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -438,7 +438,6 @@ struct sdhci_host {
+ /* Controller has CRC in 136 bit Command Response */
+ #define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16)
+
+-#define SDHCI_QUIRK2_BROKEN_TUNING_WA (1<<17)
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+ char *bounce_buffer; /* For packing SDMA reads/writes */
+@@ -585,7 +584,6 @@ struct sdhci_ops {
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
+- void (*set_hs400_dll)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+old mode 100755
+new mode 100644
+index b7d5611..9a43763
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -152,7 +152,6 @@ struct mmc_host_ops {
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+ void (*hw_reset)(struct mmc_host *host);
+- void (*set_hs400_dll)(struct mmc_host *host);
+ void (*card_event)(struct mmc_host *host);
+
+ /*
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0097-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0097-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch
new file mode 100644
index 00000000..5dcdadeb
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0097-mmc-core-Move-calls-to-prepare_hs400_tuning-closer-t.patch
@@ -0,0 +1,50 @@
+From b81f61d97f8e479d3ee3cc8fe428accbaf3a5cf6 Mon Sep 17 00:00:00 2001
+From: Ulf Hansson <ulf.hansson@linaro.org>
+Date: Tue, 22 May 2018 16:26:26 +0200
+Subject: [PATCH 097/131] mmc: core: Move calls to ->prepare_hs400_tuning()
+ closer to mmc code
+
+Move the calls to ->prepare_hs400_tuning(), from mmc_retune() into
+mmc_hs400_to_hs200(), as it better belongs there, rather than being generic
+to all type of cards.
+
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Reviewed-by: Simon Horman <horms+renesas@verge.net.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/core/host.c | 3 ---
+ drivers/mmc/core/mmc.c | 4 ++++
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index ad88deb..4651e9b 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -148,9 +148,6 @@ int mmc_retune(struct mmc_host *host)
+ goto out;
+
+ return_to_hs400 = true;
+-
+- if (host->ops->prepare_hs400_tuning)
+- host->ops->prepare_hs400_tuning(host, &host->ios);
+ }
+
+ err = mmc_execute_tuning(host->card);
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index bad5c1b..16845a8 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1278,6 +1278,10 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ mmc_set_bus_speed(card);
+
++ /* Prepare tuning for HS400 mode. */
++ if (host->ops->prepare_hs400_tuning)
++ host->ops->prepare_hs400_tuning(host, &host->ios);
++
+ return 0;
+
+ out_err:
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0098-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0098-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch
new file mode 100644
index 00000000..8017ef7b
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0098-mmc-core-more-fine-grained-hooks-for-HS400-tuning.patch
@@ -0,0 +1,86 @@
+From 3f3089099ceefd39f728a341fb57ca48f0fbc163 Mon Sep 17 00:00:00 2001
+From: Simon Horman <horms+renesas@verge.net.au>
+Date: Mon, 18 Jun 2018 14:57:49 +0200
+Subject: [PATCH 098/131] mmc: core: more fine-grained hooks for HS400 tuning
+
+This adds two new HS400 tuning operations:
+* hs400_downgrade
+* hs400_complete
+
+These supplement the existing HS400 operation:
+* prepare_hs400_tuning
+
+This is motivated by a requirement of Renesas SDHI for the following:
+1. Disabling SCC before selecting to HS if selection of HS400 has occurred.
+ This can be done in an implementation of prepare_hs400_tuning_downgrade
+2. Updating registers after switching to HS400
+ This can be done in an implementation of complete_hs400_tuning
+
+If hs400_downgrade or hs400_complete are not implemented then they are not
+called. This means there should be no effect for existing drivers as none
+implement these ops.
+
+Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/core/mmc.c | 10 ++++++++++
+ include/linux/mmc/host.h | 7 +++++++
+ 2 files changed, 17 insertions(+)
+
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 16845a8..16b22d7 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1165,6 +1165,10 @@ static int mmc_select_hs400(struct mmc_card *card)
+ /* Set host controller to HS timing */
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
++ /* Prepare host to downgrade to HS timing */
++ if (host->ops->hs400_downgrade)
++ host->ops->hs400_downgrade(host);
++
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+@@ -1205,6 +1209,9 @@ static int mmc_select_hs400(struct mmc_card *card)
+ if (err)
+ goto out_err;
+
++ if (host->ops->hs400_complete)
++ host->ops->hs400_complete(host);
++
+ return 0;
+
+ out_err:
+@@ -1252,6 +1259,9 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ mmc_set_timing(host, MMC_TIMING_MMC_HS);
+
++ if (host->ops->hs400_downgrade)
++ host->ops->hs400_downgrade(host);
++
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+index 9a43763..843c38f 100644
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -145,6 +145,13 @@ struct mmc_host_ops {
+
+ /* Prepare HS400 target operating frequency depending host driver */
+ int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
++
++ /* Prepare for switching from HS400 to HS200 */
++ void (*hs400_downgrade)(struct mmc_host *host);
++
++ /* Complete selection of HS400 */
++ void (*hs400_complete)(struct mmc_host *host);
++
+ /* Prepare enhanced strobe depending host driver */
+ void (*hs400_enhanced_strobe)(struct mmc_host *host,
+ struct mmc_ios *ios);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0099-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0099-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch
new file mode 100644
index 00000000..ed092c65
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0099-mmc-sdhci-Export-sdhci-tuning-function-symbol.patch
@@ -0,0 +1,91 @@
+From d988bfe2ade0b5c288f350708a8cc3e916b5f286 Mon Sep 17 00:00:00 2001
+From: "ernest.zhang" <ernest.zhang@bayhubtech.com>
+Date: Mon, 16 Jul 2018 14:26:53 +0800
+Subject: [PATCH 099/131] mmc: sdhci: Export sdhci tuning function symbol
+
+Export sdhci tuning function symbols which are used by other SD Host
+controller driver modules.
+
+Signed-off-by: ernest.zhang <ernest.zhang@bayhubtech.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 12 ++++++++----
+ drivers/mmc/host/sdhci.h | 5 +++++
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index d35deb7..9b65a38 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2027,7 +2027,7 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+ return 0;
+ }
+
+-static void sdhci_start_tuning(struct sdhci_host *host)
++void sdhci_start_tuning(struct sdhci_host *host)
+ {
+ u16 ctrl;
+
+@@ -2050,14 +2050,16 @@ static void sdhci_start_tuning(struct sdhci_host *host)
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(sdhci_start_tuning);
+
+-static void sdhci_end_tuning(struct sdhci_host *host)
++void sdhci_end_tuning(struct sdhci_host *host)
+ {
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
++EXPORT_SYMBOL_GPL(sdhci_end_tuning);
+
+-static void sdhci_reset_tuning(struct sdhci_host *host)
++void sdhci_reset_tuning(struct sdhci_host *host)
+ {
+ u16 ctrl;
+
+@@ -2066,6 +2068,7 @@ static void sdhci_reset_tuning(struct sdhci_host *host)
+ ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ }
++EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
+
+ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
+ {
+@@ -2086,7 +2089,7 @@ static void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
+ * interrupt setup is different to other commands and there is no timeout
+ * interrupt so special handling is needed.
+ */
+-static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
++void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
+ {
+ struct mmc_host *mmc = host->mmc;
+ struct mmc_command cmd = {};
+@@ -2136,6 +2139,7 @@ static void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
+ msecs_to_jiffies(50));
+
+ }
++EXPORT_SYMBOL_GPL(sdhci_send_tuning);
+
+ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 1d7d61e..6386709 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -733,4 +733,9 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+
+ void sdhci_dumpregs(struct sdhci_host *host);
+
++void sdhci_start_tuning(struct sdhci_host *host);
++void sdhci_end_tuning(struct sdhci_host *host);
++void sdhci_reset_tuning(struct sdhci_host *host);
++void sdhci_send_tuning(struct sdhci_host *host, u32 opcode);
++
+ #endif /* __SDHCI_HW_H */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0100-mmc-sdhci-add-tuning-error-codes.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0100-mmc-sdhci-add-tuning-error-codes.patch
new file mode 100644
index 00000000..d2076a54
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0100-mmc-sdhci-add-tuning-error-codes.patch
@@ -0,0 +1,78 @@
+From f0cbc1d2216360846eec53fe7ff93f583f6ee6ac Mon Sep 17 00:00:00 2001
+From: Yinbo Zhu <yinbo.zhu@nxp.com>
+Date: Thu, 23 Aug 2018 16:48:31 +0800
+Subject: [PATCH 100/131] mmc: sdhci: add tuning error codes
+
+This patch is to add tuning error codes to
+judge tuning state
+
+Signed-off-by: Yinbo Zhu <yinbo.zhu@nxp.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 9 +++++----
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 9b65a38..730cfa7 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -2141,7 +2141,7 @@ void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
+ }
+ EXPORT_SYMBOL_GPL(sdhci_send_tuning);
+
+-static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
++static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ {
+ int i;
+
+@@ -2158,13 +2158,13 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ pr_info("%s: Tuning timeout, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_abort_tuning(host, opcode);
+- return;
++ return -ETIMEDOUT;
+ }
+
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
+ if (ctrl & SDHCI_CTRL_TUNED_CLK)
+- return; /* Success! */
++ return 0; /* Success! */
+ break;
+ }
+
+@@ -2176,6 +2176,7 @@ static void __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
+ pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
+ mmc_hostname(host->mmc));
+ sdhci_reset_tuning(host);
++ return -EAGAIN;
+ }
+
+ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+@@ -2237,7 +2238,7 @@ int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+
+ sdhci_start_tuning(host);
+
+- __sdhci_execute_tuning(host, opcode);
++ host->tuning_err = __sdhci_execute_tuning(host, opcode);
+
+ sdhci_end_tuning(host);
+ out:
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 6386709..d38abce 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -540,6 +540,7 @@ struct sdhci_host {
+
+ unsigned int tuning_count; /* Timer count for re-tuning */
+ unsigned int tuning_mode; /* Re-tuning mode supported by host */
++ unsigned int tuning_err; /* Error code for re-tuning */
+ #define SDHCI_TUNING_MODE_1 0
+ #define SDHCI_TUNING_MODE_2 1
+ #define SDHCI_TUNING_MODE_3 2
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0101-mmc-sdhci-Export-sdhci_request.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0101-mmc-sdhci-Export-sdhci_request.patch
new file mode 100644
index 00000000..d8bc986d
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0101-mmc-sdhci-Export-sdhci_request.patch
@@ -0,0 +1,53 @@
+From 4561d29b62ca137f268f19fadd098db381af0402 Mon Sep 17 00:00:00 2001
+From: Aapo Vienamo <avienamo@nvidia.com>
+Date: Mon, 20 Aug 2018 12:23:32 +0300
+Subject: [PATCH 101/131] mmc: sdhci: Export sdhci_request()
+
+Allow SDHCI drivers to hook code before and after sdhci_request() by
+making it externally visible.
+
+Signed-off-by: Aapo Vienamo <avienamo@nvidia.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 3 ++-
+ drivers/mmc/host/sdhci.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 730cfa7..0819b85 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1552,7 +1552,7 @@ EXPORT_SYMBOL_GPL(sdhci_set_power);
+ * *
+ \*****************************************************************************/
+
+-static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
++void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ {
+ struct sdhci_host *host;
+ int present;
+@@ -1591,6 +1591,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ mmiowb();
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
++EXPORT_SYMBOL_GPL(sdhci_request);
+
+ void sdhci_set_bus_width(struct sdhci_host *host, int width)
+ {
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index d38abce..e093037 100644
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -710,6 +710,7 @@ void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
+ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
+ unsigned short vdd);
++void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
+ void sdhci_set_bus_width(struct sdhci_host *host, int width);
+ void sdhci_reset(struct sdhci_host *host, u8 mask);
+ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0102-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0102-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch
new file mode 100644
index 00000000..7679f318
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0102-mmc-sdhci-add-adma_table_cnt-member-to-struct-sdhci_.patch
@@ -0,0 +1,80 @@
+From 1d2f6e2bf771658f2865a0931aa71ce4a861a8b2 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 16 Jan 2019 11:23:47 +0530
+Subject: [PATCH 102/131] mmc: sdhci: add adma_table_cnt member to struct
+ sdhci_host
+
+This patch adds adma_table_cnt member to struct sdhci_host to give more
+flexibility to drivers to control the ADMA table count.
+
+Default value of adma_table_cnt is set to (SDHCI_MAX_SEGS * 2 + 1).
+
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 17 +++++++++--------
+ drivers/mmc/host/sdhci.h | 3 +++
+ 2 files changed, 12 insertions(+), 8 deletions(-)
+ mode change 100644 => 100755 drivers/mmc/host/sdhci.h
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 0819b85..075253f 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3223,6 +3223,13 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
+
+ host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
+
++ /*
++ * The DMA table descriptor count is calculated as the maximum
++ * number of segments times 2, to allow for an alignment
++ * descriptor for each segment, plus 1 for a nop end descriptor.
++ */
++ host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
++
+ return host;
+ }
+
+@@ -3468,18 +3475,12 @@ int sdhci_setup_host(struct sdhci_host *host)
+ dma_addr_t dma;
+ void *buf;
+
+- /*
+- * The DMA descriptor table size is calculated as the maximum
+- * number of segments times 2, to allow for an alignment
+- * descriptor for each segment, plus 1 for a nop end descriptor,
+- * all multipled by the descriptor size.
+- */
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+- host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
++ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_64_DESC_SZ;
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+ } else {
+- host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) *
++ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_32_DESC_SZ;
+ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ;
+ }
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+old mode 100644
+new mode 100755
+index e093037..17193f4
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -550,6 +550,9 @@ struct sdhci_host {
+ /* Host SDMA buffer boundary. */
+ u32 sdma_boundary;
+
++ /* Host ADMA table count */
++ u32 adma_table_cnt;
++
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0103-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0103-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch
new file mode 100644
index 00000000..bea876fd
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0103-mmc-sdhci-introduce-adma_write_desc-hook-to-struct-s.patch
@@ -0,0 +1,128 @@
+From d564e134441916f5275e277146471997443d0aad Mon Sep 17 00:00:00 2001
+From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Date: Tue, 28 Aug 2018 17:47:23 +0800
+Subject: [PATCH 103/131] mmc: sdhci: introduce adma_write_desc() hook to
+ struct sdhci_ops
+
+Add this hook so that it can be overridden with driver specific
+implementations. We also let the original sdhci_adma_write_desc()
+accept &desc so that the function can set its new value. Then export
+the function so that it could be reused by driver's specific
+implementations.
+
+Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 37 +++++++++++++++++++++++--------------
+ drivers/mmc/host/sdhci.h | 4 ++++
+ 2 files changed, 27 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 075253f..f345a31 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -554,10 +554,10 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
+ local_irq_restore(*flags);
+ }
+
+-static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
+- dma_addr_t addr, int len, unsigned cmd)
++void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
++ dma_addr_t addr, int len, unsigned int cmd)
+ {
+- struct sdhci_adma2_64_desc *dma_desc = desc;
++ struct sdhci_adma2_64_desc *dma_desc = *desc;
+
+ /* 32-bit and 64-bit descriptors have these members in same position */
+ dma_desc->cmd = cpu_to_le16(cmd);
+@@ -566,6 +566,19 @@ static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc,
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32);
++
++ *desc += host->desc_sz;
++}
++EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
++
++static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
++					   void **desc, dma_addr_t addr,
++					   int len, unsigned int cmd)
++{
++	if (host->ops->adma_write_desc)
++		host->ops->adma_write_desc(host, desc, addr, len, cmd);
++	else
++		sdhci_adma_write_desc(host, desc, addr, len, cmd);
+ }
+
+ static void sdhci_adma_mark_end(void *desc)
+@@ -618,28 +631,24 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
+ }
+
+ /* tran, valid */
+- sdhci_adma_write_desc(host, desc, align_addr, offset,
+- ADMA2_TRAN_VALID);
++ __sdhci_adma_write_desc(host, &desc, align_addr,
++ offset, ADMA2_TRAN_VALID);
+
+ BUG_ON(offset > 65536);
+
+ align += SDHCI_ADMA2_ALIGN;
+ align_addr += SDHCI_ADMA2_ALIGN;
+
+- desc += host->desc_sz;
+-
+ addr += offset;
+ len -= offset;
+ }
+
+ BUG_ON(len > 65536);
+
+- if (len) {
+- /* tran, valid */
+- sdhci_adma_write_desc(host, desc, addr, len,
+- ADMA2_TRAN_VALID);
+- desc += host->desc_sz;
+- }
++ /* tran, valid */
++ if (len)
++ __sdhci_adma_write_desc(host, &desc, addr, len,
++ ADMA2_TRAN_VALID);
+
+ /*
+ * If this triggers then we have a calculation bug
+@@ -656,7 +665,7 @@ static void sdhci_adma_table_pre(struct sdhci_host *host,
+ }
+ } else {
+ /* Add a terminating entry - nop, end, valid */
+- sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID);
++ __sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
+ }
+ }
+
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 17193f4..14e6545 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -591,6 +591,8 @@ struct sdhci_ops {
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
++ void (*adma_write_desc)(struct sdhci_host *host, void **desc,
++ dma_addr_t addr, int len, unsigned int cmd);
+ };
+
+ #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
+@@ -722,6 +724,8 @@ void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
+ int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
+ struct mmc_ios *ios);
+ void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable);
++void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
++ dma_addr_t addr, int len, unsigned int cmd);
+
+ #ifdef CONFIG_PM
+ int sdhci_suspend_host(struct sdhci_host *host);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0104-mmc-sdhci-Add-version-V4-definition.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0104-mmc-sdhci-Add-version-V4-definition.patch
new file mode 100644
index 00000000..2aff580c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0104-mmc-sdhci-Add-version-V4-definition.patch
@@ -0,0 +1,46 @@
+From e4e3401cfb668960c78f4969a89521f6d55746e9 Mon Sep 17 00:00:00 2001
+From: Chunyan Zhang <zhang.chunyan@linaro.org>
+Date: Thu, 30 Aug 2018 16:21:37 +0800
+Subject: [PATCH 104/131] mmc: sdhci: Add version V4 definition
+
+Added definitions for v400, v410, v420.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 2 +-
+ drivers/mmc/host/sdhci.h | 3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index f345a31..e47f1aa 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -3423,7 +3423,7 @@ int sdhci_setup_host(struct sdhci_host *host)
+
+ override_timeout_clk = host->timeout_clk;
+
+- if (host->version > SDHCI_SPEC_300) {
++ if (host->version > SDHCI_SPEC_420) {
+ pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
+ mmc_hostname(mmc), host->version);
+ }
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 14e6545..5bd6aa4 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -270,6 +270,9 @@
+ #define SDHCI_SPEC_100 0
+ #define SDHCI_SPEC_200 1
+ #define SDHCI_SPEC_300 2
++#define SDHCI_SPEC_400 3
++#define SDHCI_SPEC_410 4
++#define SDHCI_SPEC_420 5
+
+ /*
+ * End of controller registers.
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0105-mmc-sdhci-Add-sd-host-v4-mode.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0105-mmc-sdhci-Add-sd-host-v4-mode.patch
new file mode 100644
index 00000000..60f06cef
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0105-mmc-sdhci-Add-sd-host-v4-mode.patch
@@ -0,0 +1,105 @@
+From f1eb47a120122349dd60aeec4fe787fcb18f470a Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Wed, 16 Jan 2019 11:47:52 +0530
+Subject: [PATCH 105/131] mmc: sdhci: Add sd host v4 mode
+
+For SD host controller version 4.00 or later ones, there're two
+modes of implementation - Version 3.00 compatible mode or
+Version 4 mode. This patch introduced an interface to enable
+v4 mode.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 29 +++++++++++++++++++++++++++++
+ drivers/mmc/host/sdhci.h | 3 +++
+ 2 files changed, 32 insertions(+)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index e47f1aa..5cc756a 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -123,6 +123,29 @@ EXPORT_SYMBOL_GPL(sdhci_dumpregs);
+ * *
+ \*****************************************************************************/
+
++static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
++{
++	u16 ctrl2;
++
++	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++	if (ctrl2 & SDHCI_CTRL_V4_MODE)
++		return;
++
++	ctrl2 |= SDHCI_CTRL_V4_MODE;
++	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++}
++
++/*
++ * This can be called before sdhci_add_host() by Vendor's host controller
++ * driver to enable v4 mode if supported.
++ */
++void sdhci_enable_v4_mode(struct sdhci_host *host)
++{
++ host->v4_mode = true;
++ sdhci_do_enable_v4_mode(host);
++}
++EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
++
+ static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
+ {
+ return cmd->data || cmd->flags & MMC_RSP_BUSY;
+@@ -252,6 +275,9 @@ static void sdhci_init(struct sdhci_host *host, int soft)
+ else
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
++ if (host->v4_mode)
++ sdhci_do_enable_v4_mode(host);
++
+ sdhci_set_default_irqs(host);
+
+ host->cqe_on = false;
+@@ -3293,6 +3319,9 @@ void __sdhci_read_caps(struct sdhci_host *host, u16 *ver, u32 *caps, u32 *caps1)
+
+ sdhci_do_reset(host, SDHCI_RESET_ALL);
+
++ if (host->v4_mode)
++ sdhci_do_enable_v4_mode(host);
++
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+ "sdhci-caps-mask", &dt_caps_mask);
+ of_property_read_u64(mmc_dev(host->mmc)->of_node,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 5bd6aa4..cc40ddc 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -184,6 +184,7 @@
+ #define SDHCI_CTRL_DRV_TYPE_D 0x0030
+ #define SDHCI_CTRL_EXEC_TUNING 0x0040
+ #define SDHCI_CTRL_TUNED_CLK 0x0080
++#define SDHCI_CTRL_V4_MODE 0x1000
+ #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+
+ #define SDHCI_CAPABILITIES 0x40
+@@ -490,6 +491,7 @@ struct sdhci_host {
+ bool bus_on; /* Bus power prevents runtime suspend */
+ bool preset_enabled; /* Preset is enabled */
+ bool pending_reset; /* Cmd/data reset is pending */
++ bool v4_mode; /* Host Version 4 Enable */
+
+ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */
+ struct mmc_command *cmd; /* Current command */
+@@ -744,6 +746,7 @@ bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
+ int *data_error);
+
+ void sdhci_dumpregs(struct sdhci_host *host);
++void sdhci_enable_v4_mode(struct sdhci_host *host);
+
+ void sdhci_start_tuning(struct sdhci_host *host);
+ void sdhci_end_tuning(struct sdhci_host *host);
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0106-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0106-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch
new file mode 100644
index 00000000..2223b777
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0106-mmc-sdhci-Add-ADMA2-64-bit-addressing-support-for-V4.patch
@@ -0,0 +1,210 @@
+From c2398060f0982a65582b59636777dfec2134fb1c Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 22 Jan 2019 22:59:45 +0530
+Subject: [PATCH 106/131] mmc: sdhci: Add ADMA2 64-bit addressing support for
+ V4 mode
+
+ADMA2 64-bit addressing support is divided into V3 mode and V4 mode.
+So there are two kinds of descriptors for ADMA2 64-bit addressing
+i.e. 96-bit Descriptor for V3 mode, and 128-bit Descriptor for V4
+mode. 128-bit Descriptor is aligned to 8-byte.
+
+For V4 mode, ADMA2 64-bit addressing is enabled via Host Control 2
+register.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+[Ulf: Fixed conflict while applying]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 92 +++++++++++++++++++++++++++++++++++-------------
+ drivers/mmc/host/sdhci.h | 12 +++++--
+ 2 files changed, 78 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 5cc756a..4b18f3f 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -266,6 +266,52 @@ static void sdhci_set_default_irqs(struct sdhci_host *host)
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
+
++static void sdhci_config_dma(struct sdhci_host *host)
++{
++ u8 ctrl;
++ u16 ctrl2;
++
++ if (host->version < SDHCI_SPEC_200)
++ return;
++
++ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
++
++ /*
++ * Always adjust the DMA selection as some controllers
++ * (e.g. JMicron) can't do PIO properly when the selection
++ * is ADMA.
++ */
++ ctrl &= ~SDHCI_CTRL_DMA_MASK;
++ if (!(host->flags & SDHCI_REQ_USE_DMA))
++ goto out;
++
++ /* Note if DMA Select is zero then SDMA is selected */
++ if (host->flags & SDHCI_USE_ADMA)
++ ctrl |= SDHCI_CTRL_ADMA32;
++
++ if (host->flags & SDHCI_USE_64_BIT_DMA) {
++ /*
++ * If v4 mode, all supported DMA can be 64-bit addressing if
++ * controller supports 64-bit system address, otherwise only
++ * ADMA can support 64-bit addressing.
++ */
++ if (host->v4_mode) {
++ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
++ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++ } else if (host->flags & SDHCI_USE_ADMA) {
++ /*
++ * Don't need to undo SDHCI_CTRL_ADMA32 in order to
++ * set SDHCI_CTRL_ADMA64.
++ */
++ ctrl |= SDHCI_CTRL_ADMA64;
++ }
++ }
++
++out:
++ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
++}
++
+ static void sdhci_init(struct sdhci_host *host, int soft)
+ {
+ struct mmc_host *mmc = host->mmc;
+@@ -839,7 +885,6 @@ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+
+ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ {
+- u8 ctrl;
+ struct mmc_data *data = cmd->data;
+
+ if (sdhci_data_line_cmd(cmd))
+@@ -934,25 +979,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ }
+ }
+
+- /*
+- * Always adjust the DMA selection as some controllers
+- * (e.g. JMicron) can't do PIO properly when the selection
+- * is ADMA.
+- */
+- if (host->version >= SDHCI_SPEC_200) {
+- ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+- ctrl &= ~SDHCI_CTRL_DMA_MASK;
+- if ((host->flags & SDHCI_REQ_USE_DMA) &&
+- (host->flags & SDHCI_USE_ADMA)) {
+- if (host->flags & SDHCI_USE_64_BIT_DMA)
+- ctrl |= SDHCI_CTRL_ADMA64;
+- else
+- ctrl |= SDHCI_CTRL_ADMA32;
+- } else {
+- ctrl |= SDHCI_CTRL_SDMA;
+- }
+- sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+- }
++ sdhci_config_dma(host);
+
+ if (!(host->flags & SDHCI_REQ_USE_DMA)) {
+ int flags;
+@@ -3416,6 +3443,19 @@ static int sdhci_allocate_bounce_buffer(struct sdhci_host *host)
+ return 0;
+ }
+
++static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
++{
++ /*
++ * According to SD Host Controller spec v4.10, bit[27] added from
++ * version 4.10 in Capabilities Register is used as 64-bit System
++ * Address support for V4 mode.
++ */
++ if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
++ return host->caps & SDHCI_CAN_64BIT_V4;
++
++ return host->caps & SDHCI_CAN_64BIT;
++}
++
+ int sdhci_setup_host(struct sdhci_host *host)
+ {
+ struct mmc_host *mmc;
+@@ -3487,7 +3527,7 @@ int sdhci_setup_host(struct sdhci_host *host)
+ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
+ * implement.
+ */
+- if (host->caps & SDHCI_CAN_64BIT)
++ if (sdhci_can_64bit_dma(host))
+ host->flags |= SDHCI_USE_64_BIT_DMA;
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+@@ -3515,8 +3555,8 @@ int sdhci_setup_host(struct sdhci_host *host)
+
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ host->adma_table_sz = host->adma_table_cnt *
+- SDHCI_ADMA2_64_DESC_SZ;
+- host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
++ SDHCI_ADMA2_64_DESC_SZ(host);
++ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
+ } else {
+ host->adma_table_sz = host->adma_table_cnt *
+ SDHCI_ADMA2_32_DESC_SZ;
+@@ -3524,7 +3564,11 @@ int sdhci_setup_host(struct sdhci_host *host)
+ }
+
+ host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
+- buf = dma_alloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
++ /*
++ * Use zalloc to zero the reserved high 32-bits of 128-bit
++ * descriptors so that they never need to be written.
++ */
++ buf = dma_zalloc_coherent(mmc_dev(mmc), host->align_buffer_sz +
+ host->adma_table_sz, &dma, GFP_KERNEL);
+ if (!buf) {
+ pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index cc40ddc..0b5ac1c 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -185,6 +185,7 @@
+ #define SDHCI_CTRL_EXEC_TUNING 0x0040
+ #define SDHCI_CTRL_TUNED_CLK 0x0080
+ #define SDHCI_CTRL_V4_MODE 0x1000
++#define SDHCI_CTRL_64BIT_ADDR 0x2000
+ #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+
+ #define SDHCI_CAPABILITIES 0x40
+@@ -205,6 +206,7 @@
+ #define SDHCI_CAN_VDD_330 0x01000000
+ #define SDHCI_CAN_VDD_300 0x02000000
+ #define SDHCI_CAN_VDD_180 0x04000000
++#define SDHCI_CAN_64BIT_V4 0x08000000
+ #define SDHCI_CAN_64BIT 0x10000000
+
+ #define SDHCI_SUPPORT_SDR50 0x00000001
+@@ -309,8 +311,14 @@ struct sdhci_adma2_32_desc {
+ */
+ #define SDHCI_ADMA2_DESC_ALIGN 8
+
+-/* ADMA2 64-bit DMA descriptor size */
+-#define SDHCI_ADMA2_64_DESC_SZ 12
++/*
++ * ADMA2 64-bit DMA descriptor size
++ * According to SD Host Controller spec v4.10, there are two kinds of
++ * descriptors for 64-bit addressing mode: 96-bit Descriptor and 128-bit
++ * Descriptor, if Host Version 4 Enable is set in the Host Control 2
++ * register, 128-bit Descriptor will be selected.
++ */
++#define SDHCI_ADMA2_64_DESC_SZ(host) ((host)->v4_mode ? 16 : 12)
+
+ /*
+ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0107-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0107-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch
new file mode 100644
index 00000000..407c8917
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0107-mmc-sdhci-Add-32-bit-block-count-support-for-v4-mode.patch
@@ -0,0 +1,79 @@
+From 6f5130259bf48630d9bf77359e8f8e4badc55cfe Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 22 Jan 2019 23:00:26 +0530
+Subject: [PATCH 107/131] mmc: sdhci: Add 32-bit block count support for v4
+ mode
+
+Host Controller Version 4.10 re-defines SDMA System Address register
+as 32-bit Block Count for v4 mode, and SDMA uses ADMA System
+Address register (05Fh-058h) instead if v4 mode is enabled. Also
+when using 32-bit block count, 16-bit block count register need
+to be set to zero.
+
+Since using 32-bit Block Count would cause problems for auto-cmd23,
+it can be chosen via host->quirk2.
+
+Signed-off-by: Chunyan Zhang <zhang.chunyan@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 14 +++++++++++++-
+ drivers/mmc/host/sdhci.h | 8 ++++++++
+ 2 files changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index 4b18f3f..da59b0a 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -998,7 +998,19 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
+ /* Set the DMA boundary value and block size */
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
+ SDHCI_BLOCK_SIZE);
+- sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
++
++ /*
++ * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
++ * can be supported, in that case 16-bit block count register must be 0.
++ */
++ if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
++ (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
++ if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
++ sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
++ sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
++ } else {
++ sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
++ }
+ }
+
+ static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 0b5ac1c..5197966 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -28,6 +28,7 @@
+
+ #define SDHCI_DMA_ADDRESS 0x00
+ #define SDHCI_ARGUMENT2 SDHCI_DMA_ADDRESS
++#define SDHCI_32BIT_BLK_CNT SDHCI_DMA_ADDRESS
+
+ #define SDHCI_BLOCK_SIZE 0x04
+ #define SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF))
+@@ -449,6 +450,13 @@ struct sdhci_host {
+ #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15)
+ /* Controller has CRC in 136 bit Command Response */
+ #define SDHCI_QUIRK2_RSP_136_HAS_CRC (1<<16)
++/*
++ * 32-bit block count may not support eMMC where upper bits of CMD23 are used
++ * for other purposes. Consequently we support 16-bit block count by default.
++ * Otherwise, SDHCI_QUIRK2_USE_32BIT_BLK_CNT can be selected to use 32-bit
++ * block count.
++ */
++#define SDHCI_QUIRK2_USE_32BIT_BLK_CNT (1<<18)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0108-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0108-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch
new file mode 100644
index 00000000..9242d0bd
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0108-mmc-sdhci-Add-Auto-CMD-Auto-Select-support.patch
@@ -0,0 +1,113 @@
+From f5ec4fb304cea4512e2a321152fa4dd3ba70f6aa Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 22 Jan 2019 23:01:11 +0530
+Subject: [PATCH 108/131] mmc: sdhci: Add Auto CMD Auto Select support
+
+As SD Host Controller Specification v4.10 documents:
+Host Controller Version 4.10 defines this "Auto CMD Auto Select" mode.
+Selection of Auto CMD depends on setting of CMD23 Enable in the Host
+Control 2 register which indicates whether card supports CMD23. If CMD23
+Enable =1, Auto CMD23 is used and if CMD23 Enable =0, Auto CMD12 is
+used. In case of Version 4.10 or later, use of Auto CMD Auto Select is
+recommended rather than use of Auto CMD12 Enable or Auto CMD23
+Enable.
+
+This patch add this new mode support.
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/host/sdhci.c | 49 ++++++++++++++++++++++++++++++++++++++----------
+ drivers/mmc/host/sdhci.h | 2 ++
+ 2 files changed, 41 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index da59b0a..dc5d75f 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1020,6 +1020,43 @@ static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
+ !mrq->cap_cmd_during_tfr;
+ }
+
++static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
++ struct mmc_command *cmd,
++ u16 *mode)
++{
++ bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
++ (cmd->opcode != SD_IO_RW_EXTENDED);
++ bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
++ u16 ctrl2;
++
++ /*
++ * In case of Version 4.10 or later, use of 'Auto CMD Auto
++ * Select' is recommended rather than use of 'Auto CMD12
++ * Enable' or 'Auto CMD23 Enable'.
++ */
++ if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
++ *mode |= SDHCI_TRNS_AUTO_SEL;
++
++ ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
++ if (use_cmd23)
++ ctrl2 |= SDHCI_CMD23_ENABLE;
++ else
++ ctrl2 &= ~SDHCI_CMD23_ENABLE;
++ sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
++
++ return;
++ }
++
++ /*
++ * If we are sending CMD23, CMD12 never gets sent
++ * on successful completion (so no Auto-CMD12).
++ */
++ if (use_cmd12)
++ *mode |= SDHCI_TRNS_AUTO_CMD12;
++ else if (use_cmd23)
++ *mode |= SDHCI_TRNS_AUTO_CMD23;
++}
++
+ static void sdhci_set_transfer_mode(struct sdhci_host *host,
+ struct mmc_command *cmd)
+ {
+@@ -1046,17 +1083,9 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
+
+ if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
+ mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
+- /*
+- * If we are sending CMD23, CMD12 never gets sent
+- * on successful completion (so no Auto-CMD12).
+- */
+- if (sdhci_auto_cmd12(host, cmd->mrq) &&
+- (cmd->opcode != SD_IO_RW_EXTENDED))
+- mode |= SDHCI_TRNS_AUTO_CMD12;
+- else if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+- mode |= SDHCI_TRNS_AUTO_CMD23;
++ sdhci_auto_cmd_select(host, cmd, &mode);
++ if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
+ sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
+- }
+ }
+
+ if (data->flags & MMC_DATA_READ)
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 5197966..23ddc46 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -42,6 +42,7 @@
+ #define SDHCI_TRNS_BLK_CNT_EN 0x02
+ #define SDHCI_TRNS_AUTO_CMD12 0x04
+ #define SDHCI_TRNS_AUTO_CMD23 0x08
++#define SDHCI_TRNS_AUTO_SEL 0x0C
+ #define SDHCI_TRNS_READ 0x10
+ #define SDHCI_TRNS_MULTI 0x20
+
+@@ -185,6 +186,7 @@
+ #define SDHCI_CTRL_DRV_TYPE_D 0x0030
+ #define SDHCI_CTRL_EXEC_TUNING 0x0040
+ #define SDHCI_CTRL_TUNED_CLK 0x0080
++#define SDHCI_CMD23_ENABLE 0x0800
+ #define SDHCI_CTRL_V4_MODE 0x1000
+ #define SDHCI_CTRL_64BIT_ADDR 0x2000
+ #define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0109-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0109-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch
new file mode 100644
index 00000000..50b0433b
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0109-amd-eMMC-sdhci-HS400-workaround-for-ZP.patch
@@ -0,0 +1,149 @@
+From c3dcaa45a3f29d943e18bc6470a80b827ac7a115 Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Tue, 22 Jan 2019 23:01:45 +0530
+Subject: [PATCH 109/131] amd-eMMC sdhci HS400 workaround for ZP
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/mmc/core/mmc.c | 12 ++++++++++--
+ drivers/mmc/host/sdhci-acpi.c | 1 +
+ drivers/mmc/host/sdhci.c | 9 +++++++++
+ drivers/mmc/host/sdhci.h | 1 +
+ include/linux/mmc/host.h | 1 +
+ 5 files changed, 22 insertions(+), 2 deletions(-)
+ mode change 100644 => 100755 drivers/mmc/core/mmc.c
+ mode change 100644 => 100755 drivers/mmc/host/sdhci-acpi.c
+ mode change 100644 => 100755 drivers/mmc/host/sdhci.c
+ mode change 100644 => 100755 include/linux/mmc/host.h
+
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+old mode 100644
+new mode 100755
+index 16b22d7..2313e58
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -1161,7 +1161,9 @@ static int mmc_select_hs400(struct mmc_card *card)
+ mmc_hostname(host), err);
+ return err;
+ }
++ /*In AMD Platform due to hardware ip issue this fails*/
+
++ if (!host->ops->set_hs400_dll) {
+ /* Set host controller to HS timing */
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+@@ -1172,7 +1174,7 @@ static int mmc_select_hs400(struct mmc_card *card)
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+-
++ }
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
+@@ -1212,6 +1214,8 @@ static int mmc_select_hs400(struct mmc_card *card)
+ if (host->ops->hs400_complete)
+ host->ops->hs400_complete(host);
+
++ if (host->ops->set_hs400_dll)
++ host->ops->set_hs400_dll(host);
+ return 0;
+
+ out_err:
+@@ -1234,6 +1238,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
++ if (!host->ops->set_hs400_dll)
+ mmc_set_clock(host, max_dtr);
+
+ /* Switch HS400 to HS DDR */
+@@ -1243,12 +1248,15 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
+ true, false, true);
+ if (err)
+ goto out_err;
+-
++ /*In AMD Platform due to hardware ip issue this fails*/
++ if (!host->ops->set_hs400_dll)
++ {
+ mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+
+ err = mmc_switch_status(card);
+ if (err)
+ goto out_err;
++ }
+
+ /* Switch HS DDR to HS */
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
+diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
+old mode 100644
+new mode 100755
+index c2e7048..558b792
+--- a/drivers/mmc/host/sdhci-acpi.c
++++ b/drivers/mmc/host/sdhci-acpi.c
+@@ -411,6 +411,7 @@ static const struct sdhci_ops sdhci_acpi_ops_amd = {
+ .set_bus_width = sdhci_set_bus_width,
+ .reset = sdhci_reset,
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
++ .set_hs400_dll = sdhci_acpi_amd_hs400_dll,
+ };
+
+ static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = {
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+old mode 100644
+new mode 100755
+index dc5d75f..34c6a81
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -1977,6 +1977,14 @@ static void sdhci_hw_reset(struct mmc_host *mmc)
+ host->ops->hw_reset(host);
+ }
+
++static void sdhci_set_hs400_dll(struct mmc_host *mmc)
++{
++ struct sdhci_host *host = mmc_priv(mmc);
++
++ if (host->ops && host->ops->set_hs400_dll)
++ host->ops->set_hs400_dll(host);
++}
++
+ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
+ {
+ if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+@@ -2465,6 +2473,7 @@ static const struct mmc_host_ops sdhci_ops = {
+ .get_cd = sdhci_get_cd,
+ .get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
++ .set_hs400_dll = sdhci_set_hs400_dll,
+ .enable_sdio_irq = sdhci_enable_sdio_irq,
+ .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
+diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
+index 23ddc46..492401d 100755
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -611,6 +611,7 @@ struct sdhci_ops {
+ int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
+ void (*hw_reset)(struct sdhci_host *host);
++ void (*set_hs400_dll)(struct sdhci_host *host);
+ void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ void (*card_event)(struct sdhci_host *host);
+ void (*voltage_switch)(struct sdhci_host *host);
+diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
+old mode 100644
+new mode 100755
+index 843c38f..ba4af38
+--- a/include/linux/mmc/host.h
++++ b/include/linux/mmc/host.h
+@@ -159,6 +159,7 @@ struct mmc_host_ops {
+ unsigned int max_dtr, int host_drv,
+ int card_drv, int *drv_type);
+ void (*hw_reset)(struct mmc_host *host);
++ void (*set_hs400_dll)(struct mmc_host *host);
+ void (*card_event)(struct mmc_host *host);
+
+ /*
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0110-pinctrl-eMMC-and-PinCtrl-is-sharing-the-interrupt-no.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0110-pinctrl-eMMC-and-PinCtrl-is-sharing-the-interrupt-no.patch
new file mode 100644
index 00000000..e55c583a
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0110-pinctrl-eMMC-and-PinCtrl-is-sharing-the-interrupt-no.patch
@@ -0,0 +1,29 @@
+From 2af564865343fa4b384fdb6c61138c07b8601cba Mon Sep 17 00:00:00 2001
+From: Ayyappa Chandolu <Ayyappa.Chandolu@amd.com>
+Date: Fri, 2 Mar 2018 11:25:17 +0530
+Subject: [PATCH 110/131] pinctrl: eMMC and PinCtrl is sharing the interrupt no
+ 7 for Dibber. So PinCtrl must register the interrupt handler with SHARED
+ flags. BUGID : EMBSWDEV-4739
+
+Signed-off-by: Ayyappa Chandolu <Ayyappa.Chandolu@amd.com>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/pinctrl/pinctrl-amd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index b78f42a..f0e1f7c 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -896,7 +896,7 @@ static int amd_gpio_probe(struct platform_device *pdev)
+ goto out2;
+ }
+
+- ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, 0,
++ ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, IRQF_SHARED,
+ KBUILD_MODNAME, gpio_dev);
+ if (ret)
+ goto out2;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0111-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0111-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch
new file mode 100644
index 00000000..1a902632
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0111-amd-xgbe-use-dma_mapping_error-to-check-map-errors.patch
@@ -0,0 +1,45 @@
+From a3c0851e777f229877dcd8480b254b2750a601c4 Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Thu, 26 Jul 2018 09:51:27 +0800
+Subject: [PATCH 111/131] amd-xgbe: use dma_mapping_error to check map errors
+
+The dma_mapping_error() returns true or false, but we want
+to return -ENOMEM if there was an error.
+
+Fixes: 174fd2597b0b ("amd-xgbe: Implement split header receive support")
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+index cc1e4f8..5330942 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -289,7 +289,7 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ struct page *pages = NULL;
+ dma_addr_t pages_dma;
+ gfp_t gfp;
+- int order, ret;
++ int order;
+
+ again:
+ order = alloc_order;
+@@ -316,10 +316,9 @@ static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
+ /* Map the pages */
+ pages_dma = dma_map_page(pdata->dev, pages, 0,
+ PAGE_SIZE << order, DMA_FROM_DEVICE);
+- ret = dma_mapping_error(pdata->dev, pages_dma);
+- if (ret) {
++ if (dma_mapping_error(pdata->dev, pages_dma)) {
+ put_page(pages);
+- return ret;
++ return -ENOMEM;
+ }
+
+ pa->pages = pages;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0112-lib-crc-Move-polynomial-definition-to-separate-heade.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0112-lib-crc-Move-polynomial-definition-to-separate-heade.patch
new file mode 100644
index 00000000..307bc389
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0112-lib-crc-Move-polynomial-definition-to-separate-heade.patch
@@ -0,0 +1,96 @@
+From 56993bb2133c233893248f4611cdedddcefb9a79 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Tue, 17 Jul 2018 18:05:36 +0200
+Subject: [PATCH 112/131] lib/crc: Move polynomial definition to separate
+ header
+
+Allow other drivers and parts of kernel to use the same define for
+CRC32 polynomial, instead of duplicating it in many places. This code
+does not bring any functional changes, except moving existing code.
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/linux/crc32poly.h | 20 ++++++++++++++++++++
+ lib/crc32.c | 1 +
+ lib/crc32defs.h | 14 --------------
+ lib/gen_crc32table.c | 1 +
+ 4 files changed, 22 insertions(+), 14 deletions(-)
+ create mode 100644 include/linux/crc32poly.h
+
+diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
+new file mode 100644
+index 0000000..7ad5aa9
+--- /dev/null
++++ b/include/linux/crc32poly.h
+@@ -0,0 +1,20 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_CRC32_POLY_H
++#define _LINUX_CRC32_POLY_H
++
++/*
++ * There are multiple 16-bit CRC polynomials in common use, but this is
++ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
++ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
++ */
++#define CRCPOLY_LE 0xedb88320
++#define CRCPOLY_BE 0x04c11db7
++
++/*
++ * This is the CRC32c polynomial, as outlined by Castagnoli.
++ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
++ * x^8+x^6+x^0
++ */
++#define CRC32C_POLY_LE 0x82F63B78
++
++#endif /* _LINUX_CRC32_POLY_H */
+diff --git a/lib/crc32.c b/lib/crc32.c
+index 6ddc92b..82bfc053 100644
+--- a/lib/crc32.c
++++ b/lib/crc32.c
+@@ -27,6 +27,7 @@
+ /* see: Documentation/crc32.txt for a description of algorithms */
+
+ #include <linux/crc32.h>
++#include <linux/crc32poly.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+ #include <linux/sched.h>
+diff --git a/lib/crc32defs.h b/lib/crc32defs.h
+index cb275a2..0c8fb59 100644
+--- a/lib/crc32defs.h
++++ b/lib/crc32defs.h
+@@ -1,18 +1,4 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * There are multiple 16-bit CRC polynomials in common use, but this is
+- * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+- * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+- */
+-#define CRCPOLY_LE 0xedb88320
+-#define CRCPOLY_BE 0x04c11db7
+-
+-/*
+- * This is the CRC32c polynomial, as outlined by Castagnoli.
+- * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+- * x^8+x^6+x^0
+- */
+-#define CRC32C_POLY_LE 0x82F63B78
+
+ /* Try to choose an implementation variant via Kconfig */
+ #ifdef CONFIG_CRC32_SLICEBY8
+diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
+index 8f26660..34c3bc8 100644
+--- a/lib/gen_crc32table.c
++++ b/lib/gen_crc32table.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ #include <stdio.h>
++#include "../include/linux/crc32poly.h"
+ #include "../include/generated/autoconf.h"
+ #include "crc32defs.h"
+ #include <inttypes.h>
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0113-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0113-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch
new file mode 100644
index 00000000..9b5511cd
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0113-lib-crc-Use-consistent-naming-for-CRC-32-polynomials.patch
@@ -0,0 +1,104 @@
+From 42b2ec07bcaa1a833a595937df05a7ee07627ef4 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Tue, 17 Jul 2018 18:05:37 +0200
+Subject: [PATCH 113/131] lib/crc: Use consistent naming for CRC-32 polynomials
+
+Header was defining CRCPOLY_LE/BE and CRC32C_POLY_LE but in fact all of
+them are CRC-32 polynomials so use consistent naming.
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ include/linux/crc32poly.h | 4 ++--
+ lib/crc32.c | 10 +++++-----
+ lib/gen_crc32table.c | 4 ++--
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
+index 7ad5aa9..62c4b77 100644
+--- a/include/linux/crc32poly.h
++++ b/include/linux/crc32poly.h
+@@ -7,8 +7,8 @@
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+-#define CRCPOLY_LE 0xedb88320
+-#define CRCPOLY_BE 0x04c11db7
++#define CRC32_POLY_LE 0xedb88320
++#define CRC32_POLY_BE 0x04c11db7
+
+ /*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+diff --git a/lib/crc32.c b/lib/crc32.c
+index 82bfc053..7111c44 100644
+--- a/lib/crc32.c
++++ b/lib/crc32.c
+@@ -185,7 +185,7 @@ static inline u32 __pure crc32_le_generic(u32 crc, unsigned char const *p,
+ #if CRC_LE_BITS == 1
+ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+ {
+- return crc32_le_generic(crc, p, len, NULL, CRCPOLY_LE);
++ return crc32_le_generic(crc, p, len, NULL, CRC32_POLY_LE);
+ }
+ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+ {
+@@ -195,7 +195,7 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len)
+ {
+ return crc32_le_generic(crc, p, len,
+- (const u32 (*)[256])crc32table_le, CRCPOLY_LE);
++ (const u32 (*)[256])crc32table_le, CRC32_POLY_LE);
+ }
+ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
+ {
+@@ -269,7 +269,7 @@ static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+
+ u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+ {
+- return crc32_generic_shift(crc, len, CRCPOLY_LE);
++ return crc32_generic_shift(crc, len, CRC32_POLY_LE);
+ }
+
+ u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+@@ -331,13 +331,13 @@ static inline u32 __pure crc32_be_generic(u32 crc, unsigned char const *p,
+ #if CRC_LE_BITS == 1
+ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+ {
+- return crc32_be_generic(crc, p, len, NULL, CRCPOLY_BE);
++ return crc32_be_generic(crc, p, len, NULL, CRC32_POLY_BE);
+ }
+ #else
+ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len)
+ {
+ return crc32_be_generic(crc, p, len,
+- (const u32 (*)[256])crc32table_be, CRCPOLY_BE);
++ (const u32 (*)[256])crc32table_be, CRC32_POLY_BE);
+ }
+ #endif
+ EXPORT_SYMBOL(crc32_be);
+diff --git a/lib/gen_crc32table.c b/lib/gen_crc32table.c
+index 34c3bc8..f755b99 100644
+--- a/lib/gen_crc32table.c
++++ b/lib/gen_crc32table.c
+@@ -58,7 +58,7 @@ static void crc32init_le_generic(const uint32_t polynomial,
+
+ static void crc32init_le(void)
+ {
+- crc32init_le_generic(CRCPOLY_LE, crc32table_le);
++ crc32init_le_generic(CRC32_POLY_LE, crc32table_le);
+ }
+
+ static void crc32cinit_le(void)
+@@ -77,7 +77,7 @@ static void crc32init_be(void)
+ crc32table_be[0][0] = 0;
+
+ for (i = 1; i < BE_TABLE_SIZE; i <<= 1) {
+- crc = (crc << 1) ^ ((crc & 0x80000000) ? CRCPOLY_BE : 0);
++ crc = (crc << 1) ^ ((crc & 0x80000000) ? CRC32_POLY_BE : 0);
+ for (j = 0; j < i; j++)
+ crc32table_be[0][i + j] = crc ^ crc32table_be[0][j];
+ }
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0114-net-ethernet-Use-existing-define-with-polynomial.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0114-net-ethernet-Use-existing-define-with-polynomial.patch
new file mode 100644
index 00000000..da9af087
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0114-net-ethernet-Use-existing-define-with-polynomial.patch
@@ -0,0 +1,46 @@
+From dd77e87d25cb432b40a5dbc1b6c6dcada717ce69 Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzk@kernel.org>
+Date: Tue, 17 Jul 2018 18:05:39 +0200
+Subject: [PATCH 114/131] net: ethernet: Use existing define with polynomial
+
+Do not define again the polynomial but use header with existing define.
+
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+index e107e18..1e929a1 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+@@ -119,6 +119,7 @@
+ #include <linux/clk.h>
+ #include <linux/bitrev.h>
+ #include <linux/crc32.h>
++#include <linux/crc32poly.h>
+
+ #include "xgbe.h"
+ #include "xgbe-common.h"
+@@ -887,7 +888,6 @@ static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+
+ static u32 xgbe_vid_crc32_le(__le16 vid_le)
+ {
+- u32 poly = 0xedb88320; /* CRCPOLY_LE */
+ u32 crc = ~0;
+ u32 temp = 0;
+ unsigned char *data = (unsigned char *)&vid_le;
+@@ -904,7 +904,7 @@ static u32 xgbe_vid_crc32_le(__le16 vid_le)
+ data_byte >>= 1;
+
+ if (temp)
+- crc ^= poly;
++ crc ^= CRC32_POLY_LE;
+ }
+
+ return crc;
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0115-net-amd-fix-return-type-of-ndo_start_xmit-function.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0115-net-amd-fix-return-type-of-ndo_start_xmit-function.patch
new file mode 100644
index 00000000..2c9e4bc1
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0115-net-amd-fix-return-type-of-ndo_start_xmit-function.patch
@@ -0,0 +1,44 @@
+From 836d2feccaa1e38e4d3c6adab713f9f6d579cd7f Mon Sep 17 00:00:00 2001
+From: YueHaibing <yuehaibing@huawei.com>
+Date: Wed, 19 Sep 2018 18:50:17 +0800
+Subject: [PATCH 115/131] net: amd: fix return type of ndo_start_xmit function
+
+The method ndo_start_xmit() is defined as returning an 'netdev_tx_t',
+which is a typedef for an enum type, so make sure the implementation in
+this driver has returns 'netdev_tx_t' value, and change the function
+return type to netdev_tx_t.
+
+Found by coccinelle.
+
+Signed-off-by: YueHaibing <yuehaibing@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 8cfba4b..d85272d 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -2009,7 +2009,7 @@ static int xgbe_close(struct net_device *netdev)
+ return 0;
+ }
+
+-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
++static netdev_tx_t xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+@@ -2018,7 +2018,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ struct netdev_queue *txq;
+- int ret;
++ netdev_tx_t ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0116-net-phy-Add-helper-for-advertise-to-lcl-value.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0116-net-phy-Add-helper-for-advertise-to-lcl-value.patch
new file mode 100644
index 00000000..4afd6f7d
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0116-net-phy-Add-helper-for-advertise-to-lcl-value.patch
@@ -0,0 +1,71 @@
+From 6bb7c3d2ca408f41be6c4a9d51cba757fc53afcb Mon Sep 17 00:00:00 2001
+From: Andrew Lunn <andrew@lunn.ch>
+Date: Sat, 29 Sep 2018 23:04:13 +0200
+Subject: [PATCH 116/131] net: phy: Add helper for advertise to lcl value
+
+Add a helper to convert the local advertising to an LCL capabilities,
+which is then used to resolve pause flow control settings.
+
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Maxime Chevallier <maxime.chevallier@bootlin.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 5 +----
+ include/linux/mii.h | 20 ++++++++++++++++++++
+ 2 files changed, 21 insertions(+), 4 deletions(-)
+ mode change 100644 => 100755 include/linux/mii.h
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 3ceb4f9..5f01b36 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -1495,10 +1495,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata)
+ if (!phy_data->phydev)
+ return;
+
+- if (phy_data->phydev->advertising & ADVERTISED_Pause)
+- lcl_adv |= ADVERTISE_PAUSE_CAP;
+- if (phy_data->phydev->advertising & ADVERTISED_Asym_Pause)
+- lcl_adv |= ADVERTISE_PAUSE_ASYM;
++ lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising);
+
+ if (phy_data->phydev->pause) {
+ XGBE_SET_LP_ADV(lks, Pause);
+diff --git a/include/linux/mii.h b/include/linux/mii.h
+old mode 100644
+new mode 100755
+index 55000ee..63cd587
+--- a/include/linux/mii.h
++++ b/include/linux/mii.h
+@@ -302,6 +302,26 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
+ return result | mii_adv_to_ethtool_adv_x(lpa);
+ }
+
++
++/**
++ * ethtool_adv_to_lcl_adv_t
++ * @advertising:pointer to ethtool advertising
++ *
++ * A small helper function that translates ethtool advertising to LVL
++ * pause capabilities.
++ */
++static inline u32 ethtool_adv_to_lcl_adv_t(u32 advertising)
++{
++ u32 lcl_adv = 0;
++
++ if (advertising & ADVERTISED_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_CAP;
++ if (advertising & ADVERTISED_Asym_Pause)
++ lcl_adv |= ADVERTISE_PAUSE_ASYM;
++
++ return lcl_adv;
++}
++
+ /**
+ * mii_advertise_flowctrl - get flow control advertisement flags
+ * @cap: Flow control capabilities (FLOW_CTRL_RX, FLOW_CTRL_TX or both)
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0117-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0117-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch
new file mode 100644
index 00000000..c86cba1c
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0117-drivers-net-remove-net-busy_poll.h-inclusion-when-no.patch
@@ -0,0 +1,35 @@
+From e3655f57f50fd9cd92336563a2cfadcda06bad25 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 25 Oct 2018 06:42:12 -0700
+Subject: [PATCH 117/131] drivers: net: remove <net/busy_poll.h> inclusion when
+ not needed
+
+Drivers using generic NAPI interface no longer need to include
+<net/busy_poll.h>, since busy polling was moved to core networking
+stack long ago.
+
+See commit 79e7fff47b7b ("net: remove support for per driver
+ndo_busy_poll()") for reference.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index d85272d..649a283 100755
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -119,7 +119,6 @@
+ #include <linux/tcp.h>
+ #include <linux/if_vlan.h>
+ #include <linux/interrupt.h>
+-#include <net/busy_poll.h>
+ #include <linux/clk.h>
+ #include <linux/if_ether.h>
+ #include <linux/net_tstamp.h>
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0118-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0118-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch
new file mode 100644
index 00000000..9df6d48a
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0118-net-ethernet-xgbe-expand-PHY_GBIT_FEAUTRES.patch
@@ -0,0 +1,104 @@
+From f5aaf7afea74803350355aec49db4c4dfed8d55f Mon Sep 17 00:00:00 2001
+From: Sudheesh Mavila <sudheesh.mavila@amd.com>
+Date: Sun, 11 Nov 2018 23:32:49 +0530
+Subject: [PATCH 118/131] net: ethernet: xgbe: expand PHY_GBIT_FEAUTRES
+
+From d0939c26c53a2b2cecfbe6953858a58abb0158c7
+The macro PHY_GBIT_FEAUTRES needs to change into a bitmap in order to
+support link_modes. Remove its use from xgde by replacing it with its
+definition.
+
+Probably, the current behavior is wrong. It probably should be
+ANDing not assigning.
+
+Signed-off-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 14 ++++++++------
+ drivers/net/phy/phy_device.c | 14 ++++++++++++++
+ include/linux/phy.h | 1 +
+ 3 files changed, 23 insertions(+), 6 deletions(-)
+ mode change 100644 => 100755 drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+ mode change 100644 => 100755 drivers/net/phy/phy_device.c
+ mode change 100644 => 100755 include/linux/phy.h
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+old mode 100644
+new mode 100755
+index 5f01b36..151bdb6
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -878,9 +878,10 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata)
+ phy_write(phy_data->phydev, 0x04, 0x0d01);
+ phy_write(phy_data->phydev, 0x00, 0x9140);
+
+- phy_data->phydev->supported = PHY_GBIT_FEATURES;
+- phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- phy_data->phydev->advertising = phy_data->phydev->supported;
++ phy_data->phydev->supported = PHY_10BT_FEATURES |
++ PHY_100BT_FEATURES |
++ PHY_1000BT_FEATURES;
++ phy_support_asym_pause(phy_data->phydev);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "Finisar PHY quirk in place\n");
+@@ -950,9 +951,10 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ reg = phy_read(phy_data->phydev, 0x00);
+ phy_write(phy_data->phydev, 0x00, reg & ~0x00800);
+
+- phy_data->phydev->supported = PHY_GBIT_FEATURES;
+- phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+- phy_data->phydev->advertising = phy_data->phydev->supported;
++ phy_data->phydev->supported = (PHY_10BT_FEATURES |
++ PHY_100BT_FEATURES |
++ PHY_1000BT_FEATURES);
++ phy_support_asym_pause(phy_data->phydev);
+
+ netif_dbg(pdata, drv, pdata->netdev,
+ "BelFuse PHY quirk in place\n");
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+old mode 100644
+new mode 100755
+index fe76e2c..f16af99
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -1736,6 +1736,20 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
+ }
+ EXPORT_SYMBOL(phy_set_max_speed);
+
++/**
++ * phy_support_asym_pause - Enable support of asym pause
++ * @phydev: target phy_device struct
++ *
++ * Description: Called by the MAC to indicate is supports Asym Pause.
++ */
++void phy_support_asym_pause(struct phy_device *phydev)
++{
++ phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
++ phydev->advertising = phydev->supported;
++}
++EXPORT_SYMBOL(phy_support_asym_pause);
++
++
+ static void of_set_phy_supported(struct phy_device *phydev)
+ {
+ struct device_node *node = phydev->mdio.dev.of_node;
+diff --git a/include/linux/phy.h b/include/linux/phy.h
+old mode 100644
+new mode 100755
+index efc04c2..38d36a6
+--- a/include/linux/phy.h
++++ b/include/linux/phy.h
+@@ -913,6 +913,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
+ int phy_start_interrupts(struct phy_device *phydev);
+ void phy_print_status(struct phy_device *phydev);
+ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
++void phy_support_asym_pause(struct phy_device *phydev);
+
+ int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
+ int (*run)(struct phy_device *));
+--
+2.7.4
+
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0119-crypto-ahash-remove-useless-setting-of-type-flags.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0119-crypto-ahash-remove-useless-setting-of-type-flags.patch
new file mode 100644
index 00000000..c92bf5fa
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0119-crypto-ahash-remove-useless-setting-of-type-flags.patch
@@ -0,0 +1,53 @@
+From 358e73a569470ef44a142ecd1cd096587b5e9da2 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sat, 30 Jun 2018 15:16:12 -0700
+Subject: [PATCH 119/131] crypto: ahash - remove useless setting of type flags
+
+Many ahash algorithms set .cra_flags = CRYPTO_ALG_TYPE_AHASH. But this
+is redundant with the C structure type ('struct ahash_alg'), and
+crypto_register_ahash() already sets the type flag automatically,
+clearing any type flag that was already there. Apparently the useless
+assignment has just been copy+pasted around.
+
+So, remove the useless assignment from all the ahash algorithms.