Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch')
-rw-r--r--  meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch  154
1 file changed, 0 insertions(+), 154 deletions(-)
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch
deleted file mode 100644
index 611d718b..00000000
--- a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0052-x86-mm-Add-DMA-support-for-SEV-memory-encryption.patch
+++ /dev/null
@@ -1,154 +0,0 @@
-From 45f7a023504c57c0820c1d816a7298bf35ba0134 Mon Sep 17 00:00:00 2001
-From: Tom Lendacky <thomas.lendacky@amd.com>
-Date: Fri, 20 Oct 2017 09:30:53 -0500
-Subject: [PATCH 52/95] x86/mm: Add DMA support for SEV memory encryption
-
-DMA access to encrypted memory cannot be performed when SEV is active.
-In order for DMA to properly work when SEV is active, the SWIOTLB bounce
-buffers must be used.
-
-Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
-Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: Borislav Petkov <bp@suse.de>
-Tested-by: Borislav Petkov <bp@suse.de>
-Cc: kvm@vger.kernel.org
-Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Cc: Borislav Petkov <bp@alien8.de>
-Link: https://lkml.kernel.org/r/20171020143059.3291-12-brijesh.singh@amd.com
-Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
----
- arch/x86/mm/mem_encrypt.c | 86 +++++++++++++++++++++++++++++++++++++++++++++++
- lib/swiotlb.c | 5 +--
- 2 files changed, 89 insertions(+), 2 deletions(-)
-
-diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
-index 4e4a304..3c82d64 100755
---- a/arch/x86/mm/mem_encrypt.c
-+++ b/arch/x86/mm/mem_encrypt.c
-@@ -192,6 +192,70 @@ void __init sme_early_init(void)
- /* Update the protection map with memory encryption mask */
- for (i = 0; i < ARRAY_SIZE(protection_map); i++)
- protection_map[i] = pgprot_encrypted(protection_map[i]);
-+
-+ if (sev_active())
-+ swiotlb_force = SWIOTLB_FORCE;
-+}
-+
-+static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
-+ gfp_t gfp, unsigned long attrs)
-+{
-+ unsigned long dma_mask;
-+ unsigned int order;
-+ struct page *page;
-+ void *vaddr = NULL;
-+
-+ dma_mask = dma_alloc_coherent_mask(dev, gfp);
-+ order = get_order(size);
-+
-+ /*
-+ * Memory will be memset to zero after marking decrypted, so don't
-+ * bother clearing it before.
-+ */
-+ gfp &= ~__GFP_ZERO;
-+
-+ page = alloc_pages_node(dev_to_node(dev), gfp, order);
-+ if (page) {
-+ dma_addr_t addr;
-+
-+ /*
-+ * Since we will be clearing the encryption bit, check the
-+ * mask with it already cleared.
-+ */
-+ addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
-+ if ((addr + size) > dma_mask) {
-+ __free_pages(page, get_order(size));
-+ } else {
-+ vaddr = page_address(page);
-+ *dma_handle = addr;
-+ }
-+ }
-+
-+ if (!vaddr)
-+ vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
-+
-+ if (!vaddr)
-+ return NULL;
-+
-+ /* Clear the SME encryption bit for DMA use if not swiotlb area */
-+ if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
-+ set_memory_decrypted((unsigned long)vaddr, 1 << order);
-+ memset(vaddr, 0, PAGE_SIZE << order);
-+ *dma_handle = __sme_clr(*dma_handle);
-+ }
-+
-+ return vaddr;
-+}
-+
-+static void sev_free(struct device *dev, size_t size, void *vaddr,
-+ dma_addr_t dma_handle, unsigned long attrs)
-+{
-+ /* Set the SME encryption bit for re-use if not swiotlb area */
-+ if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
-+ set_memory_encrypted((unsigned long)vaddr,
-+ 1 << get_order(size));
-+
-+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
- }
-
- /*
-@@ -218,6 +282,20 @@ bool sev_active(void)
- }
- EXPORT_SYMBOL_GPL(sev_active);
-
-+static const struct dma_map_ops sev_dma_ops = {
-+ .alloc = sev_alloc,
-+ .free = sev_free,
-+ .map_page = swiotlb_map_page,
-+ .unmap_page = swiotlb_unmap_page,
-+ .map_sg = swiotlb_map_sg_attrs,
-+ .unmap_sg = swiotlb_unmap_sg_attrs,
-+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
-+ .sync_single_for_device = swiotlb_sync_single_for_device,
-+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
-+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
-+ .mapping_error = swiotlb_dma_mapping_error,
-+};
-+
- /* Architecture __weak replacement functions */
- void __init mem_encrypt_init(void)
- {
-@@ -227,6 +305,14 @@ void __init mem_encrypt_init(void)
- /* Call into SWIOTLB to update the SWIOTLB DMA buffers */
- swiotlb_update_mem_attributes();
-
-+ /*
-+ * With SEV, DMA operations cannot use encryption. New DMA ops
-+ * are required in order to mark the DMA areas as decrypted or
-+ * to use bounce buffers.
-+ */
-+ if (sev_active())
-+ dma_ops = &sev_dma_ops;
-+
- pr_info("AMD Secure Memory Encryption (SME) active\n");
- }
-
-diff --git a/lib/swiotlb.c b/lib/swiotlb.c
-index 20df2fd..0d7f46f 100644
---- a/lib/swiotlb.c
-+++ b/lib/swiotlb.c
-@@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
- if (no_iotlb_memory)
- panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
-
-- if (sme_active())
-- pr_warn_once("SME is active and system is using DMA bounce buffers\n");
-+ if (mem_encrypt_active())
-+ pr_warn_once("%s is active and system is using DMA bounce buffers\n",
-+ sme_active() ? "SME" : "SEV");
-
- mask = dma_get_seg_boundary(hwdev);
-
---
-2.7.4
-
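
Note: for context on the mechanism the deleted patch implemented, the coherent-allocation path it wired up was consumed by ordinary drivers with no SEV-specific code of their own. A minimal sketch follows; the function name example_get_dma_buffer and its parameters are illustrative, not part of the patch:

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Illustrative only. With sev_dma_ops installed as the global dma_ops
 * (see mem_encrypt_init() in the patch), this plain dma_alloc_coherent()
 * call dispatches to sev_alloc(): pages are allocated, the C-bit is
 * cleared from the returned bus address, and the kernel mapping is
 * marked decrypted so the device reads plaintext.
 */
static void *example_get_dma_buffer(struct device *dev, size_t size,
				    dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

If the direct allocation in sev_alloc() cannot satisfy the device's DMA mask, the call falls back to swiotlb_alloc_coherent(), whose backing buffer was already marked decrypted by swiotlb_update_mem_attributes().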
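The streaming side is similar. A sketch of the bounce-buffer path, again with illustrative names and assuming sev_dma_ops is in place:

#include <linux/dma-mapping.h>

/*
 * Illustrative only. Because sme_early_init() sets
 * swiotlb_force = SWIOTLB_FORCE when sev_active(), dma_map_single()
 * dispatches to swiotlb_map_page(), which copies the data into the
 * decrypted SWIOTLB aperture and hands the device the bounce buffer's
 * bus address instead of the encrypted original.
 */
static dma_addr_t example_map_streaming(struct device *dev, void *buf,
					size_t len)
{
	return dma_map_single(dev, buf, len, DMA_TO_DEVICE);
}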