aboutsummaryrefslogtreecommitdiffstats
path: root/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch
diff options
context:
space:
mode:
Diffstat (limited to 'meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch')
-rw-r--r--meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch218
1 files changed, 218 insertions, 0 deletions
diff --git a/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch
new file mode 100644
index 00000000..99776243
--- /dev/null
+++ b/meta-amd-bsp/recipes-kernel/linux/linux-yocto-4.14.71-e3000/0051-x86-mm-resource-Use-PAGE_KERNEL-protection-for-iorem.patch
@@ -0,0 +1,218 @@
+From 8a49eeabdeed5b414efeb82c97ba44278e591819 Mon Sep 17 00:00:00 2001
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Fri, 20 Oct 2017 09:30:52 -0500
+Subject: [PATCH 51/95] x86/mm, resource: Use PAGE_KERNEL protection for
+ ioremap of memory pages
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In order for memory pages to be properly mapped when SEV is active, it's
+necessary to use the PAGE_KERNEL protection attribute as the base
+protection. This ensures that memory mapping of, e.g. ACPI tables,
+receives the proper mapping attributes.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Borislav Petkov <bp@suse.de>
+Tested-by: Borislav Petkov <bp@suse.de>
+Cc: Laura Abbott <labbott@redhat.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: kvm@vger.kernel.org
+Cc: Jérôme Glisse <jglisse@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Link: https://lkml.kernel.org/r/20171020143059.3291-11-brijesh.singh@amd.com
+Signed-off-by: Sudheesh Mavila <sudheesh.mavila@amd.com>
+---
+ arch/x86/mm/ioremap.c | 79 ++++++++++++++++++++++++++++++++++++++++++--------
+ include/linux/ioport.h | 3 ++
+ kernel/resource.c | 19 ++++++++++++
+ 3 files changed, 89 insertions(+), 12 deletions(-)
+
+diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
+index f0b91a2..e2db83b 100644
+--- a/arch/x86/mm/ioremap.c
++++ b/arch/x86/mm/ioremap.c
+@@ -27,6 +27,11 @@
+
+ #include "physaddr.h"
+
++struct ioremap_mem_flags {
++ bool system_ram;
++ bool desc_other;
++};
++
+ /*
+ * Fix up the linear direct mapping of the kernel to avoid cache attribute
+ * conflicts.
+@@ -56,17 +61,59 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+ return err;
+ }
+
+-static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+- void *arg)
++static bool __ioremap_check_ram(struct resource *res)
+ {
++ unsigned long start_pfn, stop_pfn;
+ unsigned long i;
+
+- for (i = 0; i < nr_pages; ++i)
+- if (pfn_valid(start_pfn + i) &&
+- !PageReserved(pfn_to_page(start_pfn + i)))
+- return 1;
++ if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
++ return false;
+
+- return 0;
++ start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ stop_pfn = (res->end + 1) >> PAGE_SHIFT;
++ if (stop_pfn > start_pfn) {
++ for (i = 0; i < (stop_pfn - start_pfn); ++i)
++ if (pfn_valid(start_pfn + i) &&
++ !PageReserved(pfn_to_page(start_pfn + i)))
++ return true;
++ }
++
++ return false;
++}
++
++static int __ioremap_check_desc_other(struct resource *res)
++{
++ return (res->desc != IORES_DESC_NONE);
++}
++
++static int __ioremap_res_check(struct resource *res, void *arg)
++{
++ struct ioremap_mem_flags *flags = arg;
++
++ if (!flags->system_ram)
++ flags->system_ram = __ioremap_check_ram(res);
++
++ if (!flags->desc_other)
++ flags->desc_other = __ioremap_check_desc_other(res);
++
++ return flags->system_ram && flags->desc_other;
++}
++
++/*
++ * To avoid multiple resource walks, this function walks resources marked as
++ * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
++ * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
++ */
++static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
++ struct ioremap_mem_flags *flags)
++{
++ u64 start, end;
++
++ start = (u64)addr;
++ end = start + size - 1;
++ memset(flags, 0, sizeof(*flags));
++
++ walk_mem_res(start, end, flags, __ioremap_res_check);
+ }
+
+ /*
+@@ -87,9 +134,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ unsigned long size, enum page_cache_mode pcm, void *caller)
+ {
+ unsigned long offset, vaddr;
+- resource_size_t pfn, last_pfn, last_addr;
++ resource_size_t last_addr;
+ const resource_size_t unaligned_phys_addr = phys_addr;
+ const unsigned long unaligned_size = size;
++ struct ioremap_mem_flags mem_flags;
+ struct vm_struct *area;
+ enum page_cache_mode new_pcm;
+ pgprot_t prot;
+@@ -108,13 +156,12 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ return NULL;
+ }
+
++ __ioremap_check_mem(phys_addr, size, &mem_flags);
++
+ /*
+ * Don't allow anybody to remap normal RAM that we're using..
+ */
+- pfn = phys_addr >> PAGE_SHIFT;
+- last_pfn = last_addr >> PAGE_SHIFT;
+- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+- __ioremap_check_ram) == 1) {
++ if (mem_flags.system_ram) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
+ return NULL;
+@@ -146,7 +193,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+ pcm = new_pcm;
+ }
+
++ /*
++ * If the page being mapped is in memory and SEV is active then
++ * make sure the memory encryption attribute is enabled in the
++ * resulting mapping.
++ */
+ prot = PAGE_KERNEL_IO;
++ if (sev_active() && mem_flags.desc_other)
++ prot = pgprot_encrypted(prot);
++
+ switch (pcm) {
+ case _PAGE_CACHE_MODE_UC:
+ default:
+diff --git a/include/linux/ioport.h b/include/linux/ioport.h
+index c0070d7..93b4183 100644
+--- a/include/linux/ioport.h
++++ b/include/linux/ioport.h
+@@ -271,6 +271,9 @@ extern int
+ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg, int (*func)(unsigned long, unsigned long, void *));
+ extern int
++walk_mem_res(u64 start, u64 end, void *arg,
++ int (*func)(struct resource *, void *));
++extern int
+ walk_system_ram_res(u64 start, u64 end, void *arg,
+ int (*func)(struct resource *, void *));
+ extern int
+diff --git a/kernel/resource.c b/kernel/resource.c
+index 78cae82..790f1b8 100644
+--- a/kernel/resource.c
++++ b/kernel/resource.c
+@@ -397,6 +397,8 @@ static int find_next_iomem_res(struct resource *res, unsigned long desc,
+ res->start = p->start;
+ if (res->end > p->end)
+ res->end = p->end;
++ res->flags = p->flags;
++ res->desc = p->desc;
+ return 0;
+ }
+
+@@ -467,6 +469,23 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
+ arg, func);
+ }
+
++/*
++ * This function calls the @func callback against all memory ranges, which
++ * are ranges marked as IORESOURCE_MEM and IORESOURCE_BUSY.
++ */
++int walk_mem_res(u64 start, u64 end, void *arg,
++ int (*func)(struct resource *, void *))
++{
++ struct resource res;
++
++ res.start = start;
++ res.end = end;
++ res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++
++ return __walk_iomem_res_desc(&res, IORES_DESC_NONE, true,
++ arg, func);
++}
++
+ #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
+
+ /*
+--
+2.7.4
+