Diffstat (limited to 'kernel/dma')
 kernel/dma/coherent.c |  3 ++-
 kernel/dma/debug.c    |  8 ++++----
 kernel/dma/swiotlb.c  | 11 ++++++++---
 3 files changed, 14 insertions(+), 8 deletions(-)
diff --git a/kernel/dma/coherent.c b/kernel/dma/coherent.c
index 597d40893862..4c7ffd094a57 100644
--- a/kernel/dma/coherent.c
+++ b/kernel/dma/coherent.c
@@ -369,8 +369,9 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
 static void rmem_dma_device_release(struct reserved_mem *rmem,
 				    struct device *dev)
 {
-	if (dev)
+	if (dev) {
 		dev->dma_mem = NULL;
+	}
 }
 
 static const struct reserved_mem_ops rmem_dma_ops = {
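For context, the release callback above is the counterpart of rmem_dma_device_init(): init points dev->dma_mem at the dma_coherent_mem kept in rmem->priv when a device attaches to a "shared-dma-pool" region, and release clears that pointer again on detach. A condensed sketch of how coherent.c wires the pair up (simplified; the setup-time property checks are omitted):

static const struct reserved_mem_ops rmem_dma_ops = {
	.device_init	= rmem_dma_device_init,
	.device_release	= rmem_dma_device_release,
};

static int __init rmem_dma_setup(struct reserved_mem *rmem)
{
	/* Reached for reserved-memory nodes carrying
	 * compatible = "shared-dma-pool", via the declaration below. */
	rmem->ops = &rmem_dma_ops;
	return 0;
}
RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);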
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 1c82b0d25498..c345a6e2f7b7 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -469,7 +469,7 @@ void debug_dma_dump_mappings(struct device *dev)
  * At any time debug_dma_assert_idle() can be called to trigger a
  * warning if any cachelines in the given page are in the active set.
  */
-static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
 static DEFINE_SPINLOCK(radix_lock);
 #define ACTIVE_CACHELINE_MAX_OVERLAP	((1 << RADIX_TREE_MAX_TAGS) - 1)
 #define CACHELINE_PER_PAGE_SHIFT	(PAGE_SHIFT - L1_CACHE_SHIFT)
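The allocation-flag change matters because a radix tree allocates its intermediate nodes with the gfp mask supplied at definition time, and every insertion into dma_active_cacheline runs with the radix_lock spinlock held, where sleeping is forbidden. GFP_ATOMIC, unlike GFP_NOWAIT, also lets those node allocations dip into the emergency reserves, making spurious insertion failures less likely. A condensed sketch of the insert path, adapted from dma-debug's active_cacheline_insert() (to_cacheline_number() is the helper defined nearby in the same file; the overlap bookkeeping is omitted):

static int active_cacheline_insert(struct dma_debug_entry *entry)
{
	phys_addr_t cln = to_cacheline_number(entry);
	unsigned long flags;
	int rc;

	/* radix_tree_insert() allocates tree nodes with the tree's gfp
	 * mask while we hold the spinlock: the mask must not sleep. */
	spin_lock_irqsave(&radix_lock, flags);
	rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
	spin_unlock_irqrestore(&radix_lock, flags);

	return rc;
}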
@@ -963,7 +963,7 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o
 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 {
 	struct device *dev = data;
-	struct dma_debug_entry *uninitialized_var(entry);
+	struct dma_debug_entry *entry;
 	int count;
 
 	if (dma_debug_disabled())
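The dropped uninitialized_var() annotation was a self-assignment trick from the old compiler headers, roughly:

/* Silences -Wmaybe-uninitialized by making the first "use" a self-assignment. */
#define uninitialized_var(x) x = x

struct dma_debug_entry *uninitialized_var(entry);
/* expands to: struct dma_debug_entry *entry = entry; */

It was removed tree-wide because it hides genuinely uninitialized reads just as readily as false positives. entry is safe without it here: device_dma_allocations() only stores through the pointer when it reports a nonzero count.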
@@ -1056,7 +1056,7 @@ static __init int dma_debug_cmdline(char *str)
 		global_disable = true;
 	}
 
-	return 0;
+	return 1;
 }
 
 static __init int dma_debug_entries_cmdline(char *str)
@@ -1065,7 +1065,7 @@ static __init int dma_debug_entries_cmdline(char *str)
 		return -EINVAL;
 	if (!get_option(&str, &nr_prealloc_entries))
 		nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
-	return 0;
+	return 1;
 }
 
 __setup("dma_debug=", dma_debug_cmdline);
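The two return-value fixes follow the __setup() contract: a handler returns 1 to tell the early option parser that the parameter was consumed, while 0 means "not handled", in which case the kernel forwards the string to init as an argument or environment variable and newer kernels log an "Unknown kernel command line parameters" warning. A hypothetical handler showing the convention (example_setup and the "example=" option are made up for illustration):

static int __init example_setup(char *str)
{
	if (!str)
		return 0;	/* unhandled: let the kernel pass it on */
	/* ... parse str ... */
	return 1;		/* consumed: do not forward to init */
}
__setup("example=", example_setup);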
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 6f7d4e977c5c..b1e2ce2f9c2d 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -587,10 +587,15 @@ found:
 	 */
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
-	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
+	/*
+	 * When dir == DMA_FROM_DEVICE we could omit the copy from the
+	 * original buffer to the tlb buffer, if we knew for sure the device
+	 * will overwrite the entire current content. But we don't. Thus
+	 * unconditional bounce may prevent leaking swiotlb content (i.e.
+	 * kernel memory) to user-space.
+	 */
+	if (orig_addr)
 		swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
-
 	return tlb_addr;
 }
 
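The deleted condition skipped the initial bounce for DMA_FROM_DEVICE mappings on the assumption that the device would overwrite the entire bounce slot. If the device writes less than the mapped size (a short receive, an aborted transfer), the sync back at unmap time copies whatever a previous mapping left in the slot into the driver's buffer. A hypothetical driver path showing the leak the patch closes (dev, start_transfer() and ubuf are made-up stand-ins; the DMA API calls are real):

char *buf = kmalloc(256, GFP_KERNEL);
dma_addr_t dma = dma_map_single(dev, buf, 256, DMA_FROM_DEVICE);
/* pre-patch swiotlb: the bounce slot is not initialized from buf and
 * still holds bytes from whichever mapping used it last */

start_transfer(dev, dma, 256);		/* device writes only 100 bytes */

dma_unmap_single(dev, dma, 256, DMA_FROM_DEVICE);
/* unmap bounces all 256 bytes back: buf[100..255] now holds stale
 * swiotlb contents (kernel memory), not device data */

copy_to_user(ubuf, buf, 256);		/* and the driver leaks it */

With the unconditional swiotlb_bounce() above, the slot starts out as a copy of buf, so a partial device write can at worst echo the driver's own bytes back, never another mapping's.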