Diffstat (limited to 'features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch')
-rw-r--r-- | features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch | 211
1 file changed, 0 insertions(+), 211 deletions(-)
diff --git a/features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch b/features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
deleted file mode 100644
index 20da73a1..00000000
--- a/features/rt/mm-zsmalloc-copy-with-get_cpu_var-and-locking.patch
+++ /dev/null
@@ -1,211 +0,0 @@
-From 58041154aa39fc127b4c5d0f6179c606403ced5d Mon Sep 17 00:00:00 2001
-From: Mike Galbraith <umgwanakikbuti@gmail.com>
-Date: Tue, 22 Mar 2016 11:16:09 +0100
-Subject: [PATCH 116/191] mm/zsmalloc: copy with get_cpu_var() and locking
-
-get_cpu_var() disables preemption and triggers a might_sleep() splat later.
-This is replaced with get_locked_var().
-This bitspinlocks are replaced with a proper mutex which requires a slightly
-larger struct to allocate.
-
-Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
-[bigeasy: replace the bitspin_lock() with a mutex, get_locked_var(). Mike then
-fixed the size magic]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/zsmalloc.c | 85 +++++++++++++++++++++++++++++++++++++++++++++++----
- 1 file changed, 79 insertions(+), 6 deletions(-)
-
-diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
-index 30c358b72025..6177b736d018 100644
---- a/mm/zsmalloc.c
-+++ b/mm/zsmalloc.c
-@@ -57,6 +57,7 @@
- #include <linux/wait.h>
- #include <linux/pagemap.h>
- #include <linux/fs.h>
-+#include <linux/local_lock.h>
- 
- #define ZSPAGE_MAGIC	0x58
- 
-@@ -77,6 +78,20 @@
- 
- #define ZS_HANDLE_SIZE (sizeof(unsigned long))
- 
-+#ifdef CONFIG_PREEMPT_RT
-+
-+struct zsmalloc_handle {
-+	unsigned long addr;
-+	struct mutex lock;
-+};
-+
-+#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
-+
-+#else
-+
-+#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long))
-+#endif
-+
- /*
-  * Object location (<PFN>, <obj_idx>) is encoded as
-  * a single (unsigned long) handle value.
-@@ -293,6 +308,7 @@ struct zspage {
- };
- 
- struct mapping_area {
-+	local_lock_t lock;
- 	char *vm_buf; /* copy buffer for objects that span pages */
- 	char *vm_addr; /* address of kmap_atomic()'ed pages */
- 	enum zs_mapmode vm_mm; /* mapping mode */
-@@ -322,7 +338,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
- 
- static int create_cache(struct zs_pool *pool)
- {
--	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
-+	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE,
- 					0, 0, NULL);
- 	if (!pool->handle_cachep)
- 		return 1;
-@@ -346,9 +362,26 @@ static void destroy_cache(struct zs_pool *pool)
- 
- static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
- {
--	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
--			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
-+	void *p;
-+
-+	p = kmem_cache_alloc(pool->handle_cachep,
-+			     gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
-+#ifdef CONFIG_PREEMPT_RT
-+	if (p) {
-+		struct zsmalloc_handle *zh = p;
-+
-+		mutex_init(&zh->lock);
-+	}
-+#endif
-+	return (unsigned long)p;
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT
-+static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle)
-+{
-+	return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1));
- }
-+#endif
- 
- static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
- {
-@@ -368,12 +401,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
- 
- static void record_obj(unsigned long handle, unsigned long obj)
- {
-+#ifdef CONFIG_PREEMPT_RT
-+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-+
-+	WRITE_ONCE(zh->addr, obj);
-+#else
- 	/*
- 	 * lsb of @obj represents handle lock while other bits
- 	 * represent object value the handle is pointing so
- 	 * updating shouldn't do store tearing.
- 	 */
- 	WRITE_ONCE(*(unsigned long *)handle, obj);
-+#endif
- }
- 
- /* zpool driver */
-@@ -455,7 +494,10 @@ MODULE_ALIAS("zpool-zsmalloc");
- #endif /* CONFIG_ZPOOL */
- 
- /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
--static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
-+static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
-+	/* XXX remove this and use a spin_lock_t in pin_tag() */
-+	.lock	= INIT_LOCAL_LOCK(lock),
-+};
- 
- static bool is_zspage_isolated(struct zspage *zspage)
- {
-@@ -862,7 +904,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
- 
- static unsigned long handle_to_obj(unsigned long handle)
- {
-+#ifdef CONFIG_PREEMPT_RT
-+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-+
-+	return zh->addr;
-+#else
- 	return *(unsigned long *)handle;
-+#endif
- }
- 
- static unsigned long obj_to_head(struct page *page, void *obj)
-@@ -876,22 +924,46 @@ static unsigned long obj_to_head(struct page *page, void *obj)
- 
- static inline int testpin_tag(unsigned long handle)
- {
-+#ifdef CONFIG_PREEMPT_RT
-+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-+
-+	return mutex_is_locked(&zh->lock);
-+#else
- 	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
-+#endif
- }
- 
- static inline int trypin_tag(unsigned long handle)
- {
-+#ifdef CONFIG_PREEMPT_RT
-+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-+
-+	return mutex_trylock(&zh->lock);
-+#else
- 	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
-+#endif
- }
- 
- static void pin_tag(unsigned long handle) __acquires(bitlock)
- {
-+#ifdef CONFIG_PREEMPT_RT
-+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-+
-+	return mutex_lock(&zh->lock);
-+#else
- 	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
-+#endif
- }
- 
- static void unpin_tag(unsigned long handle) __releases(bitlock)
- {
-+#ifdef CONFIG_PREEMPT_RT
-+	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
-+
-+	return mutex_unlock(&zh->lock);
-+#else
- 	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
-+#endif
- }
- 
- static void reset_page(struct page *page)
-@@ -1274,7 +1346,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
- 	class = pool->size_class[class_idx];
- 	off = (class->size * obj_idx) & ~PAGE_MASK;
- 
--	area = &get_cpu_var(zs_map_area);
-+	local_lock(&zs_map_area.lock);
-+	area = this_cpu_ptr(&zs_map_area);
- 	area->vm_mm = mm;
- 	if (off + class->size <= PAGE_SIZE) {
- 		/* this object is contained entirely within a page */
-@@ -1328,7 +1401,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
- 
- 		__zs_unmap_object(area, pages, off, class->size);
- 	}
--	put_cpu_var(zs_map_area);
-+	local_unlock(&zs_map_area.lock);
- 
- 	migrate_read_unlock(zspage);
- 	unpin_tag(handle);
--- 
-2.19.1
-