Diffstat (limited to 'features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch')
-rw-r--r--  features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch | 143
1 file changed, 143 insertions(+), 0 deletions(-)
diff --git a/features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
new file mode 100644
index 00000000..d85633ab
--- /dev/null
+++ b/features/rt/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -0,0 +1,143 @@
+From a597d1d521af35edd6fbb86d1934a0f92bc9d54e Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 18 Aug 2020 10:30:00 +0200
+Subject: [PATCH 113/191] mm: memcontrol: Provide a local_lock for per-CPU
+ memcg_stock
+
+Interrupts are disabled to ensure CPU-local access to the per-CPU
+variable `memcg_stock'.
+The code inside the interrupt-disabled section acquires regular
+spinlocks, which become 'sleeping' spinlocks on a PREEMPT_RT kernel,
+so this conflicts with the RT semantics.
+
+Convert it to a local_lock, which allows RT kernels to substitute it with
+a real per-CPU lock. On non-RT kernels this maps to local_irq_save() as
+before, but also provides lockdep coverage of the critical section.
+No functional change.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/memcontrol.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+diff --git a/mm/memcontrol.c b/mm/memcontrol.c
+index 24091f7a64e2..74a65d87866c 100644
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -2210,6 +2210,7 @@ void unlock_page_memcg(struct page *page)
+ EXPORT_SYMBOL(unlock_page_memcg);
+
+ struct memcg_stock_pcp {
++ local_lock_t lock;
+ struct mem_cgroup *cached; /* this never be root cgroup */
+ unsigned int nr_pages;
+
+@@ -2261,7 +2262,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (nr_pages > MEMCG_CHARGE_BATCH)
+ return ret;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
+@@ -2269,7 +2270,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+
+ return ret;
+ }
+@@ -2304,14 +2305,14 @@ static void drain_local_stock(struct work_struct *dummy)
+ * The only protection from memory hotplug vs. drain_stock races is
+ * that we always operate on local CPU stock here with IRQ disabled
+ */
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ drain_obj_stock(stock);
+ drain_stock(stock);
+ clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+
+ /*
+@@ -2323,7 +2324,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (stock->cached != memcg) { /* reset if necessary */
+@@ -2336,7 +2337,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
+ if (stock->nr_pages > MEMCG_CHARGE_BATCH)
+ drain_stock(stock);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+
+ /*
+@@ -3158,7 +3159,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ unsigned long flags;
+ bool ret = false;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
+@@ -3166,7 +3167,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ ret = true;
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+
+ return ret;
+ }
+@@ -3225,7 +3226,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ struct memcg_stock_pcp *stock;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&memcg_stock.lock, flags);
+
+ stock = this_cpu_ptr(&memcg_stock);
+ if (stock->cached_objcg != objcg) { /* reset if necessary */
+@@ -3239,7 +3240,7 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes)
+ if (stock->nr_bytes > PAGE_SIZE)
+ drain_obj_stock(stock);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&memcg_stock.lock, flags);
+ }
+
+ int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
+@@ -7065,9 +7066,13 @@ static int __init mem_cgroup_init(void)
+ cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
+ memcg_hotplug_cpu_dead);
+
+- for_each_possible_cpu(cpu)
+- INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work,
+- drain_local_stock);
++ for_each_possible_cpu(cpu) {
++ struct memcg_stock_pcp *stock;
++
++ stock = per_cpu_ptr(&memcg_stock, cpu);
++ INIT_WORK(&stock->work, drain_local_stock);
++ local_lock_init(&stock->lock);
++ }
+
+ for_each_node(node) {
+ struct mem_cgroup_tree_per_node *rtpn;
+--
+2.19.1
+
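For reference, the locking pattern introduced by this patch looks roughly as
follows when reduced to a self-contained sketch. The struct pcp_counter and
the helper pcp_counter_add() are hypothetical names used for illustration
only and are not part of mm/memcontrol.c; the sketch also uses static
INIT_LOCAL_LOCK() initialization, whereas the patch initializes the lock at
runtime with local_lock_init() in mem_cgroup_init().

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU data guarded by a local_lock. */
    struct pcp_counter {
            local_lock_t lock;      /* protects 'value' on this CPU */
            unsigned long value;
    };

    static DEFINE_PER_CPU(struct pcp_counter, pcp_counter) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void pcp_counter_add(unsigned long delta)
    {
            struct pcp_counter *pc;
            unsigned long flags;

            /*
             * !PREEMPT_RT: behaves like local_irq_save(), plus lockdep
             *              annotation of the critical section.
             * PREEMPT_RT:  takes a per-CPU spinlock instead, so sleeping
             *              locks may be acquired inside the section.
             */
            local_lock_irqsave(&pcp_counter.lock, flags);
            pc = this_cpu_ptr(&pcp_counter);
            pc->value += delta;
            local_unlock_irqrestore(&pcp_counter.lock, flags);
    }

On a non-RT configuration local_lock_irqsave() therefore preserves the old
behaviour of the local_irq_save() calls it replaces, while on PREEMPT_RT the
per-CPU lock keeps interrupts enabled and the section compatible with the
sleeping spinlocks taken further down in the memcg charge paths.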