From 6ed3540f53cdd5624fa18f4af17ebbc03d5b6fb9 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Thu, 18 Apr 2019 11:09:06 +0200
Subject: [PATCH 107/251] mm/swap: Access struct pagevec remotely

Once the newly introduced static key is enabled, struct pagevec is
locked during access, so it is possible to access it from a remote
CPU. The advantage is that the work can be done from the "requesting"
CPU without firing a worker on a remote CPU and waiting for it to
complete the work.

No functional change because the static key is not enabled.

Signed-off-by: Thomas Gleixner
Signed-off-by: Anna-Maria Gleixner
Signed-off-by: Sebastian Andrzej Siewior
---
 mm/page_alloc.c | 19 ++++++++-----
 mm/swap.c       | 73 +++++++++++++++++++++++++++++--------------------
 2 files changed, 56 insertions(+), 36 deletions(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a3cba1d89a88..5c575079f013 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2999,15 +2999,20 @@ void drain_all_pages(struct zone *zone)
 		cpumask_clear_cpu(cpu, &cpus_with_pcps);
 	}
 
-	for_each_cpu(cpu, &cpus_with_pcps) {
-		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
+	if (static_branch_likely(&use_pvec_lock)) {
+		for_each_cpu(cpu, &cpus_with_pcps)
+			drain_cpu_pages(cpu, zone);
+	} else {
+		for_each_cpu(cpu, &cpus_with_pcps) {
+			struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
 
-		drain->zone = zone;
-		INIT_WORK(&drain->work, drain_local_pages_wq);
-		queue_work_on(cpu, mm_percpu_wq, &drain->work);
+			drain->zone = zone;
+			INIT_WORK(&drain->work, drain_local_pages_wq);
+			queue_work_on(cpu, mm_percpu_wq, &drain->work);
+		}
+		for_each_cpu(cpu, &cpus_with_pcps)
+			flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
 	}
-	for_each_cpu(cpu, &cpus_with_pcps)
-		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
 
 	mutex_unlock(&pcpu_drain_mutex);
 }
diff --git a/mm/swap.c b/mm/swap.c
index bcc5908999e9..9afc8767d37b 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -834,39 +834,54 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
  */
 void lru_add_drain_all(void)
 {
-	static DEFINE_MUTEX(lock);
-	static struct cpumask has_work;
-	int cpu;
-
-	/*
-	 * Make sure nobody triggers this path before mm_percpu_wq is fully
-	 * initialized.
-	 */
-	if (WARN_ON(!mm_percpu_wq))
-		return;
+	if (static_branch_likely(&use_pvec_lock)) {
+		int cpu;
+
+		for_each_online_cpu(cpu) {
+			if (pagevec_count(&per_cpu(lru_add_pvec.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_rotate_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_deactivate_file_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_deactivate_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_lazyfree_pvecs.pvec, cpu)) ||
+			    need_activate_page_drain(cpu)) {
+				lru_add_drain_cpu(cpu);
+			}
+		}
+	} else {
+		static DEFINE_MUTEX(lock);
+		static struct cpumask has_work;
+		int cpu;
 
-	mutex_lock(&lock);
-	cpumask_clear(&has_work);
-
-	for_each_online_cpu(cpu) {
-		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
-
-		if (pagevec_count(&per_cpu(lru_add_pvec.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_rotate_pvecs.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_deactivate_pvecs.pvec, cpu)) ||
-		    pagevec_count(&per_cpu(lru_lazyfree_pvecs.pvec, cpu)) ||
-		    need_activate_page_drain(cpu)) {
-			INIT_WORK(work, lru_add_drain_per_cpu);
-			queue_work_on(cpu, mm_percpu_wq, work);
-			cpumask_set_cpu(cpu, &has_work);
+		/*
+		 * Make sure nobody triggers this path before mm_percpu_wq
+		 * is fully initialized.
+		 */
+		if (WARN_ON(!mm_percpu_wq))
+			return;
+
+		mutex_lock(&lock);
+		cpumask_clear(&has_work);
+
+		for_each_online_cpu(cpu) {
+			struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+			if (pagevec_count(&per_cpu(lru_add_pvec.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_rotate_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_deactivate_file_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_deactivate_pvecs.pvec, cpu)) ||
+			    pagevec_count(&per_cpu(lru_lazyfree_pvecs.pvec, cpu)) ||
+			    need_activate_page_drain(cpu)) {
+				INIT_WORK(work, lru_add_drain_per_cpu);
+				queue_work_on(cpu, mm_percpu_wq, work);
+				cpumask_set_cpu(cpu, &has_work);
+			}
 		}
-	}
 
-	for_each_cpu(cpu, &has_work)
-		flush_work(&per_cpu(lru_add_drain_work, cpu));
+		for_each_cpu(cpu, &has_work)
+			flush_work(&per_cpu(lru_add_drain_work, cpu));
 
-	mutex_unlock(&lock);
+		mutex_unlock(&lock);
+	}
 }
 #else
 void lru_add_drain_all(void)
-- 
2.19.1
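
As a standalone illustration of the pattern the patch introduces (a
minimal user-space sketch only, not kernel code): when the structure is
lock-protected, any thread may drain a remote CPU's per-CPU data
directly; otherwise work must be queued on the owning CPU and flushed.
The names pvec, drain_cpu() and drain_all() are hypothetical stand-ins
for struct pagevec, drain_cpu_pages()/lru_add_drain_cpu() and
drain_all_pages()/lru_add_drain_all(), and a pthread mutex stands in
for the kernel's per-CPU locking. Build with: cc -pthread
remote_drain_sketch.c

/* remote_drain_sketch.c - illustrative model, see note above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the patch's use_pvec_lock static key. */
static bool use_pvec_lock = true;

/* Per-CPU "pagevec": a pending-page count guarded by a per-CPU lock. */
struct pvec {
	pthread_mutex_t lock;
	int nr_pages;
};

static struct pvec pvecs[NR_CPUS];

/* Drain one CPU's pvec. Because of the lock, this is safe from ANY thread. */
static void drain_cpu(int cpu)
{
	pthread_mutex_lock(&pvecs[cpu].lock);
	printf("drained %d pages of cpu %d remotely\n",
	       pvecs[cpu].nr_pages, cpu);
	pvecs[cpu].nr_pages = 0;
	pthread_mutex_unlock(&pvecs[cpu].lock);
}

/*
 * Analogue of lru_add_drain_all()/drain_all_pages(): in the locked mode
 * the requesting thread drains every CPU itself, with no cross-CPU
 * scheduling; otherwise the kernel queues a worker on each CPU and then
 * waits for all of them (queue_work_on() + flush_work(), omitted here).
 */
static void drain_all(void)
{
	if (use_pvec_lock) {
		for (int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (pvecs[cpu].nr_pages)
				drain_cpu(cpu);
		}
	}
	/* else: per-CPU worker path, as in the "else" branches above. */
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		pthread_mutex_init(&pvecs[cpu].lock, NULL);
		pvecs[cpu].nr_pages = 10 * cpu;	/* fake pending pages */
	}
	drain_all();
	return 0;
}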