Diffstat (limited to 'mm')
 mm/memcontrol.c | 1 +
 mm/memory.c     | 2 ++
 mm/readahead.c  | 8 ++++----
 mm/vmstat.c     | 7 ++++---
 4 files changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 1fedbde68f59..d9b5c817dce8 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3387,6 +3387,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
 	ret = page_counter_memparse(args, "-1", &threshold);
 	if (ret)
 		return ret;
+	threshold <<= PAGE_SHIFT;
 
 	mutex_lock(&memcg->thresholds_lock);
diff --git a/mm/memory.c b/mm/memory.c
index 9cb27470fee9..deb679c31f2a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2426,6 +2426,8 @@ void unmap_mapping_range(struct address_space *mapping,
 	if (details.last_index < details.first_index)
 		details.last_index = ULONG_MAX;
+
+	/* DAX uses i_mmap_lock to serialise file truncate vs page fault */
 	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
diff --git a/mm/readahead.c b/mm/readahead.c
index 60cd846a9a44..24682f6f4cfd 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -89,8 +89,8 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 	while (!list_empty(pages)) {
 		page = list_to_page(pages);
 		list_del(&page->lru);
-		if (add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			read_cache_pages_invalidate_page(mapping, page);
 			continue;
 		}
@@ -127,8 +127,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
 		struct page *page = list_to_page(pages);
 		list_del(&page->lru);
-		if (!add_to_page_cache_lru(page, mapping,
-					page->index, GFP_KERNEL)) {
+		if (!add_to_page_cache_lru(page, mapping, page->index,
+				GFP_KERNEL & mapping_gfp_mask(mapping))) {
 			mapping->a_ops->readpage(filp, page);
 		}
 		page_cache_release(page);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4f5cd974e11a..fbf14485a049 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1363,15 +1363,16 @@ static cpumask_var_t cpu_stat_off;
 static void vmstat_update(struct work_struct *w)
 {
-	if (refresh_cpu_vm_stats())
+	if (refresh_cpu_vm_stats()) {
 		/*
 		 * Counters were updated so we expect more updates
 		 * to occur in the future. Keep on running the
 		 * update worker thread.
 		 */
-		schedule_delayed_work(this_cpu_ptr(&vmstat_work),
+		schedule_delayed_work_on(smp_processor_id(),
+			this_cpu_ptr(&vmstat_work),
 			round_jiffies_relative(sysctl_stat_interval));
-	else {
+	} else {
 		/*
 		 * We did not update any counters so the app may be in
 		 * a mode where it does not cause counter updates.