Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c | 227
1 file changed, 145 insertions(+), 82 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 7a5fc89a8652..21890994c1d3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -101,9 +101,11 @@ static int set_memmap_mode(const char *val, const struct kernel_param *kp)
static int get_memmap_mode(char *buffer, const struct kernel_param *kp)
{
- if (*((int *)kp->arg) == MEMMAP_ON_MEMORY_FORCE)
- return sprintf(buffer, "force\n");
- return param_get_bool(buffer, kp);
+ int mode = *((int *)kp->arg);
+
+ if (mode == MEMMAP_ON_MEMORY_FORCE)
+ return sprintf(buffer, "force\n");
+ return sprintf(buffer, "%c\n", mode ? 'Y' : 'N');
}
static const struct kernel_param_ops memmap_mode_ops = {
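
The getter rewrite above is needed because kp->arg points at an int holding a tri-state mode, while param_get_bool() reads its argument through a bool pointer (one byte, well-defined only for 0/1). A minimal userspace sketch of the new read-back behaviour for all three modes (the enum values mirror the kernel's; the demo itself is illustrative):

	#include <stdio.h>

	enum memmap_mode {
		MEMMAP_ON_MEMORY_DISABLE = 0,
		MEMMAP_ON_MEMORY_ENABLE,
		MEMMAP_ON_MEMORY_FORCE,
	};

	int main(void)
	{
		/* The old getter handed the int to param_get_bool(); the
		 * new one evaluates it directly and never type-puns. */
		for (int mode = MEMMAP_ON_MEMORY_DISABLE;
		     mode <= MEMMAP_ON_MEMORY_FORCE; mode++) {
			if (mode == MEMMAP_ON_MEMORY_FORCE)
				printf("force\n");
			else
				printf("%c\n", mode ? 'Y' : 'N');
		}
		return 0;	/* prints: N, Y, force */
	}
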
@@ -645,7 +647,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
unsigned long pfn;
/*
- * Online the pages in MAX_ORDER aligned chunks. The callback might
+ * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
* decide to not expose all pages to the buddy (e.g., expose them
* later). We account all pages as being online and belonging to this
* zone ("present").
@@ -660,12 +662,13 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
* Free to online pages in the largest chunks alignment allows.
*
* __ffs() behaviour is undefined for 0. start == 0 is
- * MAX_ORDER-aligned, Set order to MAX_ORDER for the case.
+ * MAX_PAGE_ORDER-aligned; set order to MAX_PAGE_ORDER in
+ * that case.
*/
if (pfn)
- order = min_t(int, MAX_ORDER, __ffs(pfn));
+ order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
else
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
(*online_page_callback)(pfn_to_page(pfn), order);
pfn += (1UL << order);
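
To make the chunking concrete: __ffs(pfn) is the index of the lowest set bit of pfn, i.e. the largest order the current pfn is naturally aligned to. A userspace sketch of the loop above, with __builtin_ctzl() standing in for __ffs(), and MAX_PAGE_ORDER plus the sample range chosen for illustration:

	#include <stdio.h>

	#define MAX_PAGE_ORDER	10

	static int min(int a, int b) { return a < b ? a : b; }

	int main(void)
	{
		/* 0x240 pages starting at pfn 0x1c0; as with memory blocks,
		 * the range is assumed to decompose into whole chunks. */
		unsigned long start_pfn = 0x1c0, nr_pages = 0x240, pfn;

		for (pfn = start_pfn; pfn < start_pfn + nr_pages;) {
			int order = pfn ? min(MAX_PAGE_ORDER, __builtin_ctzl(pfn))
					: MAX_PAGE_ORDER;

			/* prints an order-6 chunk at 0x1c0, then an
			 * order-9 chunk at 0x200 */
			printf("online pfn %#lx, order %d (%lu pages)\n",
			       pfn, order, 1UL << order);
			pfn += 1UL << order;
		}
		return 0;
	}
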
@@ -1380,6 +1383,85 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
return arch_supports_memmap_on_memory(vmemmap_size);
}
+static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+
+ /*
+ * For memmap_on_memory, the altmaps were added on a per-memblock
+ * basis; we have to process each individual memory block.
+ */
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct vmem_altmap *altmap = NULL;
+ struct memory_block *mem;
+
+ mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start)));
+ if (WARN_ON_ONCE(!mem))
+ continue;
+
+ altmap = mem->altmap;
+ mem->altmap = NULL;
+
+ remove_memory_block_devices(cur_start, memblock_size);
+
+ arch_remove_memory(cur_start, memblock_size, altmap);
+
+ /* Verify that all vmemmap pages have actually been freed. */
+ WARN(altmap->alloc, "Altmap not fully unmapped");
+ kfree(altmap);
+ }
+}
+
+static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
+ u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+ int ret;
+
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct mhp_params params = { .pgprot =
+ pgprot_mhp(PAGE_KERNEL) };
+ struct vmem_altmap mhp_altmap = {
+ .base_pfn = PHYS_PFN(cur_start),
+ .end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
+ };
+
+ mhp_altmap.free = memory_block_memmap_on_memory_pages();
+ params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
+ GFP_KERNEL);
+ if (!params.altmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, cur_start, memblock_size, &params);
+ if (ret < 0) {
+ kfree(params.altmap);
+ goto out;
+ }
+
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(cur_start, memblock_size,
+ params.altmap, group);
+ if (ret) {
+ arch_remove_memory(cur_start, memblock_size, NULL);
+ kfree(params.altmap);
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ if (ret && cur_start != start)
+ remove_memory_blocks_and_altmaps(start, cur_start - start);
+ return ret;
+}
+
/*
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
* and online/offline operations (triggered e.g. by sysfs).
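
Each altmap's .free budget comes from memory_block_memmap_on_memory_pages(). As a worked example of what one per-memblock altmap carves out (128 MiB blocks, 4 KiB pages, and a 64-byte struct page are assumptions, typical x86_64 values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long block_bytes = 128UL << 20; /* memory_block_size_bytes() */
		unsigned long page_size   = 4096;
		unsigned long struct_page = 64;          /* sizeof(struct page) */

		unsigned long nr_pages     = block_bytes / page_size;	/* 32768 */
		unsigned long memmap_bytes = nr_pages * struct_page;	/* 2 MiB */

		/* 512 pages reserved at the start of each 128 MiB block
		 * to host that block's own memmap */
		printf("%lu vmemmap pages (%lu KiB) per block\n",
		       memmap_bytes / page_size, memmap_bytes >> 10);
		return 0;
	}
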
@@ -1390,10 +1472,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
- struct vmem_altmap mhp_altmap = {
- .base_pfn = PHYS_PFN(res->start),
- .end_pfn = PHYS_PFN(res->end),
- };
struct memory_group *group = NULL;
u64 start, size;
bool new_node = false;
@@ -1436,30 +1514,22 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
/*
* Self hosted memmap array
*/
- if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
- if (mhp_supports_memmap_on_memory(size)) {
- mhp_altmap.free = memory_block_memmap_on_memory_pages();
- params.altmap = kmalloc(sizeof(struct vmem_altmap), GFP_KERNEL);
- if (!params.altmap) {
- ret = -ENOMEM;
- goto error;
- }
+ if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
+ mhp_supports_memmap_on_memory(memory_block_size_bytes())) {
+ ret = create_altmaps_and_memory_blocks(nid, group, start, size);
+ if (ret)
+ goto error;
+ } else {
+ ret = arch_add_memory(nid, start, size, &params);
+ if (ret < 0)
+ goto error;
- memcpy(params.altmap, &mhp_altmap, sizeof(mhp_altmap));
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(start, size, NULL, group);
+ if (ret) {
+ arch_remove_memory(start, size, params.altmap);
+ goto error;
}
- /* fallback to not using altmap */
- }
-
- /* call arch's memory hotadd */
- ret = arch_add_memory(nid, start, size, &params);
- if (ret < 0)
- goto error_free;
-
- /* create memory block devices after memory was added */
- ret = create_memory_block_devices(start, size, params.altmap, group);
- if (ret) {
- arch_remove_memory(start, size, params.altmap);
- goto error_free;
}
if (new_node) {
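
With add_memory_resource() now looping per memory block, a caller such as the dax/kmem driver no longer has to restrict MHP_MEMMAP_ON_MEMORY requests to a single memory block. A hypothetical caller sketch (names and values are placeholders; size must still be a multiple of memory_block_size_bytes()):

	/* Hot-add 1 GiB with self-hosted memmaps; the range spans
	 * several 128 MiB blocks and each gets its own altmap. */
	static int example_hotadd(int nid, u64 start)
	{
		return add_memory_driver_managed(nid, start, SZ_1G,
						 "System RAM (example)",
						 MHP_MEMMAP_ON_MEMORY);
	}
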
@@ -1496,8 +1566,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
-error_free:
- kfree(params.altmap);
error:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
@@ -2067,17 +2135,13 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
return 0;
}
-static int test_has_altmap_cb(struct memory_block *mem, void *arg)
+static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg)
{
- struct memory_block **mem_ptr = (struct memory_block **)arg;
- /*
- * return the memblock if we have altmap
- * and break callback.
- */
- if (mem->altmap) {
- *mem_ptr = mem;
- return 1;
- }
+ u64 *num_altmaps = (u64 *)arg;
+
+ if (mem->altmap)
+ *num_altmaps += 1;
+
return 0;
}
@@ -2151,11 +2215,29 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
+static int memory_blocks_have_altmaps(u64 start, u64 size)
+{
+ u64 num_memblocks = size / memory_block_size_bytes();
+ u64 num_altmaps = 0;
+
+ if (!mhp_memmap_on_memory())
+ return 0;
+
+ walk_memory_blocks(start, size, &num_altmaps,
+ count_memory_range_altmaps_cb);
+
+ if (num_altmaps == 0)
+ return 0;
+
+ if (WARN_ON_ONCE(num_memblocks != num_altmaps))
+ return -EINVAL;
+
+ return 1;
+}
+
static int __ref try_remove_memory(u64 start, u64 size)
{
- struct memory_block *mem;
- int rc = 0, nid = NUMA_NO_NODE;
- struct vmem_altmap *altmap = NULL;
+ int rc, nid = NUMA_NO_NODE;
BUG_ON(check_hotplug_memory_range(start, size));
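
One consequence of the rework below is visible to callers: ranges in which only some blocks carry altmaps are refused outright. A hypothetical sequence (driver context; assumes the memmap_on_memory parameter is enabled and names are placeholders):

	/* one block with a self-hosted memmap, its neighbour without */
	add_memory_driver_managed(nid, base, SZ_128M,
				  "System RAM (demo-a)", MHP_MEMMAP_ON_MEMORY);
	add_memory_driver_managed(nid, base + SZ_128M, SZ_128M,
				  "System RAM (demo-b)", MHP_NONE);

	/* a single removal spanning both fails up front:
	 * memory_blocks_have_altmaps() counts 2 blocks but 1 altmap
	 * and returns -EINVAL instead of tearing down a mixed range */
	offline_and_remove_memory(base, 2 * SZ_128M);
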
@@ -2172,45 +2254,26 @@ static int __ref try_remove_memory(u64 start, u64 size)
if (rc)
return rc;
- /*
- * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
- * the same granularity it was added - a single memory block.
- */
- if (mhp_memmap_on_memory()) {
- rc = walk_memory_blocks(start, size, &mem, test_has_altmap_cb);
- if (rc) {
- if (size != memory_block_size_bytes()) {
- pr_warn("Refuse to remove %#llx - %#llx,"
- "wrong granularity\n",
- start, start + size);
- return -EINVAL;
- }
- altmap = mem->altmap;
- /*
- * Mark altmap NULL so that we can add a debug
- * check on memblock free.
- */
- mem->altmap = NULL;
- }
- }
-
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
- /*
- * Memory block device removal under the device_hotplug_lock is
- * a barrier against racing online attempts.
- */
- remove_memory_block_devices(start, size);
-
mem_hotplug_begin();
- arch_remove_memory(start, size, altmap);
-
- /* Verify that all vmemmap pages have actually been freed. */
- if (altmap) {
- WARN(altmap->alloc, "Altmap not fully unmapped");
- kfree(altmap);
+ rc = memory_blocks_have_altmaps(start, size);
+ if (rc < 0) {
+ mem_hotplug_done();
+ return rc;
+ } else if (!rc) {
+ /*
+ * Memory block device removal under the device_hotplug_lock is
+ * a barrier against racing online attempts.
+ * No altmaps present, do the removal directly
+ */
+ remove_memory_block_devices(start, size);
+ arch_remove_memory(start, size, NULL);
+ } else {
+ /* all memblocks in the range have altmaps */
+ remove_memory_blocks_and_altmaps(start, size);
}
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {