path: root/drivers/xen/mem-reservation.c
// SPDX-License-Identifier: GPL-2.0

/******************************************************************************
 * Xen memory reservation utilities.
 *
 * Copyright (c) 2003, B Dragovic
 * Copyright (c) 2003-2004, M Williamson, K Fraser
 * Copyright (c) 2005 Dan M. Smith, IBM Corporation
 * Copyright (c) 2010 Daniel Kiper
 * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 */

#include <linux/moduleparam.h>

#include <asm/xen/hypercall.h>

#include <xen/interface/memory.h>
#include <xen/mem-reservation.h>

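/*
 * Scrub pages before returning them to the hypervisor; can be disabled at
 * boot with xen_scrub_pages=0 (the default comes from
 * CONFIG_XEN_SCRUB_PAGES_DEFAULT).
 */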
bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);

/*
 * Use one extent per PAGE_SIZE to avoid breaking the page down into
 * multiple frames.
 */
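/*
 * For example: XEN_PAGE_SIZE is 4 KiB, so on x86 (PAGE_SIZE == XEN_PAGE_SIZE)
 * XEN_PFN_PER_PAGE == 1 and EXTENT_ORDER == 0, while an arm64 kernel built
 * with 64 KiB pages has XEN_PFN_PER_PAGE == 16 and EXTENT_ORDER == 4.
 */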
#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)

#ifdef CONFIG_XEN_HAVE_PVMMU
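/**
 * __xenmem_reservation_va_mapping_update - restore mappings for pages
 *	returned to the guest
 * @count:  number of entries in @pages and @frames
 * @pages:  pages just populated via XENMEM_populate_physmap
 * @frames: machine frames backing each page
 *
 * Re-establish the P2M entry and, for lowmem pages, the kernel linear-map
 * PTE for each page.  PV MMU only; callers normally go through the
 * xenmem_reservation_va_mapping_update() wrapper in <xen/mem-reservation.h>,
 * which is a no-op for auto-translated guests.
 */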
void __xenmem_reservation_va_mapping_update(unsigned long count,
					    struct page **pages,
					    xen_pfn_t *frames)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn;

		BUG_ON(!page);
		pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		set_phys_to_machine(pfn, frames[i]);

		/* Link back into the page tables if not highmem. */
		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					mfn_pte(frames[i], PAGE_KERNEL),
					0);
			BUG_ON(ret);
		}
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_update);

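/**
 * __xenmem_reservation_va_mapping_reset - tear down mappings for pages
 *	being returned to Xen
 * @count: number of entries in @pages
 * @pages: pages about to be released via XENMEM_decrease_reservation
 *
 * Clear the kernel linear-map PTE for lowmem pages and invalidate the P2M
 * entry of each page.  PV MMU only; see the wrapper in
 * <xen/mem-reservation.h>.
 */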
void __xenmem_reservation_va_mapping_reset(unsigned long count,
					   struct page **pages)
{
	int i;

	for (i = 0; i < count; i++) {
		struct page *page = pages[i];
		unsigned long pfn = page_to_pfn(page);

		/*
		 * We don't support PV MMU when Linux and Xen are using
		 * different page granularity.
		 */
		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);

		if (!PageHighMem(page)) {
			int ret;

			ret = HYPERVISOR_update_va_mapping(
					(unsigned long)__va(pfn << PAGE_SHIFT),
					__pte_ma(0), 0);
			BUG_ON(ret);
		}
		__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}
EXPORT_SYMBOL_GPL(__xenmem_reservation_va_mapping_reset);
#endif /* CONFIG_XEN_HAVE_PVMMU */

/**
 * xenmem_reservation_increase - ask Xen to populate memory at @frames
 * @count:  number of entries in @frames
 * @frames: array of PFNs (in Xen page granularity)
 *
 * Return: the number of extents populated (possibly fewer than @count) on
 * success, otherwise a negative error code from the hypercall.
 */
int xenmem_reservation_increase(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_populate_physmap requires a PFN based on Xen granularity. */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_increase);

/**
 * xenmem_reservation_decrease - hand memory at @frames back to Xen
 * @count:  number of entries in @frames
 * @frames: array of GFNs (in Xen page granularity)
 *
 * Return: the number of extents released (possibly fewer than @count) on
 * success, otherwise a negative error code from the hypercall.
 */
int xenmem_reservation_decrease(int count, xen_pfn_t *frames)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = EXTENT_ORDER,
		.domid        = DOMID_SELF
	};

	/* XENMEM_decrease_reservation requires a GFN */
	set_xen_guest_handle(reservation.extent_start, frames);
	reservation.nr_extents = count;
	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}
EXPORT_SYMBOL_GPL(xenmem_reservation_decrease);
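
/*
 * Usage sketch (illustrative only): how a balloon-style driver might pair
 * these helpers.  fill_frame_list() is a hypothetical caller-side helper
 * that stores one Xen frame number per page into frame_list; the
 * underscore-less va_mapping wrappers come from <xen/mem-reservation.h>
 * and are no-ops for auto-translated guests.
 *
 * Handing nr pages back to the hypervisor:
 *
 *	fill_frame_list(pages, frame_list, nr);
 *	for (i = 0; i < nr; i++)
 *		xenmem_reservation_scrub_page(pages[i]);
 *	xenmem_reservation_va_mapping_reset(nr, pages);
 *	ret = xenmem_reservation_decrease(nr, frame_list);
 *	BUG_ON(ret != nr);
 *
 * Reclaiming pages from the hypervisor:
 *
 *	fill_frame_list(pages, frame_list, nr);
 *	ret = xenmem_reservation_increase(nr, frame_list);
 *	if (ret > 0)
 *		xenmem_reservation_va_mapping_update(ret, pages, frame_list);
 */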