path: root/arch/powerpc/sysdev/xics/icp-opal.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016 IBM Corporation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/of.h>

#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

static void icp_opal_teardown_cpu(void)
{
	int hw_cpu = hard_smp_processor_id();

	/* Clear any pending IPI by setting the MFRR to 0xff (least favored) */
	opal_int_set_mfrr(hw_cpu, 0xff);
}

static void icp_opal_flush_ipi(void)
{
	/*
	 * We take the IPI irq but never return, so we need to EOI the IPI
	 * while leaving our priority at 0.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging the idle loop instead?
	 * Or creating some task to be scheduled?
	 */
	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
		force_external_irq_replay();
}
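
/*
 * For reference: the XIRR word passed to opal_int_eoi() packs the CPPR in
 * its top byte and the interrupt vector in the low 24 bits, which is why
 * the EOI above uses (0x00 << 24) | XICS_IPI. A minimal sketch of the
 * encoding (xirr_pack() is a hypothetical helper for illustration, not
 * used elsewhere in this file):
 */
static inline unsigned int xirr_pack(unsigned char cppr, unsigned int vec)
{
	/* CPPR in bits 31:24, vector in bits 23:0 */
	return ((unsigned int)cppr << 24) | (vec & 0x00ffffff);
}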

static unsigned int icp_opal_get_xirr(void)
{
	unsigned int kvm_xirr;
	__be32 hw_xirr;
	int64_t rc;

	/* Handle an interrupt latched by KVM first */
	kvm_xirr = kvmppc_get_xics_latch();
	if (kvm_xirr)
		return kvm_xirr;

	/* Then ask OPAL */
	rc = opal_int_get_xirr(&hw_xirr, false);
	if (rc < 0)
		return 0;
	return be32_to_cpu(hw_xirr);
}
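
/*
 * Example: a returned XIRR of 0x05000002 decodes as CPPR 0x05 (the
 * priority the interrupt was accepted at) and vector 0x000002, which is
 * XICS_IPI with the usual XICS vector definitions. The vector is
 * extracted with xirr & 0x00ffffff, as icp_opal_get_irq() does below.
 */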

static unsigned int icp_opal_get_irq(void)
{
	unsigned int xirr;
	unsigned int vec;
	unsigned int irq;

	xirr = icp_opal_get_xirr();
	vec = xirr & 0x00ffffff;
	if (vec == XICS_IRQ_SPURIOUS)
		return 0;

	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq)) {
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a Linux mapping, so have the ICS mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	if (opal_int_eoi(xirr) > 0)
		force_external_irq_replay();

	return 0;
}

static void icp_opal_set_cpu_priority(unsigned char cppr)
{
	/*
	 * Here be dragons. The caller has asked to allow only IPIs and not
	 * external interrupts. But OPAL XIVE doesn't support that. So instead
	 * of allowing no interrupts, allow all. That's still not right, but
	 * currently the only caller that does this is xics_migrate_irqs_away(),
	 * and it works in that case.
	 */
	if (cppr >= DEFAULT_PRIORITY)
		cppr = LOWEST_PRIORITY;

	xics_set_base_cppr(cppr);
	opal_int_set_cppr(cppr);
	iosync();
}
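
/*
 * Worked example of the clamp above, assuming the usual XICS priority
 * values (DEFAULT_PRIORITY 5, LOWEST_PRIORITY 0xff): a caller passing
 * cppr = 5 to shut out external interrupts gets 0xff instead, leaving
 * everything enabled, while cppr = 0 (mask everything) passes through
 * unchanged.
 */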

static void icp_opal_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int64_t rc;

	iosync();
	rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);

	/*
	 * EOI tells us whether there are more interrupts to fetch.
	 *
	 * Some HW implementations might not be able to send us another
	 * external interrupt in that case, so we force a replay.
	 */
	if (rc > 0)
		force_external_irq_replay();
}

#ifdef CONFIG_SMP

static void icp_opal_cause_ipi(int cpu)
{
	int hw_cpu = get_hard_smp_processor_id(cpu);

	kvmppc_set_host_ipi(cpu);
	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}

static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	kvmppc_clear_host_ipi(cpu);
	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);

	return smp_ipi_demux();
}
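
/*
 * The IPI round trip, for reference: icp_opal_cause_ipi() on the sender
 * writes IPI_PRIORITY to the target's MFRR, the target then takes the
 * interrupt as vector XICS_IPI, and icp_opal_ipi_action() retracts it by
 * restoring the MFRR to 0xff before demuxing the message.
 */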

/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_opal_flush_interrupt(void)
{
	unsigned int xirr;
	unsigned int vec;

	do {
		xirr = icp_opal_get_xirr();
		vec = xirr & 0x00ffffff;
		if (vec == XICS_IRQ_SPURIOUS)
			break;
		if (vec == XICS_IPI) {
			/* Clear pending IPI */
			int cpu = smp_processor_id();
			kvmppc_clear_host_ipi(cpu);
			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
		} else {
			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
			       "disabling\n", vec);
			xics_mask_unknown_vec(vec);
		}

		/* EOI the interrupt */
	} while (opal_int_eoi(xirr) > 0);
}

#endif /* CONFIG_SMP */

static const struct icp_ops icp_opal_ops = {
	.get_irq	= icp_opal_get_irq,
	.eoi		= icp_opal_eoi,
	.set_priority	= icp_opal_set_cpu_priority,
	.teardown_cpu	= icp_opal_teardown_cpu,
	.flush_ipi	= icp_opal_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action	= icp_opal_ipi_action,
	.cause_ipi	= icp_opal_cause_ipi,
#endif
};

int icp_opal_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
	if (!np)
		return -ENODEV;

	icp_ops = &icp_opal_ops;

	printk("XICS: Using OPAL ICP fallbacks\n");

	of_node_put(np);
	return 0;
}
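
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * platform XICS setup would typically probe a directly-mapped ICP first
 * and fall back to this OPAL-mediated one only if that fails, e.g.:
 *
 *	if (icp_native_init() != 0)
 *		icp_opal_init();
 */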