path: root/extras/recipes-kernel/linux/linux-omap-psp-2.6.32/0031-ARM-VFP-add-support-to-sync-the-VFP-state-of-the-cur.patch
From b366c2810c736d8d544cf261f3354eebaf5e5655 Mon Sep 17 00:00:00 2001
From: Imre Deak <imre.deak@nokia.com>
Date: Thu, 4 Feb 2010 21:38:02 +0200
Subject: [PATCH 31/45] ARM: VFP: add support to sync the VFP state of the current thread

So far vfp_sync_state has worked only for threads other than the
current one. That was sufficient for tracing other threads, but not for
PTRACE_TRACEME. Syncing the current thread will also be needed by an
upcoming patch adding support for VFP context save/restore around
signal handlers.

For SMP we now need get_cpu, since we have to protect the FPEXC
register. Other than this, things remain the same for threads other
than the current one.

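For context, a minimal user-space sketch of the PTRACE_TRACEME plus
PTRACE_GETVFPREGS sequence that ends up calling vfp_sync_state()
(illustrative only: the request number comes from the ARM asm/ptrace.h,
and the struct below merely mirrors the kernel's struct user_vfp layout
so the example stays self-contained):

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

#ifndef PTRACE_GETVFPREGS
#define PTRACE_GETVFPREGS 27	/* arch/arm/include/asm/ptrace.h */
#endif

struct vfp_user_regs {		/* mirrors the kernel's struct user_vfp */
	unsigned long long fpregs[32];
	unsigned long fpscr;
};

int main(void)
{
	struct vfp_user_regs vfp;
	pid_t child = fork();

	if (child == 0) {
		/* Tracee: request tracing, then stop so the parent can inspect us. */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		_exit(0);
	}

	/* Tracer: once the child has stopped, fetch its VFP context.  The
	 * kernel has to sync the saved thread state with the hardware
	 * registers before copying it out, which is what vfp_sync_state()
	 * provides. */
	waitpid(child, NULL, 0);
	if (ptrace(PTRACE_GETVFPREGS, child, NULL, &vfp) < 0)
		perror("PTRACE_GETVFPREGS");
	else
		printf("fpscr = 0x%08lx\n", vfp.fpscr);

	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}
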
Signed-off-by: Imre Deak <imre.deak@nokia.com>
Signed-off-by: Bryan Wu <bryan.wu@canonical.com>
---
 arch/arm/vfp/vfpmodule.c |   46 +++++++++++++++++++++++++++++++---------------
 1 files changed, 31 insertions(+), 15 deletions(-)

diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index aed05bc..f28f45b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -423,12 +423,19 @@ static inline void vfp_pm_init(void) { }
 #endif /* CONFIG_PM */
 
 /*
- * Synchronise the hardware VFP state of a thread other than current with the
- * saved one. This function is used by the ptrace mechanism.
+ * Synchronise the hardware VFP state of a thread with the saved one.
+ * This function is used by the ptrace mechanism and the signal handler path.
  */
-#ifdef CONFIG_SMP
 void vfp_sync_state(struct thread_info *thread)
 {
+	unsigned int cpu = get_cpu();
+	u32 fpexc = fmrx(FPEXC);
+	int vfp_enabled;
+	int self;
+
+	vfp_enabled = fpexc & FPEXC_EN;
+	self = thread == current_thread_info();
+#ifdef CONFIG_SMP
 	/*
 	 * On SMP systems, the VFP state is automatically saved at every
 	 * context switch. We mark the thread VFP state as belonging to a
@@ -436,18 +443,22 @@ void vfp_sync_state(struct thread_info *thread)
 	 * needed.
 	 */
 	thread->vfpstate.hard.cpu = NR_CPUS;
-}
-#else
-void vfp_sync_state(struct thread_info *thread)
-{
-	unsigned int cpu = get_cpu();
-	u32 fpexc = fmrx(FPEXC);
-
 	/*
-	 * If VFP is enabled, the previous state was already saved and
-	 * last_VFP_context updated.
+	 * Only the current thread's saved VFP context can be out-of-date.
+	 * For others there is nothing else to do, since we already ensured
+	 * force loading above.
 	 */
-	if (fpexc & FPEXC_EN)
+	if (!self)
+		goto out;
+#endif
+	/*
+	 * If the VFP is enabled only the current thread's saved VFP
+	 * context can get out-of-date. For other threads the context
+	 * was updated when the current thread started to use the VFP.
+	 * This also means that the context will be reloaded next time
+	 * the thread uses the VFP, so no need to enforce it.
+	 */
+	if (vfp_enabled && !self)
 		goto out;
 
 	if (!last_VFP_context[cpu])
@@ -456,8 +467,14 @@ void vfp_sync_state(struct thread_info *thread)
 	/*
 	 * Save the last VFP state on this CPU.
 	 */
-	fmxr(FPEXC, fpexc | FPEXC_EN);
+	if (!vfp_enabled)
+		fmxr(FPEXC, fpexc | FPEXC_EN);
 	vfp_save_state(last_VFP_context[cpu], fpexc);
+	/*
+	 * Disable VFP in case it was enabled so that the force reload
+	 * can happen.
+	 */
+	fpexc &= ~FPEXC_EN;
 	fmxr(FPEXC, fpexc);
 
 	/*
@@ -469,7 +486,6 @@ void vfp_sync_state(struct thread_info *thread)
 out:
 	put_cpu();
 }
-#endif
 
 #include <linux/smp.h>
 
-- 
1.6.6.1
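
For readers piecing the hunks back together, the merged function after
this patch reads roughly as below. This is a simplified sketch rather
than the literal post-patch vfpmodule.c; in particular it assumes the
unchanged 2.6.32 context around the last hunk (the last_VFP_context[cpu]
reset that forces the reload on next use):

void vfp_sync_state(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();	/* pin the CPU: FPEXC is per-CPU state */
	u32 fpexc = fmrx(FPEXC);
	int vfp_enabled = fpexc & FPEXC_EN;
	int self = thread == current_thread_info();

#ifdef CONFIG_SMP
	/* Mark the state as foreign so it is reloaded at the next context
	 * switch; for threads other than the current one that is enough. */
	thread->vfpstate.hard.cpu = NR_CPUS;
	if (!self)
		goto out;
#endif
	/* On UP, a non-current thread's state was already saved when the
	 * current thread took over the VFP, so only 'self' can be stale. */
	if (vfp_enabled && !self)
		goto out;

	if (!last_VFP_context[cpu])
		goto out;

	/* Save the owner's state, enabling the VFP first if it was off. */
	if (!vfp_enabled)
		fmxr(FPEXC, fpexc | FPEXC_EN);
	vfp_save_state(last_VFP_context[cpu], fpexc);

	/* Leave the VFP disabled so the next use traps and reloads. */
	fpexc &= ~FPEXC_EN;
	fmxr(FPEXC, fpexc);

	/* Unchanged context in 2.6.32: drop ownership to force the reload. */
	last_VFP_context[cpu] = NULL;
out:
	put_cpu();
}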