1 /******************************************************************************
2 * common/stop_machine.c
3 *
4 * Facilities to put whole machine in a safe 'stop' state
5 *
6 * Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation
7 * Copyright 2008 Kevin Tian <kevin.tian@intel.com>, Intel Corporation.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms and conditions of the GNU General Public License,
11 * version 2, as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; If not, see <http://www.gnu.org/licenses/>.
20 */
21
22 #include <xen/init.h>
23 #include <xen/sched.h>
24 #include <xen/spinlock.h>
25 #include <xen/tasklet.h>
26 #include <xen/stop_machine.h>
27 #include <xen/errno.h>
28 #include <xen/smp.h>
29 #include <xen/cpu.h>
30 #include <asm/current.h>
31 #include <asm/processor.h>
32
/*
 * Phases of the stop-machine protocol.  The initiating CPU advances
 * stopmachine_data.state through these values in order; every other
 * online CPU tracks the transitions in stopmachine_action() and
 * acknowledges each one by incrementing stopmachine_data.done.
 */
enum stopmachine_state {
    /* Initial state set by the initiator before kicking the tasklets. */
    STOPMACHINE_START,
    /* Rendezvous: all CPUs have entered their tasklet. */
    STOPMACHINE_PREPARE,
    /* All non-initiating CPUs disable interrupts. */
    STOPMACHINE_DISABLE_IRQ,
    /* The selected CPU(s) invoke the caller-provided function. */
    STOPMACHINE_INVOKE,
    /* Protocol complete: CPUs re-enable interrupts and leave. */
    STOPMACHINE_EXIT
};
40
/*
 * Shared control block for one stop-machine operation.  Written by the
 * initiating CPU (under stopmachine_lock) and read by all participants.
 */
struct stopmachine_data {
    /* Number of CPUs expected to acknowledge each state transition
     * (all online CPUs except the initiator). */
    unsigned int nr_cpus;

    /* Current protocol phase; advanced only by the initiator. */
    enum stopmachine_state state;
    /* Count of CPUs that have completed the current phase. */
    atomic_t done;

    /* CPU that should run fn (NR_CPUS means every CPU runs it). */
    unsigned int fn_cpu;
    /* First non-zero return value from fn, 0 if all succeeded. */
    int fn_result;
    /* The function to invoke once the machine is stopped ... */
    int (*fn)(void *);
    /* ... and its opaque argument. */
    void *fn_data;
};
52
/* Per-CPU tasklet through which each CPU joins the rendezvous. */
static DEFINE_PER_CPU(struct tasklet, stopmachine_tasklet);
/* Single shared control block; protected by stopmachine_lock below. */
static struct stopmachine_data stopmachine_data;
/* Serialises initiators: only one stop_machine_run() at a time. */
static DEFINE_SPINLOCK(stopmachine_lock);
56
/*
 * Advance the shared state machine to 'state'.  The acknowledgement
 * counter is reset first; the smp_wmb() guarantees other CPUs observe
 * the zeroed counter no later than the new state, so an increment made
 * in response to the new state can never be lost to a stale reset.
 */
static void stopmachine_set_state(enum stopmachine_state state)
{
    atomic_set(&stopmachine_data.done, 0);
    smp_wmb();
    stopmachine_data.state = state;
}
63
stopmachine_wait_state(void)64 static void stopmachine_wait_state(void)
65 {
66 while ( atomic_read(&stopmachine_data.done) != stopmachine_data.nr_cpus )
67 cpu_relax();
68 }
69
/*
 * Bring the whole machine to a safe stop and run fn(data) on 'cpu'
 * (or on every CPU if cpu == NR_CPUS) with interrupts disabled
 * everywhere.
 *
 * Must be called with interrupts enabled and outside of any context
 * that could deadlock against the per-CPU tasklets.  Returns fn's
 * (first non-zero) result, or -EBUSY if the CPU maps or the
 * stop-machine lock could not be taken without blocking.
 */
int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
{
    cpumask_t allbutself;
    unsigned int i, nr_cpus;
    int ret;

    BUG_ON(!local_irq_is_enabled());

    /* cpu_online_map must not change. */
    if ( !get_cpu_maps() )
        return -EBUSY;

    /* Every online CPU except us must rendezvous in its tasklet. */
    cpumask_andnot(&allbutself, &cpu_online_map,
                   cpumask_of(smp_processor_id()));
    nr_cpus = cpumask_weight(&allbutself);

    /* Must not spin here as the holder will expect us to be descheduled. */
    if ( !spin_trylock(&stopmachine_lock) )
    {
        put_cpu_maps();
        return -EBUSY;
    }

    /* Publish the operation parameters before waking any follower. */
    stopmachine_data.fn = fn;
    stopmachine_data.fn_data = data;
    stopmachine_data.nr_cpus = nr_cpus;
    stopmachine_data.fn_cpu = cpu;
    stopmachine_data.fn_result = 0;
    atomic_set(&stopmachine_data.done, 0);
    stopmachine_data.state = STOPMACHINE_START;

    /* Ensure the above stores are visible before the tasklets run. */
    smp_wmb();

    for_each_cpu ( i, &allbutself )
        tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);

    /* Wait for all other CPUs to arrive in stopmachine_action(). */
    stopmachine_set_state(STOPMACHINE_PREPARE);
    stopmachine_wait_state();

    /* Everyone (including us) runs with interrupts off from here on. */
    local_irq_disable();
    stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
    stopmachine_wait_state();
    /*
     * Lock-order checking is meaningless while other CPUs are spinning
     * with IRQs disabled; suppress it for the critical window.
     */
    spin_debug_disable();

    stopmachine_set_state(STOPMACHINE_INVOKE);
    /* Run fn locally if we are the target CPU, or if all CPUs are. */
    if ( (cpu == smp_processor_id()) || (cpu == NR_CPUS) )
    {
        ret = (*fn)(data);
        if ( ret )
            write_atomic(&stopmachine_data.fn_result, ret);
    }
    stopmachine_wait_state();
    /* Collect the first failure reported by any invoking CPU. */
    ret = stopmachine_data.fn_result;

    spin_debug_enable();
    /* Release the other CPUs; they re-enable IRQs on their way out. */
    stopmachine_set_state(STOPMACHINE_EXIT);
    stopmachine_wait_state();
    local_irq_enable();

    spin_unlock(&stopmachine_lock);

    put_cpu_maps();

    return ret;
}
135
/*
 * Per-CPU tasklet body: follow the initiator through the stop-machine
 * state sequence, acknowledging each transition.  Runs on every online
 * CPU except the one executing stop_machine_run().
 */
static void stopmachine_action(unsigned long cpu)
{
    enum stopmachine_state state = STOPMACHINE_START;

    BUG_ON(cpu != smp_processor_id());

    /* Pair with the initiator's smp_wmb(): see its parameter stores. */
    smp_mb();

    while ( state != STOPMACHINE_EXIT )
    {
        /* Spin until the initiator moves the machine to a new phase. */
        while ( stopmachine_data.state == state )
            cpu_relax();

        state = stopmachine_data.state;
        switch ( state )
        {
        case STOPMACHINE_DISABLE_IRQ:
            local_irq_disable();
            break;
        case STOPMACHINE_INVOKE:
            /* Invoke fn if this CPU was selected, or if all were. */
            if ( (stopmachine_data.fn_cpu == smp_processor_id()) ||
                 (stopmachine_data.fn_cpu == NR_CPUS) )
            {
                int ret = stopmachine_data.fn(stopmachine_data.fn_data);

                if ( ret )
                    write_atomic(&stopmachine_data.fn_result, ret);
            }
            break;
        default:
            break;
        }

        /*
         * Order this phase's work before the acknowledgement, so the
         * initiator never observes 'done' before the effects are real.
         */
        smp_mb();
        atomic_inc(&stopmachine_data.done);
    }

    /* EXIT observed: restore interrupts disabled in DISABLE_IRQ. */
    local_irq_enable();
}
175
cpu_callback(struct notifier_block * nfb,unsigned long action,void * hcpu)176 static int cpu_callback(
177 struct notifier_block *nfb, unsigned long action, void *hcpu)
178 {
179 unsigned int cpu = (unsigned long)hcpu;
180
181 if ( action == CPU_UP_PREPARE )
182 tasklet_init(&per_cpu(stopmachine_tasklet, cpu),
183 stopmachine_action, cpu);
184
185 return NOTIFY_DONE;
186 }
187
/* Hotplug notifier registration block for cpu_callback() above. */
static struct notifier_block cpu_nfb = {
    .notifier_call = cpu_callback
};
191
cpu_stopmachine_init(void)192 static int __init cpu_stopmachine_init(void)
193 {
194 unsigned int cpu;
195 for_each_online_cpu ( cpu )
196 {
197 void *hcpu = (void *)(long)cpu;
198 cpu_callback(&cpu_nfb, CPU_UP_PREPARE, hcpu);
199 }
200 register_cpu_notifier(&cpu_nfb);
201 return 0;
202 }
203 __initcall(cpu_stopmachine_init);
204