1 /******************************************************************************
2 * keyhandler.c
3 */
4
5 #include <asm/regs.h>
6 #include <xen/keyhandler.h>
7 #include <xen/shutdown.h>
8 #include <xen/event.h>
9 #include <xen/console.h>
10 #include <xen/serial.h>
11 #include <xen/sched.h>
12 #include <xen/tasklet.h>
13 #include <xen/domain.h>
14 #include <xen/rangeset.h>
15 #include <xen/compat.h>
16 #include <xen/ctype.h>
17 #include <xen/perfc.h>
18 #include <xen/mm.h>
19 #include <xen/watchdog.h>
20 #include <xen/init.h>
21 #include <asm/debugger.h>
22 #include <asm/div64.h>
23
/* Key most recently queued for deferred handling by keypress_tasklet. */
static unsigned char keypress_key;
/*
 * When set, multi-CPU diagnostics ('d', '0') are spread out across CPUs /
 * over time instead of being run synchronously, to avoid stalling large
 * machines. Toggled by the 'A' key; default set in initialize_keytable().
 */
static bool_t alt_key_handling;

/* Forward declarations of the handlers installed in key_table below. */
static keyhandler_fn_t show_handlers, dump_hwdom_registers,
    dump_domains, read_clocks;
static irq_keyhandler_fn_t do_toggle_alt_key, dump_registers,
    reboot_machine, run_all_keyhandlers, do_debug_key;
31
/*
 * Table of handlers, indexed by the key's ASCII code. Entries flagged as
 * IRQ callbacks are invoked directly in interrupt context; all others are
 * deferred to tasklet context by handle_keypress(). Entries flagged
 * 'diagnostic' are additionally fired by the '*' (run-all) key.
 */
static struct keyhandler {
    union {
        keyhandler_fn_t *fn;         /* Non-IRQ variant (tasklet context). */
        irq_keyhandler_fn_t *irq_fn; /* IRQ-context variant. */
    };

    const char *desc;    /* Description for help message. */
    bool_t irq_callback, /* Call in irq context? if not, tasklet context. */
        diagnostic;      /* Include in 'dump all' handler. */
} key_table[128] __read_mostly =
{
#define KEYHANDLER(k, f, desc, diag)            \
    [k] = { { (f) }, desc, 0, diag }

#define IRQ_KEYHANDLER(k, f, desc, diag)        \
    [k] = { { (keyhandler_fn_t *)(f) }, desc, 1, diag }

    IRQ_KEYHANDLER('A', do_toggle_alt_key, "toggle alternative key handling", 0),
    IRQ_KEYHANDLER('d', dump_registers, "dump registers", 1),
    KEYHANDLER('h', show_handlers, "show this message", 0),
    KEYHANDLER('q', dump_domains, "dump domain (and guest debug) info", 1),
    KEYHANDLER('r', dump_runq, "dump run queues", 1),
    IRQ_KEYHANDLER('R', reboot_machine, "reboot machine", 0),
    KEYHANDLER('t', read_clocks, "display multi-cpu clock info", 1),
    KEYHANDLER('0', dump_hwdom_registers, "dump Dom0 registers", 1),
    IRQ_KEYHANDLER('%', do_debug_key, "trap to xendbg", 0),
    IRQ_KEYHANDLER('*', run_all_keyhandlers, "print all diagnostics", 0),

#ifdef CONFIG_PERF_COUNTERS
    KEYHANDLER('p', perfc_printall, "print performance counters", 1),
    KEYHANDLER('P', perfc_reset, "reset performance counters", 0),
#endif

#ifdef CONFIG_DEBUG_LOCK_PROFILE
    KEYHANDLER('l', spinlock_profile_printall, "print lock profile info", 1),
    KEYHANDLER('L', spinlock_profile_reset, "reset lock profile info", 0),
#endif

#undef IRQ_KEYHANDLER
#undef KEYHANDLER
};
73
/* Tasklet action: run the queued keypress handler in non-IRQ context. */
static void keypress_action(void *unused)
{
    handle_keypress(keypress_key, NULL);
}

static DECLARE_TASKLET(keypress_tasklet, keypress_action, NULL);
80
handle_keypress(unsigned char key,struct cpu_user_regs * regs)81 void handle_keypress(unsigned char key, struct cpu_user_regs *regs)
82 {
83 struct keyhandler *h;
84
85 if ( key >= ARRAY_SIZE(key_table) || !(h = &key_table[key])->fn )
86 return;
87
88 if ( !in_irq() || h->irq_callback )
89 {
90 console_start_log_everything();
91 h->irq_callback ? h->irq_fn(key, regs) : h->fn(key);
92 console_end_log_everything();
93 }
94 else
95 {
96 keypress_key = key;
97 tasklet_schedule(&keypress_tasklet);
98 }
99 }
100
/*
 * Install a non-IRQ (tasklet context) handler for 'key'. The slot must be
 * vacant; 'desc' is shown by the 'h' help key, and 'diagnostic' marks the
 * handler for inclusion in the '*' run-all key.
 */
void register_keyhandler(unsigned char key, keyhandler_fn_t fn,
                         const char *desc, bool_t diagnostic)
{
    struct keyhandler *ent;

    BUG_ON(key >= ARRAY_SIZE(key_table)); /* Key in range? */
    ASSERT(!key_table[key].fn);           /* Clobbering something else? */

    ent = &key_table[key];
    ent->fn = fn;
    ent->desc = desc;
    ent->irq_callback = 0;
    ent->diagnostic = diagnostic;
}
112
/*
 * Install an IRQ-context handler for 'key'. Same contract as
 * register_keyhandler(), but the handler runs directly in interrupt
 * context and additionally receives the interrupted register state.
 */
void register_irq_keyhandler(unsigned char key, irq_keyhandler_fn_t fn,
                             const char *desc, bool_t diagnostic)
{
    struct keyhandler *ent;

    BUG_ON(key >= ARRAY_SIZE(key_table)); /* Key in range? */
    ASSERT(!key_table[key].irq_fn);       /* Clobbering something else? */

    ent = &key_table[key];
    ent->irq_fn = fn;
    ent->desc = desc;
    ent->irq_callback = 1;
    ent->diagnostic = diagnostic;
}
124
show_handlers(unsigned char key)125 static void show_handlers(unsigned char key)
126 {
127 unsigned int i;
128
129 printk("'%c' pressed -> showing installed handlers\n", key);
130 for ( i = 0; i < ARRAY_SIZE(key_table); i++ )
131 if ( key_table[i].fn )
132 printk(" key '%c' (ascii '%02x') => %s\n",
133 isprint(i) ? i : ' ', i, key_table[i].desc);
134 }
135
/* CPUs that still owe a state dump for the current 'd' key invocation. */
static cpumask_t dump_execstate_mask;

/*
 * Dump execution state on the current CPU: host state if we interrupted
 * the hypervisor, plus guest state if a guest vCPU is current. Clears this
 * CPU from dump_execstate_mask; under alternative key handling it then
 * chains to the next pending CPU via IPI, and the final CPU undoes the
 * console/watchdog changes made by dump_registers().
 */
void dump_execstate(struct cpu_user_regs *regs)
{
    unsigned int cpu = smp_processor_id();

    if ( !guest_mode(regs) )
    {
        printk("*** Dumping CPU%u host state: ***\n", cpu);
        show_execution_state(regs);
    }

    if ( !is_idle_vcpu(current) )
    {
        printk("*** Dumping CPU%u guest state (%pv): ***\n",
               smp_processor_id(), current);
        show_execution_state(guest_cpu_user_regs());
        printk("\n");
    }

    /* Check this CPU in; under normal handling dump_registers() polls this. */
    cpumask_clear_cpu(cpu, &dump_execstate_mask);
    if ( !alt_key_handling )
        return;

    /* Alternative handling: kick the next pending CPU, one at a time. */
    cpu = cpumask_cycle(cpu, &dump_execstate_mask);
    if ( cpu < nr_cpu_ids )
    {
        smp_send_state_dump(cpu);
        return;
    }

    /* Last CPU done: undo what dump_registers() set up. */
    console_end_sync();
    watchdog_enable();
}
170
/*
 * 'd' handler (IRQ context): dump register state on every online CPU.
 * Normal handling dumps remote CPUs synchronously, one by one; alternative
 * handling lets dump_execstate() chain across CPUs asynchronously, in
 * which case the final CPU re-enables the console/watchdog state below.
 */
static void dump_registers(unsigned char key, struct cpu_user_regs *regs)
{
    unsigned int cpu;

    /* We want to get everything out that we possibly can. */
    watchdog_disable();
    console_start_sync();

    printk("'%c' pressed -> dumping registers\n\n", key);

    cpumask_copy(&dump_execstate_mask, &cpu_online_map);

    /* Get local execution state out immediately, in case we get stuck. */
    dump_execstate(regs);

    /* Alt. handling: remaining CPUs are dumped asynchronously one-by-one. */
    if ( alt_key_handling )
        return;

    /* Normal handling: synchronously dump the remaining CPUs' states. */
    for_each_cpu ( cpu, &dump_execstate_mask )
    {
        smp_send_state_dump(cpu);
        /* Wait for the target CPU to clear itself from the mask. */
        while ( cpumask_test_cpu(cpu, &dump_execstate_mask) )
            cpu_relax();
    }

    console_end_sync();
    watchdog_enable();
}
201
static DECLARE_TASKLET(dump_hwdom_tasklet, NULL, NULL);

/*
 * Tasklet action: dump hardware domain vCPU state starting from the vCPU
 * passed in 'data'. If softirqs become pending mid-walk, reschedule
 * ourselves onto the next vCPU's processor rather than hogging this CPU.
 */
static void dump_hwdom_action(void *data)
{
    struct vcpu *v = data;

    for ( ; ; )
    {
        vcpu_show_execution_state(v);
        if ( (v = v->next_in_list) == NULL )
            break;
        if ( softirq_pending(smp_processor_id()) )
        {
            /* Resume from 'v' in a fresh tasklet run on its processor. */
            dump_hwdom_tasklet.data = v;
            tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
            break;
        }
    }
}
221
/*
 * '0' handler: dump register state of every hardware domain (Dom0) vCPU.
 * Under alternative key handling, hands the remaining vCPUs off to
 * dump_hwdom_tasklet as soon as softirqs are pending.
 */
static void dump_hwdom_registers(unsigned char key)
{
    struct vcpu *v;

    if ( hardware_domain == NULL )
        return;

    printk("'%c' pressed -> dumping Dom0's registers\n", key);

    for_each_vcpu ( hardware_domain, v )
    {
        if ( alt_key_handling && softirq_pending(smp_processor_id()) )
        {
            /* Kill any stale run before re-arming the tasklet with 'v'. */
            tasklet_kill(&dump_hwdom_tasklet);
            tasklet_init(&dump_hwdom_tasklet, dump_hwdom_action, v);
            tasklet_schedule_on_cpu(&dump_hwdom_tasklet, v->processor);
            return;
        }
        vcpu_show_execution_state(v);
    }
}
243
/* 'R' handler (IRQ context): immediately restart the machine. */
static void reboot_machine(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> rebooting machine\n", key);
    machine_restart(0);
}
249
/*
 * 'q' handler: dump general, memory, scheduling and per-vCPU information
 * for every domain, then send VIRQ_DEBUG to each vCPU so guests can dump
 * their own debug state too. Runs under the domain-list RCU read lock.
 */
static void dump_domains(unsigned char key)
{
    struct domain *d;
    const struct sched_unit *unit;
    struct vcpu *v;
    s_time_t now = NOW();

    printk("'%c' pressed -> dumping domain info (now = %"PRI_stime")\n",
           key, now);

    rcu_read_lock(&domlist_read_lock);

    for_each_domain ( d )
    {
        unsigned int i;

        /* Don't starve softirqs while walking a potentially long list. */
        process_pending_softirqs();

        printk("General information for domain %u:\n", d->domain_id);
        printk(" refcnt=%d dying=%d pause_count=%d\n",
               atomic_read(&d->refcnt), d->is_dying,
               atomic_read(&d->pause_count));
        printk(" nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
               "dirty_cpus={%*pbl} max_pages=%u\n",
               domain_tot_pages(d), d->xenheap_pages, atomic_read(&d->shr_pages),
               atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
               d->max_pages);
        printk(" handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
               "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
               d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
               d->handle[ 4], d->handle[ 5], d->handle[ 6], d->handle[ 7],
               d->handle[ 8], d->handle[ 9], d->handle[10], d->handle[11],
               d->handle[12], d->handle[13], d->handle[14], d->handle[15],
               d->vm_assist);
        for ( i = 0 ; i < NR_DOMAIN_WATCHDOG_TIMERS; i++ )
            if ( test_bit(i, &d->watchdog_inuse_map) )
                /* >> 30 approximates ns -> s (2^30 ~ 10^9). */
                printk(" watchdog %d expires in %d seconds\n",
                       i, (u32)((d->watchdog_timer[i].expires - NOW()) >> 30));

        arch_dump_domain_info(d);

        rangeset_domain_printk(d);

        dump_pageframe_info(d);

        printk("NODE affinity for domain %d: [%*pbl]\n",
               d->domain_id, NODEMASK_PR(&d->node_affinity));

        printk("VCPU information and callbacks for domain %u:\n",
               d->domain_id);

        for_each_sched_unit ( d, unit )
        {
            printk(" UNIT%d affinities: hard={%*pbl} soft={%*pbl}\n",
                   unit->unit_id, CPUMASK_PR(unit->cpu_hard_affinity),
                   CPUMASK_PR(unit->cpu_soft_affinity));

            for_each_sched_unit_vcpu ( unit, v )
            {
                /* Yield to softirqs every 64 vCPUs on huge domains. */
                if ( !(v->vcpu_id & 0x3f) )
                    process_pending_softirqs();

                printk(" VCPU%d: CPU%d [has=%c] poll=%d "
                       "upcall_pend=%02x upcall_mask=%02x ",
                       v->vcpu_id, v->processor,
                       v->is_running ? 'T':'F', v->poll_evtchn,
                       vcpu_info(v, evtchn_upcall_pending),
                       !vcpu_event_delivery_is_enabled(v));
                if ( vcpu_cpu_dirty(v) )
                    printk("dirty_cpu=%u", read_atomic(&v->dirty_cpu));
                printk("\n");
                printk(" pause_count=%d pause_flags=%lx\n",
                       atomic_read(&v->pause_count), v->pause_flags);
                arch_dump_vcpu_info(v);

                if ( v->periodic_period == 0 )
                    printk("No periodic timer\n");
                else
                    printk("%"PRI_stime" Hz periodic timer (period %"PRI_stime" ms)\n",
                           1000000000 / v->periodic_period,
                           v->periodic_period / 1000000);
            }
        }
    }

    /* Second pass: let each guest vCPU know we want its debug output. */
    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            if ( !(v->vcpu_id & 0x3f) )
                process_pending_softirqs();

            printk("Notifying guest %d:%d (virq %d, port %d)\n",
                   d->domain_id, v->vcpu_id,
                   VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG]);
            send_guest_vcpu_virq(v, VIRQ_DEBUG);
        }
    }

    arch_dump_shared_mem_info();

    rcu_read_unlock(&domlist_read_lock);
}
353
/* CPUs that have yet to take their clock samples for read_clocks(). */
static cpumask_t read_clocks_cpumask;
/* Per-CPU samples, taken as close to simultaneously as possible. */
static DEFINE_PER_CPU(s_time_t, read_clocks_time);
static DEFINE_PER_CPU(u64, read_cycles_time);

/*
 * Rendezvous slave: spin with IRQs off until the initiating CPU opens the
 * sampling window (sets our bit in read_clocks_cpumask), then record local
 * system time and cycle counter and check in by clearing our bit.
 */
static void read_clocks_slave(void *unused)
{
    unsigned int cpu = smp_processor_id();
    local_irq_disable();
    while ( !cpumask_test_cpu(cpu, &read_clocks_cpumask) )
        cpu_relax();
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    cpumask_clear_cpu(cpu, &read_clocks_cpumask);
    local_irq_enable();
}
369
/*
 * 't' handler: measure cross-CPU skew of system time (NOW()) and the cycle
 * counter by sampling all online CPUs near-simultaneously, and report
 * current/max/average skew accumulated over all invocations.
 */
static void read_clocks(unsigned char key)
{
    unsigned int cpu = smp_processor_id(), min_stime_cpu, max_stime_cpu;
    unsigned int min_cycles_cpu, max_cycles_cpu;
    u64 min_stime, max_stime, dif_stime;
    u64 min_cycles, max_cycles, dif_cycles;
    /* Running statistics across invocations, protected by 'lock'. */
    static u64 sumdif_stime = 0, maxdif_stime = 0;
    static u64 sumdif_cycles = 0, maxdif_cycles = 0;
    static u32 count = 0;
    static DEFINE_SPINLOCK(lock);

    spin_lock(&lock);

    /* Park all other CPUs in read_clocks_slave(), spinning with IRQs off. */
    smp_call_function(read_clocks_slave, NULL, 0);

    local_irq_disable();
    /* Populating the mask releases the slaves to take their samples. */
    cpumask_andnot(&read_clocks_cpumask, &cpu_online_map, cpumask_of(cpu));
    per_cpu(read_clocks_time, cpu) = NOW();
    per_cpu(read_cycles_time, cpu) = get_cycles();
    local_irq_enable();

    /* Wait for every slave to check in. */
    while ( !cpumask_empty(&read_clocks_cpumask) )
        cpu_relax();

    /* Find the CPUs holding the extreme samples. */
    min_stime_cpu = max_stime_cpu = min_cycles_cpu = max_cycles_cpu = cpu;
    for_each_online_cpu ( cpu )
    {
        if ( per_cpu(read_clocks_time, cpu) <
             per_cpu(read_clocks_time, min_stime_cpu) )
            min_stime_cpu = cpu;
        if ( per_cpu(read_clocks_time, cpu) >
             per_cpu(read_clocks_time, max_stime_cpu) )
            max_stime_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) <
             per_cpu(read_cycles_time, min_cycles_cpu) )
            min_cycles_cpu = cpu;
        if ( per_cpu(read_cycles_time, cpu) >
             per_cpu(read_cycles_time, max_cycles_cpu) )
            max_cycles_cpu = cpu;
    }

    min_stime = per_cpu(read_clocks_time, min_stime_cpu);
    max_stime = per_cpu(read_clocks_time, max_stime_cpu);
    min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
    max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);

    spin_unlock(&lock);

    /* Fold this sample into the running statistics and report. */
    dif_stime = max_stime - min_stime;
    if ( dif_stime > maxdif_stime )
        maxdif_stime = dif_stime;
    sumdif_stime += dif_stime;
    dif_cycles = max_cycles - min_cycles;
    if ( dif_cycles > maxdif_cycles )
        maxdif_cycles = dif_cycles;
    sumdif_cycles += dif_cycles;
    count++;
    printk("Synced stime skew: max=%"PRIu64"ns avg=%"PRIu64"ns "
           "samples=%"PRIu32" current=%"PRIu64"ns\n",
           maxdif_stime, sumdif_stime/count, count, dif_stime);
    printk("Synced cycles skew: max=%"PRIu64" avg=%"PRIu64" "
           "samples=%"PRIu32" current=%"PRIu64"\n",
           maxdif_cycles, sumdif_cycles/count, count, dif_cycles);
}
434
run_all_nonirq_keyhandlers(void * unused)435 static void run_all_nonirq_keyhandlers(void *unused)
436 {
437 /* Fire all the non-IRQ-context diagnostic keyhandlers */
438 struct keyhandler *h;
439 int k;
440
441 console_start_log_everything();
442
443 for ( k = 0; k < ARRAY_SIZE(key_table); k++ )
444 {
445 process_pending_softirqs();
446 h = &key_table[k];
447 if ( !h->fn || !h->diagnostic || h->irq_callback )
448 continue;
449 printk("[%c: %s]\n", k, h->desc);
450 h->fn(k);
451 }
452
453 console_end_log_everything();
454 }
455
static DECLARE_TASKLET(run_all_keyhandlers_tasklet,
                       run_all_nonirq_keyhandlers, NULL);

/*
 * '*' handler (IRQ context): run every diagnostic keyhandler. IRQ-context
 * handlers are fired directly here; the remainder are deferred to
 * run_all_keyhandlers_tasklet.
 */
static void run_all_keyhandlers(unsigned char key, struct cpu_user_regs *regs)
{
    struct keyhandler *h;
    unsigned int k;

    watchdog_disable();

    printk("'%c' pressed -> firing all diagnostic keyhandlers\n", key);

    /* Fire all the IRQ-context diagnostic keyhandlers now */
    for ( k = 0; k < ARRAY_SIZE(key_table); k++ )
    {
        h = &key_table[k];
        if ( !h->irq_fn || !h->diagnostic || !h->irq_callback )
            continue;
        printk("[%c: %s]\n", k, h->desc);
        h->irq_fn(k, regs);
    }

    watchdog_enable();

    /* Trigger the others from a tasklet in non-IRQ context */
    tasklet_schedule(&run_all_keyhandlers_tasklet);
}
483
/* '%' handler (IRQ context): trap into the attached debugger (xendbg). */
static void do_debug_key(unsigned char key, struct cpu_user_regs *regs)
{
    printk("'%c' pressed -> trapping into debugger\n", key);
    (void)debugger_trap_fatal(0xf001, regs);

    /* Prevent tail call optimisation, which confuses xendbg. */
    barrier();
}
492
do_toggle_alt_key(unsigned char key,struct cpu_user_regs * regs)493 static void do_toggle_alt_key(unsigned char key, struct cpu_user_regs *regs)
494 {
495 alt_key_handling = !alt_key_handling;
496 printk("'%c' pressed -> using %s key handling\n", key,
497 alt_key_handling ? "alternative" : "normal");
498 }
499
initialize_keytable(void)500 void __init initialize_keytable(void)
501 {
502 if ( num_present_cpus() > 16 )
503 {
504 alt_key_handling = 1;
505 printk(XENLOG_INFO "Defaulting to alternative key handling; "
506 "send 'A' to switch to normal mode.\n");
507 }
508 }
509
510 /*
511 * Local variables:
512 * mode: C
513 * c-file-style: "BSD"
514 * c-basic-offset: 4
515 * tab-width: 4
516 * indent-tabs-mode: nil
517 * End:
518 */
519