/*
 * Copyright (c) 2016 Oracle and/or its affiliates. All rights reserved.
 *
 */

#include <xen/cpu.h>
#include <xen/elf.h>
#include <xen/err.h>
#include <xen/guest_access.h>
#include <xen/keyhandler.h>
#include <xen/lib.h>
#include <xen/list.h>
#include <xen/mm.h>
#include <xen/sched.h>
#include <xen/smp.h>
#include <xen/softirq.h>
#include <xen/spinlock.h>
#include <xen/string.h>
#include <xen/symbols.h>
#include <xen/version.h>
#include <xen/virtual_region.h>
#include <xen/vmap.h>
#include <xen/wait.h>
#include <xen/livepatch_elf.h>
#include <xen/livepatch.h>
#include <xen/livepatch_payload.h>

#include <asm/alternative.h>
#include <asm/event.h>

/*
 * Protects against payload_list operations and also allows only one
 * caller in schedule_work.
 */
static DEFINE_SPINLOCK(payload_lock);
static LIST_HEAD(payload_list);
/*
 * Patches which have been applied. Need RCU in case we crash (and then
 * traps code would iterate via applied_list) when adding entries on the list.
 */
static DEFINE_RCU_READ_LOCK(rcu_applied_lock);
static LIST_HEAD(applied_list);

static unsigned int payload_cnt;
static unsigned int payload_version = 1;

/* To contain the ELF Note header. */
struct livepatch_build_id {
    const void *p;
    unsigned int len;
};

struct payload {
    uint32_t state;                      /* One of the LIVEPATCH_STATE_*. */
    int32_t rc;                          /* 0 or -XEN_EXX. */
    bool reverted;                       /* Whether it was reverted. */
    bool safe_to_reapply;                /* Can apply safely after revert. */
    struct list_head list;               /* Linked to 'payload_list'. */
    const void *text_addr;               /* Virtual address of .text. */
    size_t text_size;                    /* .. and its size. */
    const void *rw_addr;                 /* Virtual address of .data. */
    size_t rw_size;                      /* .. and its size (if any). */
    const void *ro_addr;                 /* Virtual address of .rodata. */
    size_t ro_size;                      /* .. and its size (if any). */
    unsigned int pages;                  /* Total pages for [text,rw,ro]_addr. */
    struct list_head applied_list;       /* Linked to 'applied_list'. */
    struct livepatch_func *funcs;        /* The array of functions to patch. */
    unsigned int nfuncs;                 /* Nr of functions to patch. */
    const struct livepatch_symbol *symtab; /* All symbols. */
    const char *strtab;                  /* Pointer to .strtab. */
    struct virtual_region region;        /* symbol, bug.frame patching and
                                            exception table (x86). */
    unsigned int nsyms;                  /* Nr of entries in .strtab and symbols. */
    struct livepatch_build_id id;        /* ELFNOTE_DESC(.note.gnu.build-id) of the payload. */
    struct livepatch_build_id dep;       /* ELFNOTE_DESC(.livepatch.depends). */
    livepatch_loadcall_t *const *load_funcs;    /* The array of funcs to call after */
    livepatch_unloadcall_t *const *unload_funcs;/* load and unload of the payload. */
    unsigned int n_load_funcs;           /* Nr of the funcs to load and execute. */
    unsigned int n_unload_funcs;         /* Nr of funcs to call during unload. */
    char name[XEN_LIVEPATCH_NAME_SIZE];  /* Name of it. */
};

/* Defines an outstanding patching action. */
struct livepatch_work
{
    atomic_t semaphore;          /* Used to rendezvous CPUs in
                                    check_for_livepatch_work. */
    uint32_t timeout;            /* Timeout to do the operation. */
    struct payload *data;        /* The payload on which to act. */
    volatile bool_t do_work;     /* Signals work to do. */
    volatile bool_t ready;       /* Signals all CPUs synchronized. */
    unsigned int cmd;            /* Action request: LIVEPATCH_ACTION_*. */
};

/* There can be only one outstanding patching action. */
static struct livepatch_work livepatch_work;

/*
 * Indicate whether the CPU needs to consult the livepatch_work structure.
 * We want a per-CPU data structure, as otherwise check_for_livepatch_work
 * would hammer the global livepatch_work structure on every guest VMEXIT.
 * Having a per-CPU flag lessens the load.
 */
static DEFINE_PER_CPU(bool_t, work_to_do);

static int get_name(const struct xen_livepatch_name *name, char *n)
{
    if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
        return -EINVAL;

    if ( name->pad[0] || name->pad[1] || name->pad[2] )
        return -EINVAL;

    if ( copy_from_guest(n, name->name, name->size) )
        return -EFAULT;

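    /* The name must be NUL-terminated within the given size. */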
    if ( n[name->size - 1] )
        return -EINVAL;

    return 0;
}

static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n)
{
    if ( get_name(&upload->name, n) )
        return -EINVAL;

    if ( !upload->size )
        return -EINVAL;

    if ( upload->size > LIVEPATCH_MAX_SIZE )
        return -EINVAL;

    if ( !guest_handle_okay(upload->payload, upload->size) )
        return -EFAULT;

    return 0;
}

bool_t is_patch(const void *ptr)
{
    const struct payload *data;
    bool_t r = 0;

    /*
     * Only RCU locking since this list is only ever changed during apply
     * or revert context. And in case it dies there we need a safe list.
     */
    rcu_read_lock(&rcu_applied_lock);
    list_for_each_entry_rcu ( data, &applied_list, applied_list )
    {
        if ( (ptr >= data->rw_addr &&
              ptr < (data->rw_addr + data->rw_size)) ||
             (ptr >= data->ro_addr &&
              ptr < (data->ro_addr + data->ro_size)) ||
             (ptr >= data->text_addr &&
              ptr < (data->text_addr + data->text_size)) )
        {
            r = 1;
            break;
        }
    }
    rcu_read_unlock(&rcu_applied_lock);

    return r;
}

unsigned long livepatch_symbols_lookup_by_name(const char *symname)
{
    const struct payload *data;

    ASSERT(spin_is_locked(&payload_lock));
    list_for_each_entry ( data, &payload_list, list )
    {
        unsigned int i;

        for ( i = 0; i < data->nsyms; i++ )
        {
            if ( !data->symtab[i].new_symbol )
                continue;

            if ( !strcmp(data->symtab[i].name, symname) )
                return data->symtab[i].value;
        }
    }

    return 0;
}

static const char *livepatch_symbols_lookup(unsigned long addr,
                                            unsigned long *symbolsize,
                                            unsigned long *offset,
                                            char *namebuf)
{
    const struct payload *data;
    unsigned int i, best;
    const void *va = (const void *)addr;
    const char *n = NULL;

    /*
     * Only RCU locking since this list is only ever changed during apply
     * or revert context. And in case it dies there we need a safe list.
     */
    rcu_read_lock(&rcu_applied_lock);
    list_for_each_entry_rcu ( data, &applied_list, applied_list )
    {
        if ( va < data->text_addr ||
             va >= (data->text_addr + data->text_size) )
            continue;

        best = UINT_MAX;

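        /* Pick the symbol with the highest value that does not exceed addr. */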
        for ( i = 0; i < data->nsyms; i++ )
        {
            if ( data->symtab[i].value <= addr &&
                 (best == UINT_MAX ||
                  data->symtab[best].value < data->symtab[i].value) )
                best = i;
        }

        if ( best == UINT_MAX )
            break;

        if ( symbolsize )
            *symbolsize = data->symtab[best].size;
        if ( offset )
            *offset = addr - data->symtab[best].value;
        if ( namebuf )
            strlcpy(namebuf, data->name, KSYM_NAME_LEN);

        n = data->symtab[best].name;
        break;
    }
    rcu_read_unlock(&rcu_applied_lock);

    return n;
}

/* Lookup function's old address if not already resolved. */
static int resolve_old_address(struct livepatch_func *f,
                               const struct livepatch_elf *elf)
{
    if ( f->old_addr )
        return 0;

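    /* Try Xen's own symbol table first, then symbols from loaded payloads. */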
    f->old_addr = (void *)symbols_lookup_by_name(f->name);
    if ( !f->old_addr )
    {
        f->old_addr = (void *)livepatch_symbols_lookup_by_name(f->name);
        if ( !f->old_addr )
        {
            dprintk(XENLOG_ERR, LIVEPATCH "%s: Could not resolve old address of %s\n",
                    elf->name, f->name);
            return -ENOENT;
        }
    }
    dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Resolved old address %s => %p\n",
            elf->name, f->name, f->old_addr);

    return 0;
}

static struct payload *find_payload(const char *name)
{
    struct payload *data, *found = NULL;

    ASSERT(spin_is_locked(&payload_lock));
    list_for_each_entry ( data, &payload_list, list )
    {
        if ( !strcmp(data->name, name) )
        {
            found = data;
            break;
        }
    }

    return found;
}

/*
 * Functions related to XEN_SYSCTL_LIVEPATCH_UPLOAD (see livepatch_upload), and
 * freeing payload (XEN_SYSCTL_LIVEPATCH_ACTION:LIVEPATCH_ACTION_UNLOAD).
 */

static void free_payload_data(struct payload *payload)
{
    /* Set to zero until "move_payload". */
    if ( !payload->pages )
        return;

    vfree((void *)payload->text_addr);

    payload->pages = 0;
}

/*
 * calc_section computes the size (taking into account section alignment).
 *
 * Furthermore the offset is set with the offset from the start of the virtual
 * address space for the payload (using passed in size). This is used in
 * move_payload to figure out the destination location (load_addr).
 */
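/*
 * For example: with *size == 10 and sh_addralign == 8, align_size is
 * ROUNDUP(10, 8) == 16, so *offset becomes 16 and *size becomes
 * 16 + sh_size.
 */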
static void calc_section(const struct livepatch_elf_sec *sec, size_t *size,
                         unsigned int *offset)
{
    const Elf_Shdr *s = sec->sec;
    size_t align_size;

    align_size = ROUNDUP(*size, s->sh_addralign);
    *offset = align_size;
    *size = s->sh_size + align_size;
}

static int move_payload(struct payload *payload, struct livepatch_elf *elf)
{
    void *text_buf, *ro_buf, *rw_buf;
    unsigned int i, rw_buf_sec, rw_buf_cnt = 0;
    size_t size = 0;
    unsigned int *offset;
    int rc = 0;

    offset = xmalloc_array(unsigned int, elf->hdr->e_shnum);
    if ( !offset )
        return -ENOMEM;

    /* Compute size of different regions. */
    for ( i = 1; i < elf->hdr->e_shnum; i++ )
    {
        /*
         * Do nothing. These are .rel.text, .rel.*, .symtab, .strtab,
         * and .shstrtab. For the non-relocation sections we allocate and
         * copy these via other means - and the .rel.* ones we can ignore
         * as we only use them once during loading.
         *
         * Also ignore sections with zero size. Those can be for example:
         * .data or .bss.
         */
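        /*
         * Classification by section flags:
         *   SHF_EXECINSTR && !SHF_WRITE  -> text   (RX)
         *   !SHF_EXECINSTR &&  SHF_WRITE -> data   (RW)
         *   !SHF_EXECINSTR && !SHF_WRITE -> rodata (RO)
         */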
        if ( livepatch_elf_ignore_section(elf->sec[i].sec) )
            offset[i] = UINT_MAX;
        else if ( (elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
                  !(elf->sec[i].sec->sh_flags & SHF_WRITE) )
            calc_section(&elf->sec[i], &payload->text_size, &offset[i]);
        else if ( !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
                  (elf->sec[i].sec->sh_flags & SHF_WRITE) )
            calc_section(&elf->sec[i], &payload->rw_size, &offset[i]);
        else if ( !(elf->sec[i].sec->sh_flags & SHF_EXECINSTR) &&
                  !(elf->sec[i].sec->sh_flags & SHF_WRITE) )
            calc_section(&elf->sec[i], &payload->ro_size, &offset[i]);
        else
        {
            dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Not supporting %s section!\n",
                    elf->name, elf->sec[i].name);
            rc = -EOPNOTSUPP;
            goto out;
        }
    }

    /*
     * Total of all three regions - RX, RW, and RO. We have to keep them
     * on separate pages, so we PAGE_ALIGN the RX and RW regions to push
     * them onto separate pages. The last one will by default fall on its
     * own page.
     */
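    /*
     * Resulting layout (illustrative):
     *   text_buf: PAGE_ALIGN(text_size) bytes
     *   rw_buf:   PAGE_ALIGN(rw_size) bytes, immediately after text
     *   ro_buf:   ro_size bytes, immediately after rw
     */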
    size = PAGE_ALIGN(payload->text_size) + PAGE_ALIGN(payload->rw_size) +
           payload->ro_size;

    size = PFN_UP(size); /* Nr of pages. */
    text_buf = vmalloc_xen(size * PAGE_SIZE);
    if ( !text_buf )
    {
        dprintk(XENLOG_ERR, LIVEPATCH "%s: Could not allocate memory for payload!\n",
                elf->name);
        rc = -ENOMEM;
        goto out;
    }
    rw_buf = text_buf + PAGE_ALIGN(payload->text_size);
    ro_buf = rw_buf + PAGE_ALIGN(payload->rw_size);

    payload->pages = size;
    payload->text_addr = text_buf;
    payload->rw_addr = rw_buf;
    payload->ro_addr = ro_buf;

    for ( i = 1; i < elf->hdr->e_shnum; i++ )
    {
        if ( !livepatch_elf_ignore_section(elf->sec[i].sec) )
        {
            void *buf;

            if ( elf->sec[i].sec->sh_flags & SHF_EXECINSTR )
                buf = text_buf;
            else if ( elf->sec[i].sec->sh_flags & SHF_WRITE )
            {
                buf = rw_buf;
                rw_buf_sec = i;
                rw_buf_cnt++;
            }
            else
                buf = ro_buf;

            ASSERT(offset[i] != UINT_MAX);

            elf->sec[i].load_addr = buf + offset[i];

            /* Don't copy NOBITS - such as BSS. */
            if ( elf->sec[i].sec->sh_type != SHT_NOBITS )
            {
                memcpy(elf->sec[i].load_addr, elf->sec[i].data,
                       elf->sec[i].sec->sh_size);
                dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Loaded %s at %p\n",
                        elf->name, elf->sec[i].name, elf->sec[i].load_addr);
            }
            else
                memset(elf->sec[i].load_addr, 0, elf->sec[i].sec->sh_size);
        }
    }

    /*
     * Only one RW section with non-zero size: .livepatch.funcs,
     * or only RO sections.
     */
    if ( !rw_buf_cnt || (rw_buf_cnt == 1 &&
         !strcmp(elf->sec[rw_buf_sec].name, ELF_LIVEPATCH_FUNC)) )
        payload->safe_to_reapply = true;
 out:
    xfree(offset);

    return rc;
}

static int secure_payload(struct payload *payload, struct livepatch_elf *elf)
{
    int rc = 0;
    unsigned int text_pages, rw_pages, ro_pages;

    text_pages = PFN_UP(payload->text_size);

    if ( text_pages )
    {
        rc = arch_livepatch_secure(payload->text_addr, text_pages, LIVEPATCH_VA_RX);
        if ( rc )
            return rc;
    }
    rw_pages = PFN_UP(payload->rw_size);
    if ( rw_pages )
    {
        rc = arch_livepatch_secure(payload->rw_addr, rw_pages, LIVEPATCH_VA_RW);
        if ( rc )
            return rc;
    }

    ro_pages = PFN_UP(payload->ro_size);
    if ( ro_pages )
        rc = arch_livepatch_secure(payload->ro_addr, ro_pages, LIVEPATCH_VA_RO);

    ASSERT(ro_pages + rw_pages + text_pages == payload->pages);

    return rc;
}

static bool section_ok(const struct livepatch_elf *elf,
                       const struct livepatch_elf_sec *sec, size_t sz)
{
    if ( !elf || !sec )
        return false;

    if ( sec->sec->sh_size % sz )
    {
        dprintk(XENLOG_ERR, LIVEPATCH "%s: Wrong size %"PRIuElfWord" of %s (must be multiple of %zu)\n",
                elf->name, sec->sec->sh_size, sec->name, sz);
        return false;
    }

    return true;
}

static int check_special_sections(const struct livepatch_elf *elf)
{
    unsigned int i;
    static const char *const names[] = { ELF_LIVEPATCH_FUNC,
                                         ELF_LIVEPATCH_DEPENDS,
                                         ELF_BUILD_ID_NOTE };
    DECLARE_BITMAP(found, ARRAY_SIZE(names)) = { 0 };

    for ( i = 0; i < ARRAY_SIZE(names); i++ )
    {
        const struct livepatch_elf_sec *sec;

        sec = livepatch_elf_sec_by_name(elf, names[i]);
        if ( !sec )
        {
            dprintk(XENLOG_ERR, LIVEPATCH "%s: %s is missing!\n",
                    elf->name, names[i]);
            return -EINVAL;
        }

        if ( !sec->sec->sh_size )
        {
            dprintk(XENLOG_ERR, LIVEPATCH "%s: %s is empty!\n",
                    elf->name, names[i]);
            return -EINVAL;
        }

        if ( test_and_set_bit(i, found) )
        {
            dprintk(XENLOG_ERR, LIVEPATCH "%s: %s was seen more than once!\n",
                    elf->name, names[i]);
            return -EINVAL;
        }
    }

    return 0;
}

static int prepare_payload(struct payload *payload,
                           struct livepatch_elf *elf)
{
    const struct livepatch_elf_sec *sec;
    unsigned int i;
    struct livepatch_func *f;
    struct virtual_region *region;
    const Elf_Note *n;

    sec = livepatch_elf_sec_by_name(elf, ELF_LIVEPATCH_FUNC);
    ASSERT(sec);
    if ( !section_ok(elf, sec, sizeof(*payload->funcs)) )
        return -EINVAL;

    payload->funcs = sec->load_addr;
    payload->nfuncs = sec->sec->sh_size / sizeof(*payload->funcs);

    for ( i = 0; i < payload->nfuncs; i++ )
    {
        int rc;

        f = &(payload->funcs[i]);

        if ( f->version != LIVEPATCH_PAYLOAD_VERSION )
        {
            dprintk(XENLOG_ERR, LIVEPATCH "%s: Wrong version (%u). Expected %d!\n",
                    elf->name, f->version, LIVEPATCH_PAYLOAD_VERSION);
            return -EOPNOTSUPP;
        }

        /* 'old_addr', 'new_addr', 'new_size' can all be zero. */
        if ( !f->old_size )
        {
            dprintk(XENLOG_ERR, LIVEPATCH "%s: Address or size fields are zero!\n",
                    elf->name);
            return -EINVAL;
        }

        rc = arch_livepatch_verify_func(f);
        if ( rc )
            return rc;

        rc = resolve_old_address(f, elf);
        if ( rc )
            return rc;

        rc = livepatch_verify_distance(f);
        if ( rc )
            return rc;
    }

    sec = livepatch_elf_sec_by_name(elf, ".livepatch.hooks.load");
    if ( sec )
    {
        if ( !section_ok(elf, sec, sizeof(*payload->load_funcs)) )
            return -EINVAL;

        payload->load_funcs = sec->load_addr;
        payload->n_load_funcs = sec->sec->sh_size / sizeof(*payload->load_funcs);
    }

    sec = livepatch_elf_sec_by_name(elf, ".livepatch.hooks.unload");
    if ( sec )
    {
        if ( !section_ok(elf, sec, sizeof(*payload->unload_funcs)) )
            return -EINVAL;

        payload->unload_funcs = sec->load_addr;
        payload->n_unload_funcs = sec->sec->sh_size / sizeof(*payload->unload_funcs);
    }
    sec = livepatch_elf_sec_by_name(elf, ELF_BUILD_ID_NOTE);
    if ( sec )
    {
        const struct payload *data;

        n = sec->load_addr;

        if ( sec->sec->sh_size <= sizeof(*n) )
            return -EINVAL;

        if ( xen_build_id_check(n, sec->sec->sh_size,
                                &payload->id.p, &payload->id.len) )
            return -EINVAL;

        if ( !payload->id.len || !payload->id.p )
            return -EINVAL;

        /* Make sure it is not a duplicate. */
        list_for_each_entry ( data, &payload_list, list )
        {
            /* No way _this_ payload is on the list. */
            ASSERT(data != payload);
            if ( data->id.len == payload->id.len &&
                 !memcmp(data->id.p, payload->id.p, data->id.len) )
            {
                dprintk(XENLOG_DEBUG, LIVEPATCH "%s: Already loaded as %s!\n",
                        elf->name, data->name);
                return -EEXIST;
            }
        }
    }

    sec = livepatch_elf_sec_by_name(elf, ELF_LIVEPATCH_DEPENDS);
    if ( sec )
    {
        n = sec->load_addr;

        if ( sec->sec->sh_size <= sizeof(*n) )
            return -EINVAL;

        if ( xen_build_id_check(n, sec->sec->sh_size,
                                &payload->dep.p, &payload->dep.len) )
            return -EINVAL;

        if ( !payload->dep.len || !payload->dep.p )
            return -EINVAL;
    }

    /* Setup the virtual region with proper data. */
    region = &payload->region;

    region->symbols_lookup = livepatch_symbols_lookup;
    region->start = payload->text_addr;
    region->end = payload->text_addr + payload->text_size;

    /* Optional sections. */
    for ( i = 0; i < BUGFRAME_NR; i++ )
    {
        char str[14];

        snprintf(str, sizeof(str), ".bug_frames.%u", i);
        sec = livepatch_elf_sec_by_name(elf, str);
        if ( !sec )
            continue;

        if ( !section_ok(elf, sec, sizeof(*region->frame[i].bugs)) )
            return -EINVAL;

        region->frame[i].bugs = sec->load_addr;
        region->frame[i].n_bugs = sec->sec->sh_size /
                                  sizeof(*region->frame[i].bugs);
    }

    sec = livepatch_elf_sec_by_name(elf, ".altinstructions");
    if ( sec )
    {
#ifdef CONFIG_HAS_ALTERNATIVE
        struct alt_instr *a, *start, *end;

        if ( !section_ok(elf, sec, sizeof(*a)) )
            return -EINVAL;

        start = sec->load_addr;
        end = sec->load_addr + sec->sec->sh_size;

        for ( a = start; a < end; a++ )
        {
            const void *instr = ALT_ORIG_PTR(a);
            const void *replacement = ALT_REPL_PTR(a);

            if ( (instr < region->start || instr >= region->end) ||
                 (replacement < region->start || replacement >= region->end) )
            {
                dprintk(XENLOG_ERR, LIVEPATCH "%s Alt patching outside payload: %p!\n",
                        elf->name, instr);
                return -EINVAL;
            }
        }
        apply_alternatives(start, end);
#else
        dprintk(XENLOG_ERR, LIVEPATCH "%s: We don't support alternative patching!\n",
                elf->name);
        return -EOPNOTSUPP;
#endif
    }

    sec = livepatch_elf_sec_by_name(elf, ".ex_table");
    if ( sec )
    {
#ifdef CONFIG_HAS_EX_TABLE
        struct exception_table_entry *s, *e;

        if ( !section_ok(elf, sec, sizeof(*region->ex)) )
            return -EINVAL;

        s = sec->load_addr;
        e = sec->load_addr + sec->sec->sh_size;

        sort_exception_table(s, e);

        region->ex = s;
        region->ex_end = e;
#else
        dprintk(XENLOG_ERR, LIVEPATCH "%s: We don't support .ex_table!\n",
                elf->name);
        return -EOPNOTSUPP;
#endif
    }

    return 0;
}

static bool_t is_payload_symbol(const struct livepatch_elf *elf,
                                const struct livepatch_elf_sym *sym)
{
    if ( sym->sym->st_shndx == SHN_UNDEF ||
         sym->sym->st_shndx >= elf->hdr->e_shnum )
        return 0;

    /*
     * The payload is not a final image as we dynamically link against it.
     * As such the linker has left symbols we don't care about and which
     * binutils would have removed had it been a final image. Hence we:
     * - For SHF_ALLOC - ignore symbols referring to sections that are not
     *   loaded.
     */
    if ( !(elf->sec[sym->sym->st_shndx].sec->sh_flags & SHF_ALLOC) )
        return 0;

    /* - And ignore empty symbols (\0). */
    if ( *sym->name == '\0' )
        return 0;

    /*
     * - For SHF_MERGE - ignore local symbols referring to mergeable sections
     *   (ld squashes them all into one section and discards the symbols) when
     *   those symbols start with '.L' (like .LCx). Those are intermediate
     *   artifacts of assembly.
     *
     * See elf_link_input_bfd and _bfd_elf_is_local_label_name in binutils.
     */
    if ( (elf->sec[sym->sym->st_shndx].sec->sh_flags & SHF_MERGE) &&
         !strncmp(sym->name, ".L", 2) )
        return 0;

    return arch_livepatch_symbol_ok(elf, sym);
}

static int build_symbol_table(struct payload *payload,
                              const struct livepatch_elf *elf)
{
    unsigned int i, j, nsyms = 0;
    size_t strtab_len = 0;
    struct livepatch_symbol *symtab;
    char *strtab;

    ASSERT(payload->nfuncs);

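    /* Two passes: first count symbols and string space, then fill the tables. */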
    /* Recall that section @0 is always NULL. */
    for ( i = 1; i < elf->nsym; i++ )
    {
        if ( is_payload_symbol(elf, elf->sym + i) )
        {
            nsyms++;
            strtab_len += strlen(elf->sym[i].name) + 1;
        }
    }

    symtab = xzalloc_array(struct livepatch_symbol, nsyms);
    strtab = xzalloc_array(char, strtab_len);

    if ( !strtab || !symtab )
    {
        xfree(strtab);
        xfree(symtab);
        return -ENOMEM;
    }

    nsyms = 0;
    strtab_len = 0;
    for ( i = 1; i < elf->nsym; i++ )
    {
        if ( is_payload_symbol(elf, elf->sym + i) )
        {
            symtab[nsyms].name = strtab + strtab_len;
            symtab[nsyms].size = elf->sym[i].sym->st_size;
            symtab[nsyms].value = elf->sym[i].sym->st_value;
            symtab[nsyms].new_symbol = 0; /* May be overwritten below. */
            strtab_len += strlcpy(strtab + strtab_len, elf->sym[i].name,
                                  KSYM_NAME_LEN) + 1;
            nsyms++;
        }
    }

    for ( i = 0; i < nsyms; i++ )
    {
        bool_t found = 0;

        for ( j = 0; j < payload->nfuncs; j++ )
        {
            if ( symtab[i].value == (unsigned long)payload->funcs[j].new_addr )
            {
                found = 1;
                break;
            }
        }

        if ( !found )
        {
            if ( symbols_lookup_by_name(symtab[i].name) ||
                 livepatch_symbols_lookup_by_name(symtab[i].name) )
            {
                dprintk(XENLOG_ERR, LIVEPATCH "%s: duplicate new symbol: %s\n",
                        elf->name, symtab[i].name);
                xfree(symtab);
                xfree(strtab);
                return -EEXIST;
            }
            symtab[i].new_symbol = 1;
            dprintk(XENLOG_DEBUG, LIVEPATCH "%s: new symbol %s\n",
                    elf->name, symtab[i].name);
        }
        else
        {
            /* new_symbol is not set. */
            dprintk(XENLOG_DEBUG, LIVEPATCH "%s: overriding symbol %s\n",
                    elf->name, symtab[i].name);
        }
    }

    payload->symtab = symtab;
    payload->strtab = strtab;
    payload->nsyms = nsyms;

    return 0;
}

static void free_payload(struct payload *data)
{
    ASSERT(spin_is_locked(&payload_lock));
    list_del(&data->list);
    payload_cnt--;
    payload_version++;
    free_payload_data(data);
    xfree((void *)data->symtab);
    xfree((void *)data->strtab);
    xfree(data);
}

static int load_payload_data(struct payload *payload, void *raw, size_t len)
{
    struct livepatch_elf elf = { .name = payload->name, .len = len };
    int rc = 0;

    rc = livepatch_elf_load(&elf, raw);
    if ( rc )
        goto out;

    rc = move_payload(payload, &elf);
    if ( rc )
        goto out;

    rc = livepatch_elf_resolve_symbols(&elf);
    if ( rc )
        goto out;

    rc = livepatch_elf_perform_relocs(&elf);
    if ( rc )
        goto out;

    rc = check_special_sections(&elf);
    if ( rc )
        goto out;

    rc = prepare_payload(payload, &elf);
    if ( rc )
        goto out;

    rc = build_symbol_table(payload, &elf);
    if ( rc )
        goto out;

    rc = secure_payload(payload, &elf);

 out:
    if ( rc )
        free_payload_data(payload);

    /* Free our temporary data structure. */
    livepatch_elf_free(&elf);

    return rc;
}

static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
{
    struct payload *data, *found;
    char n[XEN_LIVEPATCH_NAME_SIZE];
    void *raw_data;
    int rc;

    rc = verify_payload(upload, n);
    if ( rc )
        return rc;

    data = xzalloc(struct payload);
    raw_data = vmalloc(upload->size);

    spin_lock(&payload_lock);

    found = find_payload(n);
    if ( IS_ERR(found) )
        rc = PTR_ERR(found);
    else if ( found )
        rc = -EEXIST;
    else if ( !data || !raw_data )
        rc = -ENOMEM;
    else if ( __copy_from_guest(raw_data, upload->payload, upload->size) )
        rc = -EFAULT;
    else
    {
        memcpy(data->name, n, strlen(n));

        rc = load_payload_data(data, raw_data, upload->size);
        if ( rc )
            goto out;

        data->state = LIVEPATCH_STATE_CHECKED;
        INIT_LIST_HEAD(&data->list);
        INIT_LIST_HEAD(&data->applied_list);

        list_add_tail(&data->list, &payload_list);
        payload_cnt++;
        payload_version++;
    }

 out:
    spin_unlock(&payload_lock);

    vfree(raw_data);

    if ( rc && data )
    {
        xfree((void *)data->symtab);
        xfree((void *)data->strtab);
        xfree(data);
    }

    return rc;
}

static int livepatch_get(struct xen_sysctl_livepatch_get *get)
{
    struct payload *data;
    int rc;
    char n[XEN_LIVEPATCH_NAME_SIZE];

    rc = get_name(&get->name, n);
    if ( rc )
        return rc;

    spin_lock(&payload_lock);

    data = find_payload(n);
    if ( IS_ERR_OR_NULL(data) )
    {
        spin_unlock(&payload_lock);

        if ( !data )
            return -ENOENT;

        return PTR_ERR(data);
    }

    get->status.state = data->state;
    get->status.rc = data->rc;

    spin_unlock(&payload_lock);

    return 0;
}

static int livepatch_list(struct xen_sysctl_livepatch_list *list)
{
    struct xen_livepatch_status status;
    struct payload *data;
    unsigned int idx = 0, i = 0;
    int rc = 0;

    if ( list->nr > 1024 )
        return -E2BIG;

    if ( list->pad )
        return -EINVAL;

    if ( list->nr &&
         (!guest_handle_okay(list->status, list->nr) ||
          !guest_handle_okay(list->name, XEN_LIVEPATCH_NAME_SIZE * list->nr) ||
          !guest_handle_okay(list->len, list->nr)) )
        return -EINVAL;

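    /*
     * list->idx is the continuation point supplied by the caller; on exit,
     * list->nr is updated to hold the number of entries still remaining.
     */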
    spin_lock(&payload_lock);
    if ( list->idx >= payload_cnt && payload_cnt )
    {
        spin_unlock(&payload_lock);
        return -EINVAL;
    }

    if ( list->nr )
    {
        list_for_each_entry ( data, &payload_list, list )
        {
            uint32_t len;

            if ( list->idx > i++ )
                continue;

            status.state = data->state;
            status.rc = data->rc;
            len = strlen(data->name) + 1;

            /* N.B. 'idx' != 'i'. */
            if ( __copy_to_guest_offset(list->name, idx * XEN_LIVEPATCH_NAME_SIZE,
                                        data->name, len) ||
                 __copy_to_guest_offset(list->len, idx, &len, 1) ||
                 __copy_to_guest_offset(list->status, idx, &status, 1) )
            {
                rc = -EFAULT;
                break;
            }

            idx++;

            if ( (idx >= list->nr) || hypercall_preempt_check() )
                break;
        }
    }
    list->nr = payload_cnt - i; /* Remaining amount. */
    list->version = payload_version;
    spin_unlock(&payload_lock);

    /* And how many we have processed. */
    return rc ?: idx;
}

/*
 * The following functions get the CPUs into an appropriate state and
 * apply (or revert) each of the payload's functions. This is needed
 * for XEN_SYSCTL_LIVEPATCH_ACTION operation (see livepatch_action).
 */

static int apply_payload(struct payload *data)
{
    unsigned int i;
    int rc;

    printk(XENLOG_INFO LIVEPATCH "%s: Applying %u functions\n",
           data->name, data->nfuncs);

    rc = arch_livepatch_quiesce();
    if ( rc )
    {
        printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
        return rc;
    }

    /*
     * Since we are running with IRQs disabled and the hooks may call common
     * code - which expects certain spinlocks to run with IRQs enabled - we
     * temporarily disable the spin locks IRQ state checks.
     */
    spin_debug_disable();
    for ( i = 0; i < data->n_load_funcs; i++ )
        data->load_funcs[i]();
    spin_debug_enable();

    ASSERT(!local_irq_is_enabled());

    for ( i = 0; i < data->nfuncs; i++ )
        arch_livepatch_apply(&data->funcs[i]);

    arch_livepatch_revive();

    /*
     * We need the RCU variant (which has barriers) in case we crash here.
     * The applied_list is iterated by the trap code.
     */
    list_add_tail_rcu(&data->applied_list, &applied_list);
    register_virtual_region(&data->region);

    return 0;
}

static int revert_payload(struct payload *data)
{
    unsigned int i;
    int rc;

    printk(XENLOG_INFO LIVEPATCH "%s: Reverting\n", data->name);

    rc = arch_livepatch_quiesce();
    if ( rc )
    {
        printk(XENLOG_ERR LIVEPATCH "%s: unable to quiesce!\n", data->name);
        return rc;
    }

    for ( i = 0; i < data->nfuncs; i++ )
        arch_livepatch_revert(&data->funcs[i]);

    /*
     * Since we are running with IRQs disabled and the hooks may call common
     * code - which expects certain spinlocks to run with IRQs enabled - we
     * temporarily disable the spin locks IRQ state checks.
     */
    spin_debug_disable();
    for ( i = 0; i < data->n_unload_funcs; i++ )
        data->unload_funcs[i]();
    spin_debug_enable();

    ASSERT(!local_irq_is_enabled());

    arch_livepatch_revive();

    /*
     * We need the RCU variant (which has barriers) in case we crash here.
     * The applied_list is iterated by the trap code.
     */
    list_del_rcu(&data->applied_list);
    unregister_virtual_region(&data->region);

    data->reverted = true;
    return 0;
}

/*
 * This function is executed with all other CPUs parked on a shallow stack
 * (they may be in cpu_idle) and with IRQs disabled.
 */
static void livepatch_do_action(void)
{
    int rc;
    struct payload *data, *other, *tmp;

    data = livepatch_work.data;
    /*
     * This function and the transition from asm to C code should be the only
     * one on any stack. No need to lock the payload list or applied list.
     */
    switch ( livepatch_work.cmd )
    {
    case LIVEPATCH_ACTION_APPLY:
        rc = apply_payload(data);
        if ( rc == 0 )
            data->state = LIVEPATCH_STATE_APPLIED;
        break;

    case LIVEPATCH_ACTION_REVERT:
        rc = revert_payload(data);
        if ( rc == 0 )
            data->state = LIVEPATCH_STATE_CHECKED;
        break;

    case LIVEPATCH_ACTION_REPLACE:
        rc = 0;
        /*
         * N.B: Use the 'applied_list' member, not 'list'. We also abuse the
         * 'normal' list iterator as the list is an RCU one.
         */
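        /* Revert in the reverse order in which the payloads were applied. */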
        list_for_each_entry_safe_reverse ( other, tmp, &applied_list, applied_list )
        {
            other->rc = revert_payload(other);
            if ( other->rc == 0 )
                other->state = LIVEPATCH_STATE_CHECKED;
            else
            {
                rc = -EINVAL;
                break;
            }
        }

        if ( rc == 0 )
        {
            rc = apply_payload(data);
            if ( rc == 0 )
                data->state = LIVEPATCH_STATE_APPLIED;
        }
        break;

    default:
        rc = -EINVAL; /* Make GCC5 happy. */
        ASSERT_UNREACHABLE();
        break;
    }

    /* We must set rc as livepatch_action sets it to -EAGAIN when kicking off. */
    data->rc = rc;
}

static bool_t is_work_scheduled(const struct payload *data)
{
    ASSERT(spin_is_locked(&payload_lock));

    return livepatch_work.do_work && livepatch_work.data == data;
}

static int schedule_work(struct payload *data, uint32_t cmd, uint32_t timeout)
{
    ASSERT(spin_is_locked(&payload_lock));

    /* Fail if an operation is already scheduled. */
    if ( livepatch_work.do_work )
        return -EBUSY;

    if ( !get_cpu_maps() )
    {
        printk(XENLOG_ERR LIVEPATCH "%s: unable to get cpu_maps lock!\n",
               data->name);
        return -EBUSY;
    }

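    /* A zero timeout selects the default of 30ms. */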
    livepatch_work.cmd = cmd;
    livepatch_work.data = data;
    livepatch_work.timeout = timeout ?: MILLISECS(30);

    dprintk(XENLOG_DEBUG, LIVEPATCH "%s: timeout is %"PRIu32"ns\n",
            data->name, livepatch_work.timeout);

    atomic_set(&livepatch_work.semaphore, -1);

    livepatch_work.ready = 0;

    smp_wmb();

    livepatch_work.do_work = 1;
    this_cpu(work_to_do) = 1;

    put_cpu_maps();

    return 0;
}

static void reschedule_fn(void *unused)
{
    this_cpu(work_to_do) = 1;
    raise_softirq(SCHEDULE_SOFTIRQ);
}

static int livepatch_spin(atomic_t *counter, s_time_t timeout,
                          unsigned int cpus, const char *s)
{
    int rc = 0;

    while ( atomic_read(counter) != cpus && NOW() < timeout )
        cpu_relax();

    /* Log & abort. */
    if ( atomic_read(counter) != cpus )
    {
        printk(XENLOG_ERR LIVEPATCH "%s: Timed out on semaphore in %s quiesce phase %u/%u\n",
               livepatch_work.data->name, s, atomic_read(counter), cpus);
        rc = -EBUSY;
        livepatch_work.data->rc = rc;
        smp_wmb();
        livepatch_work.do_work = 0;
    }

    return rc;
}

/*
 * The main function which manages the work of quiescing the system and
 * patching code.
 */
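/*
 * Rendezvous sketch: the first CPU to bump the semaphore from -1 to 0
 * becomes the master. It IPIs the other CPUs, waits for them to arrive,
 * then resets the semaphore and sets 'ready' so that everyone disables
 * IRQs; it performs the patching while the others spin until 'do_work'
 * is cleared.
 */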
void check_for_livepatch_work(void)
{
#define ACTION(x) [LIVEPATCH_ACTION_##x] = #x
    static const char *const names[] = {
        ACTION(APPLY),
        ACTION(REVERT),
        ACTION(REPLACE),
    };
#undef ACTION
    unsigned int cpu = smp_processor_id();
    s_time_t timeout;
    unsigned long flags;

    /* Fast path: no work to do. */
    if ( !per_cpu(work_to_do, cpu) )
        return;

    smp_rmb();
    /* In case we aborted, other CPUs can skip right away. */
    if ( !livepatch_work.do_work )
    {
        per_cpu(work_to_do, cpu) = 0;
        return;
    }

    ASSERT(local_irq_is_enabled());

    /* Set at -1, so will go up to num_online_cpus - 1. */
    if ( atomic_inc_and_test(&livepatch_work.semaphore) )
    {
        struct payload *p;
        unsigned int cpus;

        p = livepatch_work.data;
        if ( !get_cpu_maps() )
        {
            printk(XENLOG_ERR LIVEPATCH "%s: CPU%u - unable to get cpu_maps lock!\n",
                   p->name, cpu);
            per_cpu(work_to_do, cpu) = 0;
            livepatch_work.data->rc = -EBUSY;
            smp_wmb();
            livepatch_work.do_work = 0;
            /*
             * Do NOT decrement livepatch_work.semaphore - as that may cause
             * another CPU (which may at this point be ready to increment it)
             * to assume the role of master and then needlessly time out
             * (as do_work is zero).
             */
            return;
        }
        /* "Mask" NMIs. */
        arch_livepatch_mask();

        barrier(); /* MUST do it after get_cpu_maps. */
        cpus = num_online_cpus() - 1;

        if ( cpus )
        {
            dprintk(XENLOG_DEBUG, LIVEPATCH "%s: CPU%u - IPIing the other %u CPUs\n",
                    p->name, cpu, cpus);
            smp_call_function(reschedule_fn, NULL, 0);
        }

        timeout = livepatch_work.timeout + NOW();
        if ( livepatch_spin(&livepatch_work.semaphore, timeout, cpus, "CPU") )
            goto abort;

        /* All CPUs are waiting, now signal to disable IRQs. */
        atomic_set(&livepatch_work.semaphore, 0);
        /*
         * MUST have a barrier after the semaphore so that the other CPUs
         * don't leak out of the 'Wait for all CPUs to rendezvous' loop and
         * increment 'semaphore' before we set it to zero.
         */
        smp_wmb();
        livepatch_work.ready = 1;

        if ( !livepatch_spin(&livepatch_work.semaphore, timeout, cpus, "IRQ") )
        {
            local_irq_save(flags);
            /* Do the patching. */
            livepatch_do_action();
            /* Serialize and flush out the CPU via CPUID instruction (on x86). */
            arch_livepatch_post_action();
            local_irq_restore(flags);
        }

 abort:
        arch_livepatch_unmask();

        per_cpu(work_to_do, cpu) = 0;
        livepatch_work.do_work = 0;

        /* put_cpu_maps has a barrier(). */
        put_cpu_maps();

        printk(XENLOG_INFO LIVEPATCH "%s finished %s with rc=%d\n",
               p->name, names[livepatch_work.cmd], p->rc);
    }
    else
    {
        /* Wait for all CPUs to rendezvous. */
        while ( livepatch_work.do_work && !livepatch_work.ready )
            cpu_relax();

        /* Disable IRQs and signal. */
        local_irq_save(flags);
        /*
         * We re-use the semaphore, so MUST have it reset by the master before
         * we exit the loop above.
         */
        atomic_inc(&livepatch_work.semaphore);

        /* Wait for patching to complete. */
        while ( livepatch_work.do_work )
            cpu_relax();

        /* To flush out the pipeline. */
        arch_livepatch_post_action();
        local_irq_restore(flags);

        per_cpu(work_to_do, cpu) = 0;
    }
}

/*
 * Only allow a dependent payload to be applied on top of the correct
 * build-id.
 *
 * This enforces a stacking order - the first payload MUST be against the
 * hypervisor. The second against the first payload, and so on.
 *
 * Unless the 'internal' parameter is used - in which case we only
 * check against the hypervisor.
 */
static int build_id_dep(struct payload *payload, bool_t internal)
{
    const void *id = NULL;
    unsigned int len = 0;
    int rc;
    const char *name = "hypervisor";

    ASSERT(payload->dep.len && payload->dep.p);

    /* First time user is against hypervisor. */
    if ( internal )
    {
        rc = xen_build_id(&id, &len);
        if ( rc )
            return rc;
    }
    else
    {
        /* We should be against the last applied one. */
        const struct payload *data;

        data = list_last_entry(&applied_list, struct payload, applied_list);

        id = data->id.p;
        len = data->id.len;
        name = data->name;
    }

    if ( payload->dep.len != len ||
         memcmp(id, payload->dep.p, len) )
    {
        dprintk(XENLOG_ERR, "%s%s: check against %s build-id failed!\n",
                LIVEPATCH, payload->name, name);
        return -EINVAL;
    }

    return 0;
}

static int livepatch_action(struct xen_sysctl_livepatch_action *action)
{
    struct payload *data;
    char n[XEN_LIVEPATCH_NAME_SIZE];
    int rc;

    rc = get_name(&action->name, n);
    if ( rc )
        return rc;

    spin_lock(&payload_lock);

    data = find_payload(n);
    if ( IS_ERR_OR_NULL(data) )
    {
        spin_unlock(&payload_lock);

        if ( !data )
            return -ENOENT;

        return PTR_ERR(data);
    }

    if ( is_work_scheduled(data) )
    {
        rc = -EBUSY;
        goto out;
    }

    switch ( action->cmd )
    {
    case LIVEPATCH_ACTION_UNLOAD:
        if ( data->state == LIVEPATCH_STATE_CHECKED )
        {
            free_payload(data);
            /* No touching 'data' from here on! */
            data = NULL;
        }
        else
            rc = -EINVAL;
        break;

    case LIVEPATCH_ACTION_REVERT:
        if ( data->state == LIVEPATCH_STATE_APPLIED )
        {
            const struct payload *p;

            p = list_last_entry(&applied_list, struct payload, applied_list);
            ASSERT(p);
            /* We should be the last applied one. */
            if ( p != data )
            {
                dprintk(XENLOG_ERR, "%s%s: can't revert. Top is %s!\n",
                        LIVEPATCH, data->name, p->name);
                rc = -EBUSY;
                break;
            }
            data->rc = -EAGAIN;
            rc = schedule_work(data, action->cmd, action->timeout);
        }
        break;

    case LIVEPATCH_ACTION_APPLY:
        if ( data->state == LIVEPATCH_STATE_CHECKED )
        {
            /*
             * It is unsafe to apply a reverted payload as the .data (or .bss)
             * may not be in pristine condition. Hence one MUST unload and
             * then apply the patch again. Unless the payload has only one
             * RW section (.livepatch.funcs).
             */
            if ( data->reverted && !data->safe_to_reapply )
            {
                dprintk(XENLOG_ERR, "%s%s: can't re-apply as payload has .data. Please unload!\n",
                        LIVEPATCH, data->name);
                data->rc = -EINVAL;
                break;
            }

            rc = build_id_dep(data, !!list_empty(&applied_list));
            if ( rc )
                break;
            data->rc = -EAGAIN;
            rc = schedule_work(data, action->cmd, action->timeout);
        }
        break;

    case LIVEPATCH_ACTION_REPLACE:
        if ( data->state == LIVEPATCH_STATE_CHECKED )
        {
            rc = build_id_dep(data, 1 /* against hypervisor. */);
            if ( rc )
                break;
            data->rc = -EAGAIN;
            rc = schedule_work(data, action->cmd, action->timeout);
        }
        break;

    default:
        rc = -EOPNOTSUPP;
        break;
    }

 out:
    spin_unlock(&payload_lock);

    return rc;
}

int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
{
    int rc;

    if ( livepatch->pad )
        return -EINVAL;

    switch ( livepatch->cmd )
    {
    case XEN_SYSCTL_LIVEPATCH_UPLOAD:
        rc = livepatch_upload(&livepatch->u.upload);
        break;

    case XEN_SYSCTL_LIVEPATCH_GET:
        rc = livepatch_get(&livepatch->u.get);
        break;

    case XEN_SYSCTL_LIVEPATCH_LIST:
        rc = livepatch_list(&livepatch->u.list);
        break;

    case XEN_SYSCTL_LIVEPATCH_ACTION:
        rc = livepatch_action(&livepatch->u.action);
        break;

    default:
        rc = -EOPNOTSUPP;
        break;
    }

    return rc;
}

static const char *state2str(unsigned int state)
{
#define STATE(x) [LIVEPATCH_STATE_##x] = #x
    static const char *const names[] = {
        STATE(CHECKED),
        STATE(APPLIED),
    };
#undef STATE

    if ( state >= ARRAY_SIZE(names) || !names[state] )
        return "unknown";

    return names[state];
}

static void livepatch_printall(unsigned char key)
{
    struct payload *data;
    const void *binary_id = NULL;
    unsigned int len = 0;
    unsigned int i;

    printk("'%c' pressed - Dumping all livepatch patches\n", key);

    if ( !xen_build_id(&binary_id, &len) )
        printk("build-id: %*phN\n", len, binary_id);

    if ( !spin_trylock(&payload_lock) )
    {
        printk("Lock held. Try again.\n");
        return;
    }

    list_for_each_entry ( data, &payload_list, list )
    {
        printk(" name=%s state=%s(%d) %p (.data=%p, .rodata=%p) using %u pages.\n",
               data->name, state2str(data->state), data->state, data->text_addr,
               data->rw_addr, data->ro_addr, data->pages);

        for ( i = 0; i < data->nfuncs; i++ )
        {
            struct livepatch_func *f = &(data->funcs[i]);
            printk("    %s patch %p(%u) with %p (%u)\n",
                   f->name, f->old_addr, f->old_size, f->new_addr, f->new_size);

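            /* Drop the lock periodically so softirqs are not starved. */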
            if ( i && !(i % 64) )
            {
                spin_unlock(&payload_lock);
                process_pending_softirqs();
                if ( !spin_trylock(&payload_lock) )
                {
                    printk("Couldn't reacquire lock. Try again.\n");
                    return;
                }
            }
        }
        if ( data->id.len )
            printk("build-id=%*phN\n", data->id.len, data->id.p);

        if ( data->dep.len )
            printk("depend-on=%*phN\n", data->dep.len, data->dep.p);
    }

    spin_unlock(&payload_lock);
}

static int __init livepatch_init(void)
{
    register_keyhandler('x', livepatch_printall, "print livepatch info", 1);

    arch_livepatch_init();
    return 0;
}
__initcall(livepatch_init);

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */
