/*
 * Copyright (c) 2010-2012 United States Government, as represented by
 * the Secretary of Defense. All rights reserved.
 *
 * based off of the original tools/vtpm_manager code base which is:
 * Copyright (c) 2005, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 * * Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <inttypes.h>
#include <string.h>
#include <stdlib.h>
#include <mini-os/console.h>
#include <mini-os/lib.h>
#include <polarssl/sha1.h>
#include <polarssl/sha2.h>

#include "marshal.h"
#include "log.h"
#include "vtpm_disk.h"
#include "vtpmmgr.h"
#include "tpm.h"
#include "tpmrsa.h"
#include "tcg.h"
#include "mgmt_authority.h"
#include "disk_crypto.h"

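/* Generate a random version-4 UUID for a newly created vTPM. */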
static void gen_random_uuid(uuid_t uuid)
{
    do_random(uuid, 16);
    // make the 128-bit random number a valid UUID (122 bits remain)
    uuid[6] = 0x40 | (uuid[6] & 0x0F);
    uuid[8] = 0x80 | (uuid[8] & 0x3F);
}

/*
 * Instead of using a kernel hash, which requires a trusted domain builder to
 * report, use the XSM label as a substitute.
 */
static TPM_RESULT find_vtpm_khash(int domid, struct tpm_opaque *opq)
{
    char buf[128];
    int i, rv;
    buf[127] = 0;
    rv = tpmback_get_peercontext(opq->domid, opq->handle, buf, sizeof(buf) - 1);
    if (rv < 0)
        return TPM_FAIL;

    sha1((void*)buf, strlen(buf), opq->kern_hash);

    /*
     * As a hack to support the use of the XSM user field as an optional
     * wildcard, check the hash against the group here. If it fails, replace
     * the user field with a "*" and return the hash of that value.
     */
    for(i=0; i < be32_native(opq->group->seal_bits.nr_kerns); i++) {
        if (!memcmp(opq->group->seal_bits.kernels[i].bits, opq->kern_hash, 20)) {
            return TPM_SUCCESS;
        }
    }

    char* upos = strchr(buf, ':');
    if (upos == NULL || upos == buf)
        return TPM_SUCCESS;

    upos--;
    upos[0] = '*';

    sha1((void*)upos, strlen(upos), opq->kern_hash);
    return TPM_SUCCESS;
}

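/*
 * Look up the vTPM behind this connection and verify that its kernel (XSM
 * label) hash is on the group's authorized list before marking it open.
 */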
static TPM_RESULT find_vtpm_verified(int domid, struct tpm_opaque *opq)
{
    TPM_RESULT rc;
    int i;
    if (opq->vtpm)
        return TPM_SUCCESS;

    rc = find_vtpm(&opq->group, &opq->vtpm, *opq->uuid);
    if (rc)
        return TPM_BAD_PARAMETER;

    if (opq->vtpm->flags & VTPM_FLAG_OPEN) {
        printk("Attempted to open vTPM twice!\n");
        opq->vtpm = NULL;
        opq->group = NULL;
        return TPM_DISABLED;
    }

    rc = find_vtpm_khash(domid, opq);
    if (rc)
        return rc;

    for(i=0; i < be32_native(opq->group->seal_bits.nr_kerns); i++) {
        if (!memcmp(opq->group->seal_bits.kernels[i].bits, opq->kern_hash, 20)) {
            opq->vtpm->flags |= VTPM_FLAG_OPEN;
            return TPM_SUCCESS;
        }
    }
    printk("Unauthorized vTPM kernel image used!\n");
    return TPM_DISABLED;
}

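/*
 * VTPM_ORD_SAVEHASHKEY: store the hash/key blob supplied by the client vTPM
 * (52 to 64 bytes) in this vTPM's manager record. If the UUID is unknown,
 * the vTPM is auto-created in group 0.
 */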
static TPM_RESULT vtpmmgr_SaveHashKey(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
{
    TPM_RESULT status = TPM_SUCCESS;
    int rc = 0;

    size_t bufsize = tpmcmd->req_len - VTPM_COMMAND_HEADER_SIZE;
    const void *buf = tpmcmd->req + VTPM_COMMAND_HEADER_SIZE;

    if (bufsize < 52) {
        vtpmlogerror(VTPM_LOG_VTPM, "VTPM_ORD_SAVEHASHKEY hashkey too short!\n");
        status = TPM_BAD_PARAMETER;
        goto abort_egress;
    }
    if (bufsize > 64) {
        vtpmlogerror(VTPM_LOG_VTPM, "VTPM_ORD_SAVEHASHKEY hashkey too long!\n");
        status = TPM_BAD_PARAMETER;
        goto abort_egress;
    }

    vtpmloginfo(VTPM_LOG_VTPM, "vtpmmgr_SaveHashKey\n");
    status = find_vtpm_verified(tpmcmd->domid, opq);

    // auto-create vTPMs in group0 when saving a new UUID
    // TODO restrict to certain UUIDs (such as all-zero)
    // this is not done yet to simplify use of the TPM Manager
    if (status == TPM_BAD_PARAMETER) {
        opq->group = g_mgr->groups[0].v;
        rc = create_vtpm(opq->group, &opq->vtpm, *opq->uuid);
        if (rc) {
            status = TPM_BAD_PARAMETER;
            goto abort_egress;
        }
        if (opq->group->nr_vtpms == 1)
            opq->vtpm->flags = VTPM_FLAG_ADMIN;
        printk("SaveHashKey with unknown UUID="UUID_FMT" - creating in auth0 (f=%d)\n",
               UUID_BYTES((*opq->uuid)), opq->vtpm->flags);
        status = TPM_SUCCESS;
    }
    if (status)
        goto abort_egress;

    memcpy(opq->vtpm->data, buf, bufsize);
    memset(opq->vtpm->data + bufsize, 0, 64 - bufsize);

    vtpm_sync(opq->group, opq->vtpm);

abort_egress:
    pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, VTPM_COMMAND_HEADER_SIZE, status);
    tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;

    return status;
}

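/*
 * VTPM_ORD_LOADHASHKEY: return the previously saved hash/key blob to an
 * authorized vTPM. 52 bytes are returned if the trailing 12 bytes are all
 * zero, otherwise the full 64-byte buffer.
 */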
static TPM_RESULT vtpmmgr_LoadHashKey(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
{
    TPM_RESULT status = TPM_SUCCESS;
    int i;
    uint8_t *buf = tpmcmd->resp + VTPM_COMMAND_HEADER_SIZE;

    vtpmloginfo(VTPM_LOG_VTPM, "vtpmmgr_LoadHashKey\n");
    tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;

    status = find_vtpm_verified(tpmcmd->domid, opq);
    if (status)
        goto abort_egress;

    memcpy(buf, opq->vtpm->data, 64);

    for(i=52; i < 64; i++) {
        if (buf[i]) {
            tpmcmd->resp_len += 64;
            goto abort_egress;
        }
    }
    tpmcmd->resp_len += 52;

abort_egress:
    pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, tpmcmd->resp_len, status);

    return status;
}

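/*
 * Helper macros for the management command handlers below: CMD_BEGIN/CMD_END
 * declare the status and cursor variables and pack the response header,
 * UNPACK_* pull fields out of the request buffer, and PACK_OUT/PACK_BUF
 * append fields to the response.
 */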
#define CMD_BEGIN \
    TPM_RESULT status = TPM_SUCCESS; \
    uint32_t in_pos = VTPM_COMMAND_HEADER_SIZE; \
    tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE; \
    vtpmloginfo(VTPM_LOG_TPM, "%s\n", __func__);

#define CMD_END \
abort_egress: \
    if (status) \
        tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE; \
    pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, tpmcmd->resp_len, status); \
    return status

#define UNPACK_IN(type, item...) do { \
    status = unpack3_ ## type (tpmcmd->req, &in_pos, tpmcmd->req_len, item); \
    if (status) { \
        status = TPM_BAD_PARAMETER; \
        goto abort_egress; \
    } \
} while (0)

#define UNPACK_GROUP(group) do { \
    uint32_t group_idx; \
    UNPACK_IN(UINT32, &group_idx); \
    if (group_idx >= g_mgr->nr_groups) { \
        status = TPM_BADINDEX; \
        goto abort_egress; \
    } \
    group = g_mgr->groups[group_idx].v; \
    if (!group) { \
        status = TPM_AUTHFAIL; \
        goto abort_egress; \
    } \
} while (0)

#define UNPACK_DONE() do { \
    if (in_pos != tpmcmd->req_len) { \
        status = TPM_BAD_PARAMETER; \
        goto abort_egress; \
    } \
} while (0)

#define PACK_OUT(type, item...) do { \
    UINT32 isize = sizeof_ ## type(item); \
    if (isize + tpmcmd->resp_len > TCPA_MAX_BUFFER_LENGTH) { \
        status = TPM_SIZE; \
        goto abort_egress; \
    } \
    pack_ ## type (tpmcmd->resp + tpmcmd->resp_len, item); \
    tpmcmd->resp_len += isize; \
} while (0)

#define PACK_BUF ((void*)(tpmcmd->resp + tpmcmd->resp_len))

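/* VTPM_ORD_GET_BOOT_HASH: return the 20-byte kernel/XSM-label hash recorded for this connection. */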
static TPM_RESULT vtpmmgr_GetBootHash(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    UNPACK_DONE();

    PACK_OUT(BUFFER, opq->kern_hash, 20);

    CMD_END;
}

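/*
 * VTPM_ORD_GET_QUOTE: produce a quote over the caller's 20-byte nonce and
 * the requested PCR selection; the response carries a 256-byte signature
 * followed by the selected PCR values.
 */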
static TPM_RESULT vtpmmgr_GetQuote(struct tpm_opaque *opq, tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    int i;
    void *ibuf;
    uint32_t pcr_size;
    TPM_PCR_SELECTION sel;
    uint32_t extra_info_flags;

    UNPACK_IN(VPTR, &ibuf, 20, UNPACK_ALIAS);
    UNPACK_IN(TPM_PCR_SELECTION, &sel, UNPACK_ALIAS);
    UNPACK_IN(TPM_DEEP_QUOTE_INFO, &extra_info_flags);
    UNPACK_DONE();

    if (!opq->vtpm) {
        status = TPM_BAD_PARAMETER;
        goto abort_egress;
    }

    printk("ibuf: ");
    for (i=0; i < 20; i++)
        printk("%02x", ((uint8_t*)ibuf)[i]);
    printk("\n");

    status = vtpm_do_quote(opq->group, *opq->uuid, opq->kern_hash, ibuf, &sel,
                           extra_info_flags, PACK_BUF + 256, &pcr_size, PACK_BUF);
    if (status)
        goto abort_egress;
    tpmcmd->resp_len += 256 + pcr_size;

    CMD_END;
}

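/* VTPM_ORD_GROUP_LIST: return the number of vTPM groups known to the manager. */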
static TPM_RESULT vtpmmgr_GroupList(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    UNPACK_DONE();
    PACK_OUT(UINT32, g_mgr->nr_groups);
    CMD_END;
}

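/*
 * VTPM_ORD_GROUP_NEW: create a new vTPM group bound to the given privacy CA
 * digest and 2048-bit SAA public key; returns the group UUID, the group's
 * AIK public key, and the recovery data (which is then cleared in memory).
 */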
static TPM_RESULT vtpmmgr_GroupNew(tpmcmd_t* tpmcmd)
{
    void *privCADigest;
    BYTE *pubkey;
    struct mem_group *group;
    uint32_t group_idx;
    CMD_BEGIN;

    UNPACK_IN(VPTR, &privCADigest, 20, UNPACK_ALIAS);
    UNPACK_IN(PTR, &pubkey, 256, UNPACK_ALIAS);
    UNPACK_DONE();

    group = vtpm_new_group(privCADigest);
    if (!group) {
        status = TPM_FAIL;
        goto abort_egress;
    }

    memcpy(group->id_data.saa_pubkey, pubkey, 256);

    PACK_OUT(BUFFER, group->id_data.uuid, 16);
    PACK_OUT(BUFFER, group->id_data.tpm_aik_public, 256);
    PACK_OUT(BUFFER, group->details.recovery_data, 256);

    memset(group->details.recovery_data, 0, 256);

    group->details.sequence = native_be64(g_mgr->sequence);

    if (group != g_mgr->groups[0].v) {
        group_idx = g_mgr->nr_groups;
        g_mgr->nr_groups++;
        g_mgr->groups = realloc(g_mgr->groups,
                                g_mgr->nr_groups * sizeof(struct mem_group_hdr));
        memset(&g_mgr->groups[group_idx], 0, sizeof(g_mgr->groups[0]));
        g_mgr->groups[group_idx].v = group;
    }

    vtpm_sync_group(group, SEQ_UPDATE);
    CMD_END;
}

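/*
 * VTPM_ORD_GROUP_DEL: delete a group and all of its vTPMs, refusing if any
 * vTPM in the group is currently open.
 */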
static TPM_RESULT vtpmmgr_GroupDel(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group;
    uint32_t group_idx, nr_mov;

    UNPACK_IN(UINT32, &group_idx);
    UNPACK_DONE();

    // index must be strictly within range to avoid reading past the array
    if (group_idx >= g_mgr->nr_groups) {
        status = TPM_BADINDEX;
        goto abort_egress;
    }
    group = g_mgr->groups[group_idx].v;

    if (group) {
        int i, j;
        for (i = 0; i < group->nr_pages; i++) {
            for (j = 0; j < group->data[i].size; j++) {
                if (group->data[i].vtpms[j]->flags & VTPM_FLAG_OPEN) {
                    status = TPM_FAIL;
                    goto abort_egress;
                }
            }
        }

        for (i = 0; i < group->nr_pages; i++) {
            for (j = 0; j < group->data[i].size; j++) {
                free(group->data[i].vtpms[j]);
            }
        }
        free(group->data);
        free(group->seals);
        free(group);
    }

    g_mgr->nr_groups--;
    nr_mov = g_mgr->nr_groups - group_idx;
    memmove(&g_mgr->groups[group_idx], &g_mgr->groups[group_idx + 1],
            nr_mov * sizeof(g_mgr->groups[0]));

    vtpm_sync_disk(g_mgr, CTR_UPDATE);

    CMD_END;
}

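/*
 * Serialize a group's configuration list (config sequence number, seal
 * digests, and authorized kernel hashes) into buf; returns the number of
 * bytes written.
 */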
static int pack_cfg_list(void* buf, struct mem_group *group)
{
    int i;
    void *bstart = buf;
    memcpy(buf, &group->details.cfg_seq, 8); buf += 8;
    buf = pack_UINT32(buf, group->nr_seals);
    for(i=0; i < group->nr_seals; i++) {
        memcpy(buf, &group->seals[i].digest_release, 20);
        buf += 20;
    }
    memcpy(buf, &group->seal_bits.nr_kerns, 4); buf += 4;
    memcpy(buf, &group->seal_bits.kernels, 20 * be32_native(group->seal_bits.nr_kerns));
    return buf - bstart + 20 * be32_native(group->seal_bits.nr_kerns);
}

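/*
 * VTPM_ORD_GROUP_SHOW: return a group's UUID, SAA public key, and current
 * configuration list.
 */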
static TPM_RESULT vtpmmgr_GroupShow(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group;

    UNPACK_GROUP(group);
    UNPACK_DONE();

    // TODO show is read-only access, need to hit disk if group is NULL

    PACK_OUT(BUFFER, group->id_data.uuid, 16);
    PACK_OUT(BUFFER, group->id_data.saa_pubkey, 256);
    tpmcmd->resp_len += pack_cfg_list(PACK_BUF, group);

    CMD_END;
}

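/*
 * VTPM_ORD_GROUP_ACTIVATE: hand the caller-supplied activation blob for this
 * group to group_do_activate() in the management authority code, which fills
 * in the response.
 */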
static TPM_RESULT vtpmmgr_GroupActivate(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group;
    uint32_t blobSize;
    void *blob;

    UNPACK_GROUP(group);
    UNPACK_IN(UINT32, &blobSize);
    UNPACK_IN(VPTR, &blob, blobSize, UNPACK_ALIAS);
    UNPACK_DONE();

    status = group_do_activate(group, blob, blobSize, tpmcmd->resp, &tpmcmd->resp_len);

    CMD_END;
}

/* 2048-bit MODP Group from RFC3526:
 *  2^2048 - 2^1984 - 1 + 2^64 * { [2^1918 pi] + 124476 }
 */
static uint8_t P_bytes[256] = {
    0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC9, 0x0F, 0xDA, 0xA2,
    0x21, 0x68, 0xC2, 0x34, 0xC4, 0xC6, 0x62, 0x8B, 0x80, 0xDC, 0x1C, 0xD1,
    0x29, 0x02, 0x4E, 0x08, 0x8A, 0x67, 0xCC, 0x74, 0x02, 0x0B, 0xBE, 0xA6,
    0x3B, 0x13, 0x9B, 0x22, 0x51, 0x4A, 0x08, 0x79, 0x8E, 0x34, 0x04, 0xDD,
    0xEF, 0x95, 0x19, 0xB3, 0xCD, 0x3A, 0x43, 0x1B, 0x30, 0x2B, 0x0A, 0x6D,
    0xF2, 0x5F, 0x14, 0x37, 0x4F, 0xE1, 0x35, 0x6D, 0x6D, 0x51, 0xC2, 0x45,
    0xE4, 0x85, 0xB5, 0x76, 0x62, 0x5E, 0x7E, 0xC6, 0xF4, 0x4C, 0x42, 0xE9,
    0xA6, 0x37, 0xED, 0x6B, 0x0B, 0xFF, 0x5C, 0xB6, 0xF4, 0x06, 0xB7, 0xED,
    0xEE, 0x38, 0x6B, 0xFB, 0x5A, 0x89, 0x9F, 0xA5, 0xAE, 0x9F, 0x24, 0x11,
    0x7C, 0x4B, 0x1F, 0xE6, 0x49, 0x28, 0x66, 0x51, 0xEC, 0xE4, 0x5B, 0x3D,
    0xC2, 0x00, 0x7C, 0xB8, 0xA1, 0x63, 0xBF, 0x05, 0x98, 0xDA, 0x48, 0x36,
    0x1C, 0x55, 0xD3, 0x9A, 0x69, 0x16, 0x3F, 0xA8, 0xFD, 0x24, 0xCF, 0x5F,
    0x83, 0x65, 0x5D, 0x23, 0xDC, 0xA3, 0xAD, 0x96, 0x1C, 0x62, 0xF3, 0x56,
    0x20, 0x85, 0x52, 0xBB, 0x9E, 0xD5, 0x29, 0x07, 0x70, 0x96, 0x96, 0x6D,
    0x67, 0x0C, 0x35, 0x4E, 0x4A, 0xBC, 0x98, 0x04, 0xF1, 0x74, 0x6C, 0x08,
    0xCA, 0x18, 0x21, 0x7C, 0x32, 0x90, 0x5E, 0x46, 0x2E, 0x36, 0xCE, 0x3B,
    0xE3, 0x9E, 0x77, 0x2C, 0x18, 0x0E, 0x86, 0x03, 0x9B, 0x27, 0x83, 0xA2,
    0xEC, 0x07, 0xA2, 0x8F, 0xB5, 0xC5, 0x5D, 0xF0, 0x6F, 0x4C, 0x52, 0xC9,
    0xDE, 0x2B, 0xCB, 0xF6, 0x95, 0x58, 0x17, 0x18, 0x39, 0x95, 0x49, 0x7C,
    0xEA, 0x95, 0x6A, 0xE5, 0x15, 0xD2, 0x26, 0x18, 0x98, 0xFA, 0x05, 0x10,
    0x15, 0x72, 0x8E, 0x5A, 0x8A, 0xAC, 0xAA, 0x68, 0xFF, 0xFF, 0xFF, 0xFF,
    0xFF, 0xFF, 0xFF, 0xFF,
};
static t_uint Gp[] = { 2 };

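/*
 * Anonymous Diffie-Hellman exchange over the RFC 3526 2048-bit MODP group:
 * generate a random exponent X, write g^X mod P to dhkx2, compute the shared
 * secret from the peer's public value dhkx1, and write its SHA-256 digest to
 * out.
 */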
static void tm_dhkx_gen(void* dhkx1, void* dhkx2, void* out)
{
    mpi GX = { 0 }, GY = { 0 }, K = { 0 }, RP = { 0 };
    mpi P = { 0 };

    t_uint Xp[sizeof(P_bytes) / sizeof(t_uint)];
    mpi X = {
        .s = 1,
        .n = ARRAY_SIZE(Xp),
        .p = Xp
    };
    mpi G = {
        .s = 1,
        .n = 1,
        .p = Gp,
    };
    mpi_read_binary(&P, P_bytes, sizeof(P_bytes));

    do_random(Xp, sizeof(Xp));

    mpi_exp_mod(&GX, &G, &X, &P, &RP);
    mpi_write_binary(&GX, dhkx2, 256);
    mpi_free(&GX);

    mpi_read_binary(&GY, dhkx1, 256);
    mpi_exp_mod(&K, &GY, &X, &P, &RP);
    mpi_free(&P);
    mpi_free(&RP);
    mpi_free(&GY);

    mpi_write_binary(&K, (void*)Xp, 256);
    mpi_free(&K);
    sha2((void*)Xp, 256, out, 0);
}

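/* XOR a 256-bit buffer y into x in place. */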
static void xor2_256b(void *xv, const void* yv)
{
    int i;
    uint64_t *x = xv;
    const uint64_t *y = yv;
    for(i=0; i < 4; i++)
        x[i] ^= y[i];
}

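/*
 * VTPM_ORD_GROUP_REGISTER: verify the SAA's signature over its DH public
 * value, then return our DH public value, the group key XORed with the
 * derived shared-secret hash, and a quote binding the exchange to the
 * current platform configuration.
 */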
static TPM_RESULT vtpmmgr_GroupRegister(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group = NULL;
    tpmrsa_context saa_rsa = TPMRSA_CTX_INIT;
    struct tpm_authdata digest;
    sha1_context ctx;
    TPM_PCR_SELECTION sel;
    void *dhkx1, *dhkx2, *gk, *sig;
    uint32_t extra_info_flags = 0;

    UNPACK_GROUP(group);
    UNPACK_IN(VPTR, &dhkx1, 256, UNPACK_ALIAS);
    UNPACK_IN(VPTR, &sig, 256, UNPACK_ALIAS);
    UNPACK_IN(TPM_PCR_SELECTION, &sel, UNPACK_ALIAS);
    UNPACK_DONE();

    /* Only generating this quote during the same boot that this group was
     * created in allows the quote to prove that the group key has never
     * been available outside a configuration approved by its SAA.
     */
    if (!(group->flags & MEM_GROUP_FLAG_FIRSTBOOT)) {
        status = TPM_FAIL;
        goto abort_egress;
    }

    sha1(dhkx1, 256, digest.bits);
    tpmrsa_set_pubkey(&saa_rsa, group->id_data.saa_pubkey, 256, 0, 0);
    if (tpmrsa_sigcheck(&saa_rsa, sig, digest.bits))
        status = TPM_FAIL;
    tpmrsa_free(&saa_rsa);
    if (status)
        goto abort_egress;

    dhkx2 = PACK_BUF;
    tpmcmd->resp_len += 256;
    gk = PACK_BUF;
    tpmcmd->resp_len += 32;

    tm_dhkx_gen(dhkx1, dhkx2, gk);
    xor2_256b(gk, &group->group_key);

    sha1_starts(&ctx);
    sha1_update(&ctx, (void*)"REGR", 4);
    sha1_update(&ctx, dhkx1, 256);
    sha1_update(&ctx, dhkx2, 256 + 32);
    sha1_finish(&ctx, digest.bits);

    status = vtpm_do_quote(group, NULL, NULL, &digest, &sel, extra_info_flags,
                           NULL, NULL, PACK_BUF);
    tpmcmd->resp_len += 256;

    CMD_END;
}

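/*
 * VTPM_ORD_GROUP_UPDATE: install a new SAA-signed configuration list
 * (sequence number, seal PCR configurations, and authorized kernel hashes)
 * after checking the signature and that the sequence number does not go
 * backwards.
 */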
static TPM_RESULT vtpmmgr_GroupUpdate(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group;
    int i;
    int hstart;
    uint32_t nr_kerns, nr_seals;
    uint64_t old_seq, new_seq;
    struct mem_seal *seals = NULL;
    tpmrsa_context saa_rsa = TPMRSA_CTX_INIT;
    unsigned char digest[20];
    TPM_RESULT rc;
    void *sig, *seal_bits, *kern_bits;

    UNPACK_GROUP(group);
    UNPACK_IN(VPTR, &sig, 256, UNPACK_ALIAS);

    // Hash starts here
    hstart = in_pos;

    new_seq = be64_native(*(be64_t*)(tpmcmd->req + in_pos));
    old_seq = be64_native(group->details.cfg_seq);
    in_pos += 8;
    if (old_seq > new_seq) {
        status = TPM_FAIL;
        goto abort_egress;
    }

    UNPACK_IN(UINT32, &nr_seals);
    UNPACK_IN(VPTR, &seal_bits, nr_seals * 20, UNPACK_ALIAS);

    UNPACK_IN(UINT32, &nr_kerns);
    UNPACK_IN(VPTR, &kern_bits, nr_kerns * 20, UNPACK_ALIAS);

    // TODO handle saving larger lists on disk
    if (nr_seals > NR_SEALS_PER_GROUP) {
        status = TPM_SIZE;
        goto abort_egress;
    }

    if (nr_kerns > NR_KERNS_PER_GROUP) {
        status = TPM_SIZE;
        goto abort_egress;
    }

    sha1(tpmcmd->req + hstart, in_pos - hstart, digest);

    seals = calloc(nr_seals, sizeof(seals[0]));

    for(i=0; i < nr_seals; i++) {
        TPM_PCR_SELECTION sel;
        UNPACK_IN(TPM_PCR_SELECTION, &sel, UNPACK_ALIAS);
        memcpy(&seals[i].digest_release, seal_bits, 20);
        seal_bits += 20;
        if (sel.sizeOfSelect > 4) {
            status = TPM_BAD_PARAMETER;
            goto abort_egress;
        }
        seals[i].pcr_selection = native_le32(0);
        memcpy(&seals[i].pcr_selection, sel.pcrSelect, sel.sizeOfSelect);
    }

    UNPACK_DONE();

    tpmrsa_set_pubkey(&saa_rsa, group->id_data.saa_pubkey, 256, 0, 0);
    rc = tpmrsa_sigcheck(&saa_rsa, sig, digest);
    tpmrsa_free(&saa_rsa);
    if (rc) {
        printk("sigcheck failed: %d\n", rc);
        status = rc;
        goto abort_egress;
    }

    // Commit
    free(group->seals);

    memcpy(&group->seal_bits.kernels, kern_bits, 20 * nr_kerns);
    group->details.cfg_seq = native_be64(new_seq);
    group->nr_seals = nr_seals;
    group->seals = seals;
    group->seal_bits.nr_kerns = native_be32(nr_kerns);

    seals = NULL;

    group->flags &= ~MEM_GROUP_FLAG_SEAL_VALID;
    if (group == g_mgr->groups[0].v)
        g_mgr->root_seals_valid = 0;

    // TODO use GROUP_KEY_UPDATE or MGR_KEY_UPDATE here?
    // only required if this update was to address a potential key leak
    vtpm_sync_group(group, SEQ_UPDATE);

abort_egress:
    free(seals);

    pack_TPM_RSP_HEADER(tpmcmd->resp, VTPM_TAG_RSP, tpmcmd->resp_len, status);
    return status;
}

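/*
 * VTPM_ORD_VTPM_LIST: return the group's vTPM count followed by as many
 * 16-byte vTPM UUIDs as fit in the response, starting at the requested
 * offset.
 */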
static TPM_RESULT vtpmmgr_VtpmList(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group;
    uint32_t vtpm_offset;
    int i, j;

    UNPACK_GROUP(group);
    UNPACK_IN(UINT32, &vtpm_offset);

    PACK_OUT(UINT32, group->nr_vtpms);
    if (vtpm_offset > group->nr_vtpms)
        goto egress;

    for(i=0; i < group->nr_pages; i++) {
        struct mem_vtpm_page *pg = &group->data[i];
        for(j=0; j < pg->size; j++) {
            if (vtpm_offset) {
                // TODO a proper seek would be far faster
                vtpm_offset--;
                continue;
            }
            memcpy(PACK_BUF, pg->vtpms[j]->uuid, 16);
            tpmcmd->resp_len += 16;
            if (tpmcmd->resp_len + 16 > TCPA_MAX_BUFFER_LENGTH)
                goto egress;
        }
    }

egress:
    CMD_END;
}

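/*
 * VTPM_ORD_VTPM_NEW: create a new vTPM in the given group, using either a
 * caller-supplied UUID or a freshly generated random one, and return its
 * UUID.
 */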
static TPM_RESULT vtpmmgr_VtpmNew(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    struct mem_group *group;
    struct mem_vtpm *vtpm;
    uuid_t newuuid;
    int rc;

    UNPACK_GROUP(group);

    // XXX allow non-random UUIDs for testing
    if (tpmcmd->req_len == 14 + 16)
        UNPACK_IN(BUFFER, newuuid, 16);
    else
        gen_random_uuid(newuuid);
    UNPACK_DONE();

    rc = create_vtpm(group, &vtpm, newuuid);
    if (rc) {
        status = TPM_FAIL;
        goto abort_egress;
    }
    memset(vtpm->data, 0, 64);
    vtpm_sync(group, vtpm);

    PACK_OUT(BUFFER, newuuid, 16);
    CMD_END;
}

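/* VTPM_ORD_VTPM_DEL: delete a vTPM by UUID, provided it is not currently open. */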
static TPM_RESULT vtpmmgr_VtpmDel(tpmcmd_t* tpmcmd)
{
    CMD_BEGIN;
    uuid_t uuid;
    struct mem_group *group;
    struct mem_vtpm *vtpm;
    int rc;

    UNPACK_IN(BUFFER, uuid, 16);
    UNPACK_DONE();
    rc = find_vtpm(&group, &vtpm, uuid);
    if (rc) {
        status = TPM_FAIL;
        goto abort_egress;
    }

    if (vtpm->flags & VTPM_FLAG_OPEN) {
        status = TPM_FAIL;
        goto abort_egress;
    }

    delete_vtpm(group, vtpm);

    CMD_END;
}

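/* Returns 0 if the caller's vTPM has the ADMIN flag, nonzero otherwise. */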
static int vtpmmgr_permcheck(struct tpm_opaque *opq)
{
    if (!opq->vtpm)
        return 1;
    if (opq->vtpm->flags & VTPM_FLAG_ADMIN)
        return 0;
    return 1;
}

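/*
 * Top-level dispatcher: route an incoming command to the vTPM commands
 * (VTPM_TAG_REQ), the management commands (VTPM_TAG_REQ2, admin only), or
 * the whitelisted hardware TPM passthrough ordinals.
 */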
TPM_RESULT vtpmmgr_handle_cmd(
        struct tpm_opaque *opaque,
        tpmcmd_t* tpmcmd)
{
    TPM_RESULT status = TPM_SUCCESS;
    TPM_TAG tag;
    UINT32 size;
    TPM_COMMAND_CODE ord;

    unpack_TPM_RQU_HEADER(tpmcmd->req,
                          &tag, &size, &ord);

    /* Handle the command now */
    switch(tag) {
    case VTPM_TAG_REQ:
        // This is a vTPM command
        switch(ord) {
        case VTPM_ORD_SAVEHASHKEY:
            return vtpmmgr_SaveHashKey(opaque, tpmcmd);
        case VTPM_ORD_LOADHASHKEY:
            return vtpmmgr_LoadHashKey(opaque, tpmcmd);
        case VTPM_ORD_GET_BOOT_HASH:
            return vtpmmgr_GetBootHash(opaque, tpmcmd);
        case VTPM_ORD_GET_QUOTE:
            return vtpmmgr_GetQuote(opaque, tpmcmd);
        default:
            vtpmlogerror(VTPM_LOG_VTPM, "Invalid vTPM Ordinal %" PRIu32 "\n", ord);
            status = TPM_BAD_ORDINAL;
        }
        break;
    case VTPM_TAG_REQ2:
        // This is a management command
        if (vtpmmgr_permcheck(opaque)) {
            status = TPM_AUTHFAIL;
            vtpmlogerror(VTPM_LOG_VTPM, "Rejected attempt to use management command from client\n");
            break;
        }
        switch (ord) {
        case VTPM_ORD_GROUP_LIST:
            return vtpmmgr_GroupList(tpmcmd);
        case VTPM_ORD_GROUP_NEW:
            return vtpmmgr_GroupNew(tpmcmd);
        case VTPM_ORD_GROUP_DEL:
            return vtpmmgr_GroupDel(tpmcmd);
        case VTPM_ORD_GROUP_ACTIVATE:
            return vtpmmgr_GroupActivate(tpmcmd);
        case VTPM_ORD_GROUP_REGISTER:
            return vtpmmgr_GroupRegister(tpmcmd);
        case VTPM_ORD_GROUP_UPDATE:
            return vtpmmgr_GroupUpdate(tpmcmd);
        case VTPM_ORD_GROUP_SHOW:
            return vtpmmgr_GroupShow(tpmcmd);
        case VTPM_ORD_VTPM_LIST:
            return vtpmmgr_VtpmList(tpmcmd);
        case VTPM_ORD_VTPM_NEW:
            return vtpmmgr_VtpmNew(tpmcmd);
        case VTPM_ORD_VTPM_DEL:
            return vtpmmgr_VtpmDel(tpmcmd);
        default:
            vtpmlogerror(VTPM_LOG_VTPM, "Invalid TM Ordinal %" PRIu32 "\n", ord);
            status = TPM_BAD_ORDINAL;
        }
        break;
    case TPM_TAG_RQU_COMMAND:
    case TPM_TAG_RQU_AUTH1_COMMAND:
    case TPM_TAG_RQU_AUTH2_COMMAND:
        // This is a TPM passthrough command
        switch(ord) {
        case TPM_ORD_GetRandom:
            vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_GetRandom\n");
            break;
        case TPM_ORD_PcrRead:
            vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_PcrRead\n");
            // Quotes also need to be restricted to hide PCR values
            break;
        case TPM_ORD_Extend:
            // TODO allow to certain clients? A malicious client
            // could scramble PCRs and make future quotes invalid.
            if (vtpmmgr_permcheck(opaque)) {
                vtpmlogerror(VTPM_LOG_VTPM, "Disallowed TPM_Extend\n");
                status = TPM_DISABLED_CMD;
                goto abort_egress;
            } else {
                vtpmloginfo(VTPM_LOG_VTPM, "Passthrough: TPM_Extend\n");
            }
            break;
        default:
            vtpmlogerror(VTPM_LOG_VTPM, "TPM Disallowed Passthrough ord=%" PRIu32 "\n", ord);
            status = TPM_DISABLED_CMD;
            goto abort_egress;
        }

        size = TCPA_MAX_BUFFER_LENGTH;
        TPMTRYRETURN(TPM_TransmitData(tpmcmd->req, tpmcmd->req_len, tpmcmd->resp, &size));
        tpmcmd->resp_len = size;

        return TPM_SUCCESS;
    default:
        vtpmlogerror(VTPM_LOG_VTPM, "Invalid tag=%" PRIu16 "\n", tag);
        status = TPM_BADTAG;
    }

abort_egress:
    tpmcmd->resp_len = VTPM_COMMAND_HEADER_SIZE;
    pack_TPM_RSP_HEADER(tpmcmd->resp, tag + 3, tpmcmd->resp_len, status);

    return status;
}