#include <console.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stdbool.h>
#include <mini-os/byteorder.h>

#include "vtpm_manager.h"
#include "log.h"
#include "uuid.h"

#include "vtpmmgr.h"
#include "vtpm_disk.h"
#include "disk_tpm.h"
#include "disk_io.h"
#include "disk_crypto.h"
#include "disk_format.h"
#include "mgmt_authority.h"

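/*
 * Write a confidentiality- and integrity-protected sector: allocate a free
 * sector, CTR-encrypt the payload into the shared write buffer, CMAC the
 * result under the manager's master key, and write it out. The sector
 * chosen is returned through *dst.
 */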
static void disk_write_crypt_sector(sector_t *dst, const void *data, size_t size, const struct mem_tpm_mgr *mgr)
{
	struct disk_crypt_sector_plain *sector = disk_write_buf();
	*dst = disk_find_free(mgr);
	aes_encrypt_ctr(sector->iv_data, sizeof(sector->iv_data), data, size, &mgr->tm_key_e);
	aes_cmac(&sector->mac, sector->data, sizeof(sector->data), &mgr->tm_key_e);
	disk_write_sector(*dst, sector, sizeof(*sector));
}

/*
 * Mark unchanged sectors on disk as being used
 */
static void disk_populate_used_vtpm(const struct mem_vtpm_page *src, const struct mem_tpm_mgr *mgr)
{
	if (be32_native(src->disk_loc) != 0)
		disk_set_used(src->disk_loc, mgr);
}

/*
 * Write out a vTPM page to disk, doing nothing if the existing copy is valid
 */
static void disk_write_vtpm_page(struct mem_vtpm_page *dst, const aes_context *auth_key,
		const struct mem_tpm_mgr *mgr)
{
	struct disk_vtpm_sector pt;
	int i;
	memset(&pt, 0, sizeof(pt));
	if (be32_native(dst->disk_loc) != 0)
		return;

	for(i=0; i < dst->size; i++) {
		memcpy(pt.header[i].uuid, dst->vtpms[i]->uuid, 16);
		memcpy(pt.data[i].data, dst->vtpms[i]->data, 64);
		pt.header[i].flags = native_be32(dst->vtpms[i]->flags & VTPM_FLAG_DISK_MASK);
	}
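	/* The vTPM secrets are encrypted under the group's key; the UUID/flag
	 * headers sit outside this inner encryption layer (the whole sector is
	 * still encrypted under the manager key in disk_write_crypt_sector). */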
	aes_encrypt_ctr(&pt.iv, sizeof(pt.data) + 16, &pt.data, sizeof(pt.data), auth_key);

	sha256(&dst->disk_hash, &pt, sizeof(pt));

	disk_write_crypt_sector(&dst->disk_loc, &pt, sizeof(pt), mgr);
}

/*
 * Generate TPM seal blobs for a group's keys; do nothing if existing copy is valid
 */
static void generate_group_seals(struct mem_group *src, const struct mem_tpm_mgr *parent)
{
	int i;
	struct disk_group_sealed_data sblob;

	// previous seals are still valid, skip talking to the TPM
	if (src->flags & MEM_GROUP_FLAG_SEAL_VALID)
		return;

	memcpy(&sblob.magic, DISK_GROUP_BOUND_MAGIC, 4);
	memcpy(sblob.tpm_manager_uuid, parent->uuid, 16);
	memcpy(&sblob.aik_authdata, &src->aik_authdata, 20);
	memcpy(&sblob.group_key, &src->group_key, 16);
	memcpy(&sblob.rollback_mac_key, &src->rollback_mac_key, 16);

	/* TODO support for more than NR_SEALS_PER_GROUP seals */
	if (src->nr_seals > NR_SEALS_PER_GROUP)
		abort();

	for(i=0; i < src->nr_seals; i++) {
		struct disk_seal_entry *dst = &src->seal_bits.entry[i];
		dst->pcr_selection = src->seals[i].pcr_selection;
		memcpy(&dst->digest_release, &src->seals[i].digest_release, 20);
		TPM_pcr_digest(&dst->digest_at_seal, dst->pcr_selection);

		/* TPM 2.0 bind | TPM 1.x seal */
		if (hw_is_tpm2())
			TPM2_disk_bind(dst, &sblob, sizeof(sblob));
		else
			TPM_disk_seal(dst, &sblob, sizeof(sblob));
	}
	src->seal_bits.nr_cfgs = native_be32(src->nr_seals);

	src->flags |= MEM_GROUP_FLAG_SEAL_VALID;
}

/*
 * Mark unchanged sectors on disk as being used
 */
static void disk_populate_used_group(const struct mem_group_hdr *src, const struct mem_tpm_mgr *mgr)
{
	int i;
	struct mem_group *group = src->v;
	if (be32_native(src->disk_loc) != 0) {
		// entire group is unchanged - mark group, itree, and vtpm sectors
		// TODO mark other children (seal)
		disk_set_used(src->disk_loc, mgr);
		for(i = 0; i < src->disk_nr_inuse; i++)
			disk_set_used(src->disk_inuse[i], mgr);
		return;
	}

	// unopened groups should never have been invalidated
	if (!group)
		abort();

	for (i = 0; i < group->nr_pages; i++)
		disk_populate_used_vtpm(&group->data[i], mgr);
}

static void disk_write_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
		struct hash256 *hash, sector_t *loc, int hsize,
		const aes_context *group_key, const struct mem_tpm_mgr *mgr);

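/*
 * Recursively write the hash tree ("itree") that covers a group's vTPM
 * pages. A call fills up to hsize (hash, location) slot pairs: when
 * nr_entries fits, the pages are written directly (the leaf case);
 * otherwise each slot becomes an itree sector covering incr entries, where
 * incr is the smallest power of NR_ENTRIES_PER_ITREE with
 * incr * hsize >= nr_entries. Illustration (constant value assumed here;
 * see disk_format.h for the real one): if NR_ENTRIES_PER_ITREE were 16,
 * writing 100 pages under a 16-slot parent gives incr = 16 and seven
 * children: six full sectors plus one holding the remaining four entries.
 */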
static void disk_write_vtpm_itree(struct mem_group_hdr *hdr, int base, int nr_entries,
		struct hash256 *hash, sector_t *loc, int hsize,
		const aes_context *group_key, const struct mem_tpm_mgr *mgr)
{
	int i, incr = 1, inuse_base, lsize;

	while (nr_entries > incr * hsize)
		incr *= NR_ENTRIES_PER_ITREE;

	if (nr_entries <= hsize) {
		struct mem_group *group = hdr->v;
		for (i = 0; i < nr_entries; i++) {
			struct mem_vtpm_page *page = group->data + base + i;
			disk_write_vtpm_page(page, group_key, mgr);
			loc[i] = page->disk_loc;
			hash[i] = page->disk_hash;
		}
	} else {
		for (i = 0; i * incr < nr_entries; i++) {
			struct disk_itree_sector pt;
			int child_entries = incr;

			// the last sector is not completely full
			if (nr_entries - i * incr < incr)
				child_entries = nr_entries - i * incr;

			disk_write_vtpm_itree(hdr, base, child_entries, pt.hash, pt.location,
					NR_ENTRIES_PER_ITREE, group_key, mgr);

			sha256(&hash[i], &pt.hash, sizeof(pt.hash));
			disk_write_crypt_sector(&loc[i], &pt, sizeof(pt), mgr);

			base += incr;
		}
	}

	// save the list of used sectors (itree and vtpm) in the header
	inuse_base = hdr->disk_nr_inuse;
	lsize = 1 + (nr_entries - 1) / incr;
	hdr->disk_nr_inuse += lsize;
	hdr->disk_inuse = realloc(hdr->disk_inuse, hdr->disk_nr_inuse * sizeof(sector_t));
	memcpy(&hdr->disk_inuse[inuse_base], loc, lsize * sizeof(sector_t));
}

/*
 * Write out a vTPM group sector and its children
 */
static void disk_write_group_sector(struct mem_group_hdr *src,
		const struct mem_tpm_mgr *mgr)
{
	struct disk_group_sector disk;
	struct mem_group *group = src->v;
	aes_context key_e;

	/* Don't write if the data hasn't changed */
	if (be32_native(src->disk_loc) != 0)
		return;

	// if the group was not opened, it should not have been changed
	if (!group)
		abort();

	memset(&disk, 0, sizeof(disk));
	memcpy(&disk.v.id_data, &group->id_data, sizeof(disk.v.id_data));
	memcpy(&disk.v.details, &group->details, sizeof(disk.v.details));

	aes_setup(&key_e, &group->group_key);

	disk.v.nr_vtpms = native_be32(group->nr_vtpms);

	// regenerated
	src->disk_nr_inuse = 0;

	disk_write_vtpm_itree(src, 0, group->nr_pages, disk.v.vtpm_hash, disk.vtpm_location,
			NR_ENTRIES_PER_GROUP_BASE, &key_e, mgr);

	generate_group_seals(group, mgr);
	memcpy(&disk.v.boot_configs, &group->seal_bits, sizeof(group->seal_bits));

	aes_cmac(&disk.group_mac, &disk.v, sizeof(disk.v), &key_e);
	sha256(&src->disk_hash, &disk.v, sizeof(disk.v) + sizeof(disk.group_mac));
	disk_write_crypt_sector(&src->disk_loc, &disk, sizeof(disk), mgr);
}

/*
 * Write TPM seal blobs for the manager's keys, using the given group's list
 * of valid configurations
 */
static void disk_write_seal_list(struct mem_tpm_mgr *mgr, struct mem_group *group)
{
	int i;
	struct disk_seal_list *seal = disk_write_buf();
	struct disk_root_sealed_data sblob;

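	/* One validity bit per copy of the root seal list: with active_root
	 * 0 or 1, the mask 1 + active_root selects bit 0 or bit 1 of
	 * root_seals_valid, so an already-valid copy skips the TPM round trip. */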
	if (mgr->root_seals_valid & (1 + mgr->active_root))
		return;

	memcpy(&sblob.magic, DISK_ROOT_BOUND_MAGIC, 4);
	memcpy(sblob.tpm_manager_uuid, mgr->uuid, 16);
	memcpy(&sblob.nvram_slot, &mgr->nvram_slot, 4);
	memcpy(&sblob.nvram_auth, &mgr->nvram_auth, 20);
	memcpy(&sblob.counter_index, &mgr->counter_index, 4);
	memcpy(&sblob.counter_auth, &mgr->counter_auth, 20);

	// TODO when an NV slot in the physical TPM is used to populate nv_key,
	// that value should be used to mask the master key so that the value
	// can be changed to revoke old disk state
#if 0
	aes_encrypt_one(&sblob.tm_key, &mgr->tm_key, &mgr->nv_key);
#else
	memcpy(&sblob.tm_key, &mgr->tm_key, 16);
#endif

	memset(seal, 0, sizeof(*seal));
	seal->length = native_be32(group->nr_seals);

	// TODO support for more entries
	if (group->nr_seals > SEALS_PER_ROOT_SEAL_LIST)
		abort();

	for(i=0; i < group->nr_seals; i++) {
		struct mem_seal *src = &group->seals[i];
		struct disk_seal_entry *dst = &seal->entry[i];
		dst->pcr_selection = src->pcr_selection;
		memcpy(&dst->digest_release, &src->digest_release, 20);
		TPM_pcr_digest(&dst->digest_at_seal, dst->pcr_selection);

		/* TPM 2.0 bind / TPM 1.x seal */
		if (hw_is_tpm2())
			TPM2_disk_bind(dst, &sblob, sizeof(sblob));
		else
			TPM_disk_seal(dst, &sblob, sizeof(sblob));
	}

	memcpy(seal->hdr.magic, TPM_MGR_MAGIC, 12);
	seal->hdr.version = native_be32(TPM_MGR_VERSION);

	disk_write_sector(seal_loc(mgr), seal, sizeof(*seal));
	mgr->root_seals_valid |= 1 + mgr->active_root;
}

/*
 * Mark unchanged sectors on disk as being used
 */
static void disk_populate_used_mgr(const struct mem_tpm_mgr *mgr)
{
	int i;

	// TODO walk the linked lists for seals, rb_macs here (when supported)

	for(i=0; i < mgr->nr_groups; i++)
		disk_populate_used_group(&mgr->groups[i], mgr);
}

static void disk_write_group_itree(struct mem_tpm_mgr *mgr, int base, int nr_entries,
		struct hash256 *hash, sector_t *loc, int hsize);

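/*
 * Recursively write the hash tree covering the group sectors, mirroring
 * disk_write_vtpm_itree above: the leaf case writes the group sectors
 * themselves; interior levels write itree sectors of child hashes.
 */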
static void disk_write_group_itree(struct mem_tpm_mgr *mgr, int base, int nr_entries,
		struct hash256 *hash, sector_t *loc, int hsize)
{
	int i, incr = 1;

	if (nr_entries <= hsize) {
		for(i=0; i < nr_entries; i++) {
			struct mem_group_hdr *group = mgr->groups + base + i;
			disk_write_group_sector(group, mgr);
			loc[i] = group->disk_loc;
			hash[i] = group->disk_hash;
		}
		return;
	}

	while (nr_entries > incr * hsize)
		incr *= NR_ENTRIES_PER_ITREE;

	for (i = 0; i * incr < nr_entries; i++) {
		struct disk_itree_sector pt;
		int child_entries = incr;

		// the last sector is not completely full
		if (nr_entries - i * incr < incr)
			child_entries = nr_entries - i * incr;

		disk_write_group_itree(mgr, base, child_entries, pt.hash, pt.location, NR_ENTRIES_PER_ITREE);

		sha256(&hash[i], &pt.hash, sizeof(pt.hash));
		disk_write_crypt_sector(&loc[i], &pt, sizeof(pt), mgr);

		base += incr;
	}
}

/*
 * Write out the root TPM Manager sector and its children
 */
static void disk_write_root_sector(struct mem_tpm_mgr *mgr)
{
	int i, j;
	struct disk_root_sector root;
	memset(&root, 0, sizeof(root));
	root.v.sequence = native_be64(mgr->sequence);
	root.v.tpm_counter_value = mgr->counter_value;

	root.v.nr_groups = native_be32(mgr->nr_groups);

	disk_write_group_itree(mgr, 0, mgr->nr_groups, root.v.group_hash, root.group_loc, NR_ENTRIES_PER_ROOT);

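	/* Compute a per-group rollback MAC over the root body using each open
	 * group's rollback_mac_key. Groups that are unopened or already flagged
	 * for rollback are skipped; i indexes groups, j counts MAC slots used. */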
	i = 0;
	j = 0;
	while (i < mgr->nr_groups) {
		aes_context key_e;
		struct mem_group_hdr *group = &mgr->groups[i];
		struct mem_group *groupv = group->v;

		if (!groupv) {
			i++;
			continue;
		}
		if (groupv->details.flags.value & FLAG_ROLLBACK_DETECTED) {
			i++;
			continue;
		}
		if (j >= NR_RB_MACS_PER_ROOT)
			break; // TODO support for nr_rb_macs > 128

		aes_setup(&key_e, &groupv->rollback_mac_key);
		root.rb_macs[j].id = native_be32(i);
		aes_cmac(&root.rb_macs[j].mac, &root.v, sizeof(root.v), &key_e);
		i++; j++;
	}
	root.nr_rb_macs = native_be32(j);

	struct disk_crypt_sector_plain *root_sect = disk_write_buf();
	aes_encrypt_ctr(root_sect->iv_data, sizeof(root_sect->iv_data), &root, sizeof(root), &mgr->tm_key_e);
	aes_cmac(&root_sect->mac, &root_sect->data, sizeof(root_sect->data), &mgr->tm_key_e);
	disk_write_sector(root_loc(mgr), root_sect, sizeof(*root_sect));
}

/*
 * Write out changes to disk
 */
void disk_write_all(struct mem_tpm_mgr *mgr)
{
	disk_flush_slot(mgr);
	disk_populate_used_mgr(mgr);
	disk_write_root_sector(mgr);

	disk_write_seal_list(mgr, mgr->groups[0].v);

	disk_write_barrier();
}

/*
 * Create a new (blank) TPM Manager disk image.
 *
 * Does not actually write anything to disk.
 */
int vtpm_new_disk(void)
{
	int rc;
	struct mem_tpm_mgr *mgr = calloc(1, sizeof(*mgr));

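	/* Generate the manager's fresh secrets: its UUID, the master disk key,
	 * the NVRAM and counter auth values, and the NV masking key. */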
	do_random(mgr->uuid, 16);
	do_random(&mgr->tm_key, 16);
	do_random(&mgr->nvram_auth, 20);
	do_random(&mgr->counter_auth, 20);
	do_random(&mgr->nv_key, 16);

	aes_setup(&mgr->tm_key_e, &mgr->tm_key);

	// TODO postpone these allocs until first write?
	rc = TPM_disk_nvalloc(&mgr->nvram_slot, mgr->nvram_auth);
	if (rc)
		return rc;

	rc = TPM_disk_alloc_counter(&mgr->counter_index, mgr->counter_auth, &mgr->counter_value);
	if (rc)
		return rc;

	mgr->nr_groups = 1;
	mgr->groups = calloc(1, sizeof(mgr->groups[0]));
	mgr->groups[0].v = vtpm_new_group(NULL);

	TPM_disk_nvwrite(&mgr->nv_key, 16, mgr->nvram_slot, mgr->nvram_auth);

	g_mgr = mgr;

	return 0;
}