1 /*
2 * Implementation of the kernel access vector cache (AVC).
3 *
4 * Authors: Stephen Smalley, <sds@epoch.ncsc.mil>
5 * James Morris <jmorris@redhat.com>
6 *
7 * Update: KaiGai, Kohei <kaigai@ak.jp.nec.com>
8 * Replaced the avc_lock spinlock by RCU.
9 *
10 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2,
14 * as published by the Free Software Foundation.
15 */
16
17 /* Ported to Xen 3.0, George Coker, <gscoker@alpha.ncsc.mil> */
18
19 #include <xen/lib.h>
20 #include <xen/xmalloc.h>
21 #include <xen/types.h>
22 #include <xen/list.h>
23 #include <xen/spinlock.h>
24 #include <xen/prefetch.h>
25 #include <xen/kernel.h>
26 #include <xen/sched.h>
27 #include <xen/init.h>
28 #include <xen/rcupdate.h>
29 #include <asm/atomic.h>
30 #include <asm/current.h>
31 #include <public/xsm/flask_op.h>
32
33 #include "avc.h"
34 #include "avc_ss.h"
35
/*
 * Table mapping each (class, permission-bit) pair to its human-readable
 * permission name; rows are generated by expanding the S_() macro over
 * av_perm_to_string.h.
 */
static const struct av_perm_to_string av_perm_to_string[] = {
#define S_(c, v, s) { c, v, s },
#include "av_perm_to_string.h"
#undef S_
};

/* Table mapping each security class value to its human-readable name. */
static const char *class_to_string[] = {
#define S_(s) s,
#include "class_to_string.h"
#undef S_
};

/*
 * Aggregate view of the two string tables above, exported (non-static)
 * for use by other Flask components.
 */
const struct selinux_class_perm selinux_class_perm = {
    .av_perm_to_string = av_perm_to_string,
    .av_pts_len = ARRAY_SIZE(av_perm_to_string),
    .class_to_string = class_to_string,
    .cts_len = ARRAY_SIZE(class_to_string),
};
54
/*
 * Number of hash buckets; must be a power of two since avc_hash() masks
 * with AVC_CACHE_SLOTS - 1.
 */
#define AVC_CACHE_SLOTS 512
/* Default node count above which avc_alloc_node() triggers a reclaim pass. */
#define AVC_DEF_CACHE_THRESHOLD 512
/* Maximum number of nodes evicted by a single avc_reclaim_node() call. */
#define AVC_CACHE_RECLAIM 16

#ifdef CONFIG_FLASK_AVC_STATS
/* Increment one field of this CPU's avc_cache_stats counters. */
#define avc_cache_stats_incr(field) \
do { \
    __get_cpu_var(avc_cache_stats).field++; \
} while (0)
#else
/* Statistics disabled: compiles to nothing. */
#define avc_cache_stats_incr(field) do {} while (0)
#endif
67
/* Lookup key plus the cached decision for one (ssid, tsid, tclass) query. */
struct avc_entry {
    u32 ssid;               /* source security identifier */
    u32 tsid;               /* target security identifier */
    u16 tclass;             /* target security class */
    struct av_decision avd; /* decision obtained from the security server */
};

/* One cache node; freed via RCU so lockless readers remain safe. */
struct avc_node {
    struct avc_entry ae;
    struct hlist_node list; /* anchored in avc_cache->slots[i] */
    struct rcu_head rhead;  /* deferred-free bookkeeping for call_rcu() */
};

/* The cache proper: fixed-size hash table with per-bucket writer locks. */
struct avc_cache {
    struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
    spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
    atomic_t lru_hint; /* LRU hint for reclaim scan */
    atomic_t active_nodes; /* current number of nodes in the cache */
    u32 latest_notif; /* latest revocation notification */
};
88
/*
 * Node-count threshold that triggers reclaim in avc_alloc_node().
 * Exported (read/write) via the Flask hypercall interface.
 */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef CONFIG_FLASK_AVC_STATS
/* Per-CPU lookup/hit/miss/allocation/reclaim counters. */
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats);
#endif

/* The single system-wide AVC instance. */
static struct avc_cache avc_cache;

/* RCU read-side lock protecting traversal of the cache's hash chains. */
static DEFINE_RCU_READ_LOCK(avc_rcu_lock);
99
/*
 * avc_hash - map a (ssid, tsid, tclass) triple to a cache bucket index.
 *
 * AVC_CACHE_SLOTS is a power of two, so masking with AVC_CACHE_SLOTS - 1
 * keeps the result in range [0, AVC_CACHE_SLOTS).
 */
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
    u32 mixed = ssid ^ (tsid << 2) ^ (tclass << 4);

    return mixed & (AVC_CACHE_SLOTS - 1);
}
104
/* no use making this larger than the printk buffer */
#define AVC_BUF_SIZE 1024
/* Fallback buffer (and its lock) used when xmalloc fails in avc_audit(). */
static DEFINE_SPINLOCK(avc_emerg_lock);
static char avc_emerg_buf[AVC_BUF_SIZE];

/* Cursor over an audit output buffer for incremental avc_printk() writes. */
struct avc_dump_buf {
    char *start;  /* base of the buffer */
    char *pos;    /* next byte to write */
    u32 free;     /* bytes remaining from pos to the end */
};
115
/*
 * avc_printk - printf-style append into @buf, flushing to the console
 * when the buffer fills.
 *
 * If the formatted text does not fit in the remaining space, any content
 * accumulated so far is printed and the buffer reset, then the format is
 * retried once from an empty buffer.  A message too large even for an
 * empty buffer is reported as an overflow.
 */
static void avc_printk(struct avc_dump_buf *buf, const char *fmt, ...)
{
    int i;
    va_list args;

 again:
    va_start(args, fmt);
    i = vsnprintf(buf->pos, buf->free, fmt, args);
    va_end(args);
    if ( i < buf->free )
    {
        /* Fit entirely: advance the cursor past the new text. */
        buf->pos += i;
        buf->free -= i;
    }
    else if ( buf->free < AVC_BUF_SIZE )
    {
        /* Truncated, but the buffer was not empty: flush and retry once. */
        buf->pos[0] = 0;
        printk("%s", buf->start);
        buf->pos = buf->start;
        buf->free = AVC_BUF_SIZE;
        goto again;
    }
    else
    {
        /* Even an empty buffer cannot hold this message. */
        printk("%s", buf->start);
        printk("\navc_printk: overflow\n");
        buf->pos = buf->start;
        buf->free = AVC_BUF_SIZE;
    }
}
146
147 /**
148 * avc_dump_av - Display an access vector in human-readable form.
149 * @tclass: target security class
150 * @av: access vector
151 */
avc_dump_av(struct avc_dump_buf * buf,u16 tclass,u32 av)152 static void avc_dump_av(struct avc_dump_buf *buf, u16 tclass, u32 av)
153 {
154 int i, i2, perm;
155
156 if ( av == 0 )
157 {
158 avc_printk(buf, " null");
159 return;
160 }
161
162 avc_printk(buf, " {");
163 i = 0;
164 perm = 1;
165
166 while ( i < sizeof(av) * 8 )
167 {
168 if ( perm & av )
169 {
170 for ( i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++ )
171 {
172 if ( (av_perm_to_string[i2].tclass == tclass) &&
173 (av_perm_to_string[i2].value == perm) )
174 break;
175 }
176 if ( i2 < ARRAY_SIZE(av_perm_to_string) )
177 {
178 avc_printk(buf, " %s", av_perm_to_string[i2].name);
179 av &= ~perm;
180 }
181 }
182 i++;
183 perm <<= 1;
184 }
185
186 if ( av )
187 avc_printk(buf, " %#x", av);
188
189 avc_printk(buf, " }");
190 }
191
192 /**
193 * avc_dump_query - Display a SID pair and a class in human-readable form.
194 * @ssid: source security identifier
195 * @tsid: target security identifier
196 * @tclass: target security class
197 */
avc_dump_query(struct avc_dump_buf * buf,u32 ssid,u32 tsid,u16 tclass)198 static void avc_dump_query(struct avc_dump_buf *buf, u32 ssid, u32 tsid, u16 tclass)
199 {
200 int rc;
201 char *scontext;
202 u32 scontext_len;
203
204 rc = security_sid_to_context(ssid, &scontext, &scontext_len);
205 if ( rc )
206 avc_printk(buf, "ssid=%d", ssid);
207 else
208 {
209 avc_printk(buf, "scontext=%s", scontext);
210 xfree(scontext);
211 }
212
213 rc = security_sid_to_context(tsid, &scontext, &scontext_len);
214 if ( rc )
215 avc_printk(buf, " tsid=%d", tsid);
216 else
217 {
218 avc_printk(buf, " tcontext=%s", scontext);
219 xfree(scontext);
220 }
221
222 avc_printk(buf, " tclass=%s", class_to_string[tclass]);
223 }
224
225 /**
226 * avc_init - Initialize the AVC.
227 *
228 * Initialize the access vector cache.
229 */
avc_init(void)230 void __init avc_init(void)
231 {
232 int i;
233
234 for ( i = 0; i < AVC_CACHE_SLOTS; i++ )
235 {
236 INIT_HLIST_HEAD(&avc_cache.slots[i]);
237 spin_lock_init(&avc_cache.slots_lock[i]);
238 }
239 atomic_set(&avc_cache.active_nodes, 0);
240 atomic_set(&avc_cache.lru_hint, 0);
241 }
242
/*
 * avc_get_hash_stats - report cache occupancy statistics.
 * @arg: filled in with the entry count, buckets used/total, and the
 *       longest hash-chain length.
 *
 * Walks every bucket under the RCU read lock, so the figures are a
 * best-effort snapshot rather than an atomic view.  Always returns 0.
 */
int avc_get_hash_stats(struct xen_flask_hash_stats *arg)
{
    int i, chain_len, max_chain_len, slots_used;
    struct avc_node *node;
    struct hlist_head *head;

    rcu_read_lock(&avc_rcu_lock);

    slots_used = 0;
    max_chain_len = 0;
    for ( i = 0; i < AVC_CACHE_SLOTS; i++ )
    {
        head = &avc_cache.slots[i];
        if ( !hlist_empty(head) )
        {
            struct hlist_node *next;

            slots_used++;
            chain_len = 0;
            hlist_for_each_entry_rcu(node, next, head, list)
                chain_len++;
            if ( chain_len > max_chain_len )
                max_chain_len = chain_len;
        }
    }

    rcu_read_unlock(&avc_rcu_lock);

    arg->entries = atomic_read(&avc_cache.active_nodes);
    arg->buckets_used = slots_used;
    arg->buckets_total = AVC_CACHE_SLOTS;
    arg->max_chain_len = max_chain_len;

    return 0;
}
278
avc_node_free(struct rcu_head * rhead)279 static void avc_node_free(struct rcu_head *rhead)
280 {
281 struct avc_node *node = container_of(rhead, struct avc_node, rhead);
282 xfree(node);
283 avc_cache_stats_incr(frees);
284 }
285
/*
 * Unlink @node from its hash chain and schedule it for freeing after the
 * current RCU grace period.  Callers hold the bucket's slots_lock (or are
 * otherwise the sole writer), so the hlist_del_rcu() is safe against
 * concurrent writers while readers continue under RCU.
 */
static void avc_node_delete(struct avc_node *node)
{
    hlist_del_rcu(&node->list);
    call_rcu(&node->rhead, avc_node_free);
    atomic_dec(&avc_cache.active_nodes);
}
292
/*
 * Free a node that was never published into the cache: no reader can see
 * it, so it is released immediately rather than deferred through RCU.
 */
static void avc_node_kill(struct avc_node *node)
{
    xfree(node);
    avc_cache_stats_incr(frees);
    atomic_dec(&avc_cache.active_nodes);
}
299
/*
 * Atomically substitute @new for @old in the hash chain and defer @old's
 * release to RCU.  active_nodes is decremented because @new's allocation
 * already incremented it: the net node count is unchanged.
 */
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
    hlist_replace_rcu(&old->list, &new->list);
    call_rcu(&old->rhead, avc_node_free);
    atomic_dec(&avc_cache.active_nodes);
}
306
/*
 * avc_reclaim_node - evict up to AVC_CACHE_RECLAIM nodes from the cache.
 *
 * Visits buckets round-robin, starting from the shared lru_hint counter,
 * deleting every node in each visited bucket until the reclaim quota is
 * met or all buckets have been tried.  Returns the number of nodes
 * reclaimed.
 */
static inline int avc_reclaim_node(void)
{
    struct avc_node *node;
    int hvalue, try, ecx; /* ecx counts nodes reclaimed so far */
    unsigned long flags;
    struct hlist_head *head;
    struct hlist_node *next;
    spinlock_t *lock;

    for ( try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++ )
    {
        /* Advance the shared round-robin hint and pick that bucket. */
        atomic_inc(&avc_cache.lru_hint);
        hvalue = atomic_read(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
        head = &avc_cache.slots[hvalue];
        lock = &avc_cache.slots_lock[hvalue];

        /* Bucket lock excludes other writers; RCU covers the traversal. */
        spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flags);
        rcu_read_lock(&avc_rcu_lock);
        hlist_for_each_entry(node, next, head, list)
        {
            avc_node_delete(node);
            avc_cache_stats_incr(reclaims);
            ecx++;
            if ( ecx >= AVC_CACHE_RECLAIM )
            {
                rcu_read_unlock(&avc_rcu_lock);
                spin_unlock_irqrestore(lock, flags);
                goto out;
            }
        }
        rcu_read_unlock(&avc_rcu_lock);
        spin_unlock_irqrestore(lock, flags);
    }
 out:
    return ecx;
}
343
avc_alloc_node(void)344 static struct avc_node *avc_alloc_node(void)
345 {
346 struct avc_node *node;
347
348 node = xzalloc(struct avc_node);
349 if (!node)
350 goto out;
351
352 INIT_RCU_HEAD(&node->rhead);
353 INIT_HLIST_NODE(&node->list);
354 avc_cache_stats_incr(allocations);
355
356 atomic_inc(&avc_cache.active_nodes);
357 if ( atomic_read(&avc_cache.active_nodes) > avc_cache_threshold )
358 avc_reclaim_node();
359
360 out:
361 return node;
362 }
363
/* Fill in @node's lookup key and copy the decision @avd into it. */
static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid,
                              u16 tclass, struct av_decision *avd)
{
    node->ae.tclass = tclass;
    node->ae.tsid = tsid;
    node->ae.ssid = ssid;
    node->ae.avd = *avd;
}
372
/*
 * Scan the bucket for (ssid, tsid, tclass) under RCU and return the
 * matching node, or NULL when no entry exists.  Caller must be inside an
 * RCU read-side critical section.
 */
static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
    struct avc_node *node;
    struct hlist_head *bucket;
    struct hlist_node *cursor;

    bucket = &avc_cache.slots[avc_hash(ssid, tsid, tclass)];
    hlist_for_each_entry_rcu(node, cursor, bucket, list)
    {
        if ( node->ae.ssid == ssid &&
             node->ae.tsid == tsid &&
             node->ae.tclass == tclass )
            return node;
    }

    return NULL;
}
395
396 /**
397 * avc_lookup - Look up an AVC entry.
398 * @ssid: source security identifier
399 * @tsid: target security identifier
400 * @tclass: target security class
401 * @requested: requested permissions, interpreted based on @tclass
402 *
403 * Look up an AVC entry that is valid for the
404 * @requested permissions between the SID pair
405 * (@ssid, @tsid), interpreting the permissions
406 * based on @tclass. If a valid AVC entry exists,
407 * then this function return the avc_node.
408 * Otherwise, this function returns NULL.
409 */
avc_lookup(u32 ssid,u32 tsid,u16 tclass)410 static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
411 {
412 struct avc_node *node;
413
414 avc_cache_stats_incr(lookups);
415 node = avc_search_node(ssid, tsid, tclass);
416
417 if ( node )
418 avc_cache_stats_incr(hits);
419 else
420 avc_cache_stats_incr(misses);
421
422 return node;
423 }
424
avc_latest_notif_update(int seqno,int is_insert)425 static int avc_latest_notif_update(int seqno, int is_insert)
426 {
427 int ret = 0;
428 static DEFINE_SPINLOCK(notif_lock);
429 unsigned long flag;
430
431 spin_lock_irqsave(¬if_lock, flag);
432 if ( is_insert )
433 {
434 if ( seqno < avc_cache.latest_notif )
435 {
436 printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n",
437 seqno, avc_cache.latest_notif);
438 ret = -EAGAIN;
439 }
440 }
441 else
442 {
443 if ( seqno > avc_cache.latest_notif )
444 avc_cache.latest_notif = seqno;
445 }
446 spin_unlock_irqrestore(¬if_lock, flag);
447
448 return ret;
449 }
450
/**
 * avc_insert - Insert an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @avd: access vector decision supplied by the security server
 *
 * Insert an AVC entry for the SID pair (@ssid, @tsid) and class @tclass.
 * If the sequence number @avd->seqno is not less than the latest
 * revocation notification, the decision is copied into a new cache node,
 * which replaces any existing node for the same key, and the new node is
 * returned.  Returns NULL if the decision is stale or allocation fails.
 */
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass,
                                   struct av_decision *avd)
{
    struct avc_node *pos, *node = NULL;
    int hvalue;
    unsigned long flag;

    /* Refuse to cache a decision computed against an outdated policy. */
    if ( avc_latest_notif_update(avd->seqno, 1) )
        goto out;

    node = avc_alloc_node();
    if ( node )
    {
        struct hlist_head *head;
        struct hlist_node *next;
        spinlock_t *lock;

        hvalue = avc_hash(ssid, tsid, tclass);
        avc_node_populate(node, ssid, tsid, tclass, avd);

        head = &avc_cache.slots[hvalue];
        lock = &avc_cache.slots_lock[hvalue];

        /* Writers serialise per bucket; readers traverse under RCU. */
        spin_lock_irqsave(lock, flag);
        hlist_for_each_entry(pos, next, head, list)
        {
            if ( pos->ae.ssid == ssid &&
                 pos->ae.tsid == tsid &&
                 pos->ae.tclass == tclass )
            {
                /* Same key already cached: swap the old node out. */
                avc_node_replace(node, pos);
                goto found;
            }
        }
        /* No existing entry: publish the new node at the chain head. */
        hlist_add_head_rcu(&node->list, head);
 found:
        spin_unlock_irqrestore(lock, flag);
    }
 out:
    return node;
}
509
/**
 * avc_audit - Audit the granting or denial of permissions.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions
 * @avd: access vector decisions
 * @result: result from avc_has_perm_noaudit
 * @a: auxiliary audit data (may be NULL)
 *
 * Audit the granting or denial of permissions in accordance with the
 * policy.  This function is typically called by avc_has_perm() after a
 * permission check, but can also be called directly by callers who use
 * avc_has_perm_noaudit() in order to separate the permission check from
 * the auditing.  For example, this separation is useful when the
 * permission check must be performed under a lock, to allow the lock to
 * be released before calling the auditing code.
 */
void avc_audit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
               struct av_decision *avd, int result, struct avc_audit_data *a)
{
    struct domain *cdom = current->domain;
    u32 denied, audited;
    struct avc_dump_buf buf;

    /* Decide which bits to report, honouring the policy's audit masks. */
    denied = requested & ~avd->allowed;
    if ( denied )
    {
        audited = denied;
        if ( !(audited & avd->auditdeny) )
            return;
    }
    else if ( result )
    {
        /* Check failed for a non-permission reason: audit the request. */
        audited = denied = requested;
    }
    else
    {
        audited = requested;
        if ( !(audited & avd->auditallow) )
            return;
    }
    buf.start = xmalloc_bytes(AVC_BUF_SIZE);
    if ( !buf.start )
    {
        /* Out of memory: fall back to the static emergency buffer. */
        spin_lock(&avc_emerg_lock);
        buf.start = avc_emerg_buf;
    }
    buf.pos = buf.start;
    buf.free = AVC_BUF_SIZE;

    avc_printk(&buf, "avc: %s ", denied ? "denied" : "granted");
    avc_dump_av(&buf, tclass, audited);
    avc_printk(&buf, " for ");

    /* Identify the domain(s) involved, preferring explicit audit data. */
    if ( a && (a->sdom || a->tdom) )
    {
        if ( a->sdom && a->tdom && a->sdom != a->tdom )
            avc_printk(&buf, "domid=%d target=%d ", a->sdom->domain_id, a->tdom->domain_id);
        else if ( a->sdom )
            avc_printk(&buf, "domid=%d ", a->sdom->domain_id);
        else
            avc_printk(&buf, "target=%d ", a->tdom->domain_id);
    }
    else if ( cdom )
        avc_printk(&buf, "domid=%d ", cdom->domain_id);
    /* Append any type-specific detail; type 0 prints nothing. */
    switch ( a ? a->type : 0 ) {
    case AVC_AUDIT_DATA_DEV:
        avc_printk(&buf, "device=%#lx ", a->device);
        break;
    case AVC_AUDIT_DATA_IRQ:
        avc_printk(&buf, "irq=%d ", a->irq);
        break;
    case AVC_AUDIT_DATA_RANGE:
        avc_printk(&buf, "range=%#lx-%#lx ", a->range.start, a->range.end);
        break;
    case AVC_AUDIT_DATA_MEMORY:
        avc_printk(&buf, "pte=%#lx mfn=%#lx ", a->memory.pte, a->memory.mfn);
        break;
    case AVC_AUDIT_DATA_DTDEV:
        avc_printk(&buf, "dtdevice=%s ", a->dtdev);
        break;
    }

    avc_dump_query(&buf, ssid, tsid, tclass);
    avc_printk(&buf, "\n");
    printk("%s", buf.start);

    if ( buf.start == avc_emerg_buf )
        spin_unlock(&avc_emerg_lock);
    else
        xfree(buf.start);
}
604
/**
 * avc_update_node - Update an AVC entry.
 * @perms: permission mask bits to add to the allowed vector
 * @ssid: source security identifier of the entry
 * @tsid: target security identifier of the entry
 * @tclass: target security class of the entry
 * @seqno: policy sequence number the entry must match
 *
 * If no matching valid AVC entry exists, returns -ENOENT.  If node
 * allocation fails, returns -ENOMEM.  Otherwise the entry is updated by
 * building a copy with the extra permissions and replacing the original,
 * which is released later via RCU.  Returns 0 on success.
 */
static int avc_update_node(u32 perms, u32 ssid, u32 tsid, u16 tclass,
                           u32 seqno)
{
    int hvalue, rc = 0;
    unsigned long flag;
    struct avc_node *pos, *node, *orig = NULL;
    struct hlist_head *head;
    struct hlist_node *next;
    spinlock_t *lock;

    /* Allocate the replacement before taking the bucket lock. */
    node = avc_alloc_node();
    if ( !node )
    {
        rc = -ENOMEM;
        goto out;
    }

    hvalue = avc_hash(ssid, tsid, tclass);

    head = &avc_cache.slots[hvalue];
    lock = &avc_cache.slots_lock[hvalue];

    spin_lock_irqsave(lock, flag);

    /* Find the entry with the same key AND the same policy seqno. */
    hlist_for_each_entry(pos, next, head, list)
    {
        if ( ssid == pos->ae.ssid &&
             tsid == pos->ae.tsid &&
             tclass == pos->ae.tclass &&
             seqno == pos->ae.avd.seqno )
        {
            orig = pos;
            break;
        }
    }

    if ( !orig )
    {
        rc = -ENOENT;
        /* Never published: free immediately, no RCU deferral needed. */
        avc_node_kill(node);
        goto out_unlock;
    }

    /*
     * Copy and replace original node.
     */

    avc_node_populate(node, ssid, tsid, tclass, &orig->ae.avd);

    node->ae.avd.allowed |= perms;
    avc_node_replace(node, orig);
 out_unlock:
    spin_unlock_irqrestore(lock, flag);
 out:
    return rc;
}
672
/**
 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
 * @seqno: policy sequence number
 *
 * Deletes every cached node bucket-by-bucket (each under its writer lock,
 * with deletions deferred through RCU), then records @seqno as the latest
 * revocation notification.  Always returns 0.
 */
int avc_ss_reset(u32 seqno)
{
    int i, rc = 0;
    unsigned long flag;
    struct avc_node *node;
    struct hlist_head *head;
    struct hlist_node *next;
    spinlock_t *lock;

    for ( i = 0; i < AVC_CACHE_SLOTS; i++ )
    {
        head = &avc_cache.slots[i];
        lock = &avc_cache.slots_lock[i];

        spin_lock_irqsave(lock, flag);
        rcu_read_lock(&avc_rcu_lock);
        hlist_for_each_entry(node, next, head, list)
            avc_node_delete(node);
        rcu_read_unlock(&avc_rcu_lock);
        spin_unlock_irqrestore(lock, flag);
    }

    avc_latest_notif_update(seqno, 0);
    return rc;
}
702
/**
 * avc_has_perm_noaudit - Check permissions but perform no auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @in_avd: if non-NULL, receives a copy of the access vector decisions
 *
 * Check the AVC to determine whether the @requested permissions are
 * granted for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to
 * obtain a new decision and add it to the cache.  Return %0 if all
 * @requested permissions are granted, -%EACCES if any permissions are
 * denied, or another -errno upon other errors.  This function is
 * typically called by avc_has_perm(), but may also be called directly to
 * separate permission checking from auditing, e.g. in cases where a lock
 * must be held for the check but should be released for the auditing.
 */
int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested,
                         struct av_decision *in_avd)
{
    struct avc_node *node;
    struct av_decision avd_entry, *avd;
    int rc = 0;
    u32 denied;

    BUG_ON(!requested);

    rcu_read_lock(&avc_rcu_lock);

    node = avc_lookup(ssid, tsid, tclass);
    if ( !node )
    {
        /* Miss: drop the RCU lock across the security-server call. */
        rcu_read_unlock(&avc_rcu_lock);

        if ( in_avd )
            avd = in_avd;
        else
            avd = &avd_entry;

        rc = security_compute_av(ssid,tsid,tclass,requested,avd);
        if ( rc )
            goto out;
        /* Re-enter RCU before publishing the new decision. */
        rcu_read_lock(&avc_rcu_lock);
        node = avc_insert(ssid,tsid,tclass,avd);
    } else {
        /* Hit: avd points into the node, valid while under RCU. */
        if ( in_avd )
            memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
        avd = &node->ae.avd;
    }

    denied = requested & ~(avd->allowed);

    if ( denied )
    {
        /* In permissive mode, grant and remember; otherwise deny. */
        if ( !flask_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE) )
            avc_update_node(requested, ssid,tsid,tclass,avd->seqno);
        else
            rc = -EACCES;
    }

    rcu_read_unlock(&avc_rcu_lock);
 out:
    return rc;
}
769
770 /**
771 * avc_has_perm - Check permissions and perform any appropriate auditing.
772 * @ssid: source security identifier
773 * @tsid: target security identifier
774 * @tclass: target security class
775 * @requested: requested permissions, interpreted based on @tclass
776 * @auditdata: auxiliary audit data
777 *
778 * Check the AVC to determine whether the @requested permissions are granted
779 * for the SID pair (@ssid, @tsid), interpreting the permissions
780 * based on @tclass, and call the security server on a cache miss to obtain
781 * a new decision and add it to the cache. Audit the granting or denial of
782 * permissions in accordance with the policy. Return %0 if all @requested
783 * permissions are granted, -%EACCES if any permissions are denied, or
784 * another -errno upon other errors.
785 */
avc_has_perm(u32 ssid,u32 tsid,u16 tclass,u32 requested,struct avc_audit_data * auditdata)786 int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
787 u32 requested, struct avc_audit_data *auditdata)
788 {
789 struct av_decision avd;
790 int rc;
791
792 rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, &avd);
793 avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
794 return rc;
795 }
796