/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) Allen Kay <allen.m.kay@intel.com>
 * Copyright (C) Weidong Han <weidong.han@intel.com>
 */

#include <xen/sched.h>
#include <xen/softirq.h>
#include <xen/domain_page.h>
#include <asm/paging.h>
#include <xen/iommu.h>
#include <xen/irq.h>
#include <xen/numa.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include "../iommu.h"
#include "../dmar.h"
#include "../vtd.h"
#include "../extern.h"

/*
 * iommu_inclusive_mapping: when set, all memory below 4GB is included in dom0
 * 1:1 iommu mappings except xen and unusable regions.
 */
static bool_t __hwdom_initdata iommu_inclusive_mapping = 1;
boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);

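/*
 * The default can be overridden from the Xen command line, e.g. booting with
 * "iommu_inclusive_mapping=0" restricts dom0's 1:1 mappings below 4GB to
 * conventional RAM only.
 */

/*
 * Map a single frame of a VT-d translation structure (e.g. a root, context,
 * or page table page), identified by its machine address, into Xen's virtual
 * address space.
 */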
void *map_vtd_domain_page(u64 maddr)
{
    return map_domain_page(_mfn(paddr_to_pfn(maddr)));
}

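/* Tear down a mapping previously established by map_vtd_domain_page(). */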
void unmap_vtd_domain_page(void *va)
{
    unmap_domain_page(va);
}

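/*
 * CPUID leaf 1 reports the CLFLUSH line size in EBX bits 15:8, in units of
 * 8 bytes; convert it to a byte count.
 */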
unsigned int get_cache_line_size(void)
{
    return ((cpuid_ebx(1) >> 8) & 0xff) * 8;
}

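/* Write back and invalidate the single cache line containing @addr. */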
void cacheline_flush(char * addr)
{
    clflush(addr);
}

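/* Write back and invalidate the entire cache hierarchy of the local CPU. */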
void flush_all_cache()
{
    wbinvd();
}

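/*
 * pt_pirq_iterate() callback: for every device/INTx pair bound to this pirq
 * whose PCI link is routed to the ISA IRQ being EOIed, deassert the virtual
 * INTx line; once no asserted instances remain, stop the EOI timeout timer
 * and EOI the physical interrupt.
 */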
static int _hvm_dpci_isairq_eoi(struct domain *d,
                                struct hvm_pirq_dpci *pirq_dpci, void *arg)
{
    struct hvm_irq *hvm_irq = hvm_domain_irq(d);
    unsigned int isairq = (long)arg;
    const struct dev_intx_gsi_link *digl;

    list_for_each_entry ( digl, &pirq_dpci->digl_list, list )
    {
        unsigned int link = hvm_pci_intx_link(digl->device, digl->intx);

        if ( hvm_irq->pci_link.route[link] == isairq )
        {
            hvm_pci_intx_deassert(d, digl->device, digl->intx);
            if ( --pirq_dpci->pending == 0 )
            {
                stop_timer(&pirq_dpci->timer);
                pirq_guest_eoi(dpci_pirq(pirq_dpci));
            }
        }
    }

    return 0;
}

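/*
 * Propagate a guest EOI of an ISA IRQ: if any passed-through interrupt is
 * routed to that ISA IRQ, walk the bound pirqs and forward the EOI to the
 * corresponding physical interrupt(s).
 */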
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
{
    struct hvm_irq_dpci *dpci = NULL;

    ASSERT(isairq < NR_ISAIRQS);
    if ( !iommu_enabled )
        return;

    spin_lock(&d->event_lock);

    dpci = domain_get_irq_dpci(d);

    if ( dpci && test_bit(isairq, dpci->isairq_map) )
    {
        /* Multiple mirq may be mapped to one isa irq */
        pt_pirq_iterate(d, _hvm_dpci_isairq_eoi, (void *)(long)isairq);
    }
    spin_unlock(&d->event_lock);
}

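/*
 * Establish the hardware domain's identity (1:1) IOMMU mappings at boot,
 * covering conventional RAM and, with iommu_inclusive_mapping, most of the
 * address space below 4GB.
 */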
void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
{
    unsigned long i, j, tmp, top;

    BUG_ON(!is_hardware_domain(d));

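    /*
     * Iterate over every pdx covering RAM (max_pdx) and, additionally,
     * everything below the 4GB boundary for the inclusive mapping case.
     */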
    top = max(max_pdx, pfn_to_pdx(0xffffffffUL >> PAGE_SHIFT) + 1);

    for ( i = 0; i < top; i++ )
    {
        int rc = 0;

        /*
         * Set up 1:1 mapping for dom0. Default to use only conventional RAM
         * areas and let RMRRs include needed reserved regions. When set, the
         * inclusive mapping maps in everything below 4GB except unusable
         * ranges.
         */
        unsigned long pfn = pdx_to_pfn(i);

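        /*
         * Above 4GB: map only valid, conventional RAM. Below 4GB: map
         * everything except unusable ranges when the inclusive mapping is
         * enabled, otherwise only conventional RAM.
         */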
        if ( pfn > (0xffffffffUL >> PAGE_SHIFT) ?
             (!mfn_valid(_mfn(pfn)) ||
              !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL)) :
             iommu_inclusive_mapping ?
             page_is_ram_type(pfn, RAM_TYPE_UNUSABLE) :
             !page_is_ram_type(pfn, RAM_TYPE_CONVENTIONAL) )
            continue;

        /* Exclude Xen bits */
        if ( xen_in_range(pfn) )
            continue;

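        /*
         * Xen's PAGE_SHIFT may exceed the IOMMU's 4K granularity, so install
         * one 4K mapping per 4K sub-page of this pfn.
         */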
        tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
        for ( j = 0; j < tmp; j++ )
        {
            int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
                                     IOMMUF_readable|IOMMUF_writable);

            if ( !rc )
                rc = ret;
        }

        if ( rc )
            printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
                   d->domain_id, rc);

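        /*
         * Mapping the whole address space can take a while; periodically
         * process pending softirqs so other work (e.g. the timer) isn't
         * starved during boot.
         */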
        if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
            process_pending_softirqs();
    }
}