// SPDX-License-Identifier: GPL-2.0-only
/*
 * VFIO PCI Intel Graphics support
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *	Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * Register a device specific region through which to provide read-only
 * access to the Intel IGD opregion. The register defining the opregion
 * address is also virtualized to prevent user modification.
 */
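
/*
 * Layout of the virtual, read-only region exposed to the user (a sketch,
 * derived from the r/w handler below):
 *
 *   0x0000 .. 0x1fff   OpRegion (8 KB). The version is reported as 2.1 when
 *                      the host provides a 2.0 OpRegion with an extended VBT,
 *                      and RVDA is rewritten to point just past the OpRegion.
 *   0x2000 ..          Extended VBT (RVDS bytes), present only when the host
 *                      OpRegion is 2.0+ with non-zero RVDA/RVDS.
 */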

#include <linux/io.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include <linux/vfio_pci_core.h>

#define OPREGION_SIGNATURE	"IntelGraphicsMem"
#define OPREGION_SIZE		(8 * 1024)
#define OPREGION_PCI_ADDR	0xfc

#define OPREGION_RVDA		0x3ba
#define OPREGION_RVDS		0x3c2
#define OPREGION_VERSION	0x16

struct igd_opregion_vbt {
	void *opregion;		/* memremap'd host OpRegion */
	void *vbt_ex;		/* memremap'd extended VBT, NULL if absent */
};

/**
 * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
 * @dst: User buffer ptr to copy to.
 * @off: Offset into the user buffer. Advanced by @bytes on return.
 * @src: Source buffer to copy from.
 * @pos: Offset into the virtual region. Advanced by @bytes on return.
 * @remaining: Bytes left to copy. Decreased by @bytes on return.
 * @bytes: Number of bytes to copy and by which to adjust @off, @pos and
 *	   @remaining.
 *
 * Copy @bytes of OpRegion data from @src to @dst + @off and shift the offsets.
 *
 * Return: 0 on success, -EFAULT otherwise.
 */
static inline unsigned long igd_opregion_shift_copy(char __user *dst,
						    loff_t *off,
						    void *src,
						    loff_t *pos,
						    size_t *remaining,
						    size_t bytes)
{
	if (copy_to_user(dst + (*off), src, bytes))
		return -EFAULT;

	*off += bytes;
	*pos += bytes;
	*remaining -= bytes;

	return 0;
}

static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
			       char __user *buf, size_t count, loff_t *ppos,
			       bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
	size_t remaining;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	count = min_t(size_t, count, vdev->region[i].size - pos);
	remaining = count;

	/* Copy until OpRegion version */
	if (remaining && pos < OPREGION_VERSION) {
		size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy patched (if necessary) OpRegion version */
	if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_VERSION + sizeof(__le16) - pos);
		__le16 version = *(__le16 *)(opregionvbt->opregion +
					     OPREGION_VERSION);

		/* Patch to 2.1 if OpRegion 2.0 has extended VBT */
		if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
			version = cpu_to_le16(0x0201);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&version +
					    (pos - OPREGION_VERSION),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy until RVDA */
	if (remaining && pos < OPREGION_RVDA) {
		size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy modified (if necessary) RVDA */
	if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
		size_t bytes = min_t(size_t, remaining,
				     OPREGION_RVDA + sizeof(__le64) - pos);
		__le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
					  OPREGION_SIZE : 0);

		if (igd_opregion_shift_copy(buf, &off,
					    (u8 *)&rvda + (pos - OPREGION_RVDA),
					    &pos, &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the rest of OpRegion */
	if (remaining && pos < OPREGION_SIZE) {
		size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);

		if (igd_opregion_shift_copy(buf, &off,
					    opregionvbt->opregion + pos, &pos,
					    &remaining, bytes))
			return -EFAULT;
	}

	/* Copy the extended VBT, if it exists */
	if (remaining &&
	    copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
			 remaining))
		return -EFAULT;

	*ppos += count;

	return count;
}

static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
				 struct vfio_pci_region *region)
{
	struct igd_opregion_vbt *opregionvbt = region->data;

	if (opregionvbt->vbt_ex)
		memunmap(opregionvbt->vbt_ex);

	memunmap(opregionvbt->opregion);
	kfree(opregionvbt);
}

static const struct vfio_pci_regops vfio_pci_igd_regops = {
	.rw		= vfio_pci_igd_rw,
	.release	= vfio_pci_igd_release,
};

static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
	__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
	u32 addr, size;
	struct igd_opregion_vbt *opregionvbt;
	int ret;
	u16 version;

	ret = pci_read_config_dword(vdev->pdev, OPREGION_PCI_ADDR, &addr);
	if (ret)
		return ret;

	if (!addr || !(~addr))
		return -ENODEV;

	opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
	if (!opregionvbt)
		return -ENOMEM;

	opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
	if (!opregionvbt->opregion) {
		kfree(opregionvbt);
		return -ENOMEM;
	}

	if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
	if (!size) {
		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return -EINVAL;
	}

	size *= 1024; /* Size field of the OpRegion header is in KB */

	/*
	 * OpRegion and VBT:
	 * When the VBT data doesn't exceed 6KB, it's stored in Mailbox #4.
	 * When the VBT data exceeds 6KB, Mailbox #4 is no longer large enough
	 * to hold it, so the Extended VBT region was introduced with
	 * OpRegion 2.0 to hold the VBT data. Since OpRegion 2.0, RVDA/RVDS
	 * define the extended VBT data location and size.
	 * OpRegion 2.0: RVDA holds the absolute physical address of the
	 * extended VBT data, RVDS holds the VBT data size.
	 * OpRegion 2.1 and above: RVDA holds the address of the extended VBT
	 * data relative to the OpRegion base, RVDS holds the VBT data size.
	 *
	 * Since the RVDA definition is the only difference between OpRegion
	 * 2.0 and 2.1, exposing the OpRegion and the VBT as a contiguous range
	 * for OpRegion 2.0 and above makes it possible to support a
	 * non-contiguous VBT through a single vfio region. From the r/w ops'
	 * view, only a contiguous VBT following the OpRegion with version 2.1+
	 * is exposed, regardless of whether the host OpRegion is 2.0 or a
	 * non-contiguous 2.1+. The r/w ops shift the actual offset into the
	 * VBT on the fly so that data at the correct position is returned to
	 * the requester.
	 */
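	/*
	 * A hypothetical example of the translation above: a host OpRegion
	 * 2.0 with RVDA holding some absolute physical address and
	 * RVDS = 0x1800 is exposed to the user as OpRegion 2.1 with
	 * RVDA = OPREGION_SIZE (0x2000), so the 0x1800 bytes of extended VBT
	 * appear immediately after the 8 KB OpRegion in this region.
	 */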
	version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
					  OPREGION_VERSION));
	if (version >= 0x0200) {
		u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
						   OPREGION_RVDA));
		u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
						   OPREGION_RVDS));

		/* The extended VBT is valid only when RVDA/RVDS are non-zero */
		if (rvda && rvds) {
			size += rvds;

			/*
			 * Extended VBT location by RVDA:
			 * Absolute physical addr for 2.0.
			 * Relative addr to OpRegion header for 2.1+.
			 */
			if (version == 0x0200)
				addr = rvda;
			else
				addr += rvda;

			opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
			if (!opregionvbt->vbt_ex) {
				memunmap(opregionvbt->opregion);
				kfree(opregionvbt);
				return -ENOMEM;
			}
		}
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
		size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
	if (ret) {
		if (opregionvbt->vbt_ex)
			memunmap(opregionvbt->vbt_ex);

		memunmap(opregionvbt->opregion);
		kfree(opregionvbt);
		return ret;
	}

	/* Fill vconfig with the hw value and virtualize register */
	*dwordp = cpu_to_le32(addr);
	memset(vdev->pci_config_map + OPREGION_PCI_ADDR,
	       PCI_CAP_ID_INVALID_VIRT, 4);

	return ret;
}

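/*
 * Read handler for the virtual host/LPC bridge config regions registered
 * below. Reads are split into naturally aligned byte/word/dword config space
 * accesses before being copied back to the user buffer.
 */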
static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
				   char __user *buf, size_t count, loff_t *ppos,
				   bool iswrite)
{
	unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
	struct pci_dev *pdev = vdev->region[i].data;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	size_t size;
	int ret;

	if (pos >= vdev->region[i].size || iswrite)
		return -EINVAL;

	size = count = min(count, (size_t)(vdev->region[i].size - pos));

	if ((pos & 1) && size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	if ((pos & 3) && size > 2) {
		u16 val;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		val = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &val, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size > 3) {
		u32 val;

		ret = pci_user_read_config_dword(pdev, pos, &val);
		if (ret)
			return ret;

		val = cpu_to_le32(val);
		if (copy_to_user(buf + count - size, &val, 4))
			return -EFAULT;

		pos += 4;
		size -= 4;
	}

	while (size >= 2) {
		u16 val;

		ret = pci_user_read_config_word(pdev, pos, &val);
		if (ret)
			return ret;

		val = cpu_to_le16(val);
		if (copy_to_user(buf + count - size, &val, 2))
			return -EFAULT;

		pos += 2;
		size -= 2;
	}

	while (size) {
		u8 val;

		ret = pci_user_read_config_byte(pdev, pos, &val);
		if (ret)
			return ret;

		if (copy_to_user(buf + count - size, &val, 1))
			return -EFAULT;

		pos++;
		size--;
	}

	*ppos += count;

	return count;
}

static void vfio_pci_igd_cfg_release(struct vfio_pci_core_device *vdev,
				     struct vfio_pci_region *region)
{
	struct pci_dev *pdev = region->data;

	pci_dev_put(pdev);
}

static const struct vfio_pci_regops vfio_pci_igd_cfg_regops = {
	.rw		= vfio_pci_igd_cfg_rw,
	.release	= vfio_pci_igd_cfg_release,
};

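/*
 * Expose read-only copies of the host bridge (00:00.0) and LPC/ISA bridge
 * (00:1f.0) config space as device specific regions. Intel graphics drivers
 * typically probe these bridge IDs to identify the platform, so userspace
 * (e.g. a VMM doing IGD assignment) can present matching virtual devices.
 */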
static int vfio_pci_igd_cfg_init(struct vfio_pci_core_device *vdev)
{
	struct pci_dev *host_bridge, *lpc_bridge;
	int ret;

	host_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (!host_bridge)
		return -ENODEV;

	if (host_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    host_bridge->class != (PCI_CLASS_BRIDGE_HOST << 8)) {
		pci_dev_put(host_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_HOST_CFG,
		&vfio_pci_igd_cfg_regops, host_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, host_bridge);
	if (ret) {
		pci_dev_put(host_bridge);
		return ret;
	}

	lpc_bridge = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0x1f, 0));
	if (!lpc_bridge)
		return -ENODEV;

	if (lpc_bridge->vendor != PCI_VENDOR_ID_INTEL ||
	    lpc_bridge->class != (PCI_CLASS_BRIDGE_ISA << 8)) {
		pci_dev_put(lpc_bridge);
		return -EINVAL;
	}

	ret = vfio_pci_register_dev_region(vdev,
		PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
		VFIO_REGION_SUBTYPE_INTEL_IGD_LPC_CFG,
		&vfio_pci_igd_cfg_regops, lpc_bridge->cfg_size,
		VFIO_REGION_INFO_FLAG_READ, lpc_bridge);
	if (ret) {
		pci_dev_put(lpc_bridge);
		return ret;
	}

	return 0;
}

int vfio_pci_igd_init(struct vfio_pci_core_device *vdev)
{
	int ret;

	ret = vfio_pci_igd_opregion_init(vdev);
	if (ret)
		return ret;

	ret = vfio_pci_igd_cfg_init(vdev);
	if (ret)
		return ret;

	return 0;
}