// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Speed Select Interface: Common functions
 * Copyright (c) 2019, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/fs.h>
#include <linux/hashtable.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <uapi/linux/isst_if.h>

#include "isst_if_common.h"

#define MSR_THREAD_ID_INFO	0x53
#define MSR_CPU_BUS_NUMBER	0x128

static struct isst_if_cmd_cb punit_callbacks[ISST_IF_DEV_MAX];

static int punit_msr_white_list[] = {
	MSR_TURBO_RATIO_LIMIT,
	MSR_CONFIG_TDP_CONTROL,
	MSR_TURBO_RATIO_LIMIT1,
	MSR_TURBO_RATIO_LIMIT2,
};

struct isst_valid_cmd_ranges {
	u16 cmd;
	u16 sub_cmd_beg;
	u16 sub_cmd_end;
};

struct isst_cmd_set_req_type {
	u16 cmd;
	u16 sub_cmd;
	u16 param;
};

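/*
 * Each entry permits mailbox command ->cmd with sub-commands in the
 * inclusive range [->sub_cmd_beg, ->sub_cmd_end]; anything outside these
 * ranges is rejected by isst_if_mbox_cmd_invalid() below.
 */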
static const struct isst_valid_cmd_ranges isst_valid_cmds[] = {
	{0xD0, 0x00, 0x03},
	{0x7F, 0x00, 0x0C},
	{0x7F, 0x10, 0x12},
	{0x7F, 0x20, 0x23},
	{0x94, 0x03, 0x03},
	{0x95, 0x03, 0x03},
};

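/*
 * Mailbox (command, sub-command, parameter) triples that modify PUNIT
 * state; isst_if_mbox_cmd_set_req() matches requests against this table.
 */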
static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = {
	{0xD0, 0x00, 0x08},
	{0xD0, 0x01, 0x08},
	{0xD0, 0x02, 0x08},
	{0xD0, 0x03, 0x08},
	{0x7F, 0x02, 0x00},
	{0x7F, 0x08, 0x00},
	{0x95, 0x03, 0x03},
};

struct isst_cmd {
	struct hlist_node hnode;
	u64 data;
	u32 cmd;
	int cpu;
	int mbox_cmd_type;
	u32 param;
};

static DECLARE_HASHTABLE(isst_hash, 8);
static DEFINE_MUTEX(isst_hash_lock);

static int isst_store_new_cmd(int cmd, u32 cpu, int mbox_cmd_type, u32 param,
			      u32 data)
{
	struct isst_cmd *sst_cmd;

	sst_cmd = kmalloc(sizeof(*sst_cmd), GFP_KERNEL);
	if (!sst_cmd)
		return -ENOMEM;

	sst_cmd->cpu = cpu;
	sst_cmd->cmd = cmd;
	sst_cmd->mbox_cmd_type = mbox_cmd_type;
	sst_cmd->param = param;
	sst_cmd->data = data;

	hash_add(isst_hash, &sst_cmd->hnode, sst_cmd->cmd);

	return 0;
}

static void isst_delete_hash(void)
{
	struct isst_cmd *sst_cmd;
	struct hlist_node *tmp;
	int i;

	hash_for_each_safe(isst_hash, i, tmp, sst_cmd, hnode) {
		hash_del(&sst_cmd->hnode);
		kfree(sst_cmd);
	}
}

/**
 * isst_store_cmd() - Store command to a hash table
 * @cmd: Mailbox command.
 * @sub_cmd: Mailbox sub-command or MSR id.
 * @cpu: Target CPU for the command.
 * @mbox_cmd_type: Mailbox or MSR command.
 * @param: Mailbox parameter.
 * @data: Mailbox request data or MSR data.
 *
 * Stores the command in a hash table if no such command is already
 * stored. If one is already stored, update its parameter and data to the
 * latest values.
 *
 * Return: Result of storing to the hash table; 0 for success, others for
 * failure.
 */
int isst_store_cmd(int cmd, int sub_cmd, u32 cpu, int mbox_cmd_type,
		   u32 param, u64 data)
{
	struct isst_cmd *sst_cmd;
	int full_cmd, ret;

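	/*
	 * The hash key packs the mailbox command into the upper 16 bits and
	 * the sub-command (or the MSR index, for MSR commands) into the
	 * lower 16 bits.
	 */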
	full_cmd = (cmd & GENMASK_ULL(15, 0)) << 16;
	full_cmd |= (sub_cmd & GENMASK_ULL(15, 0));
	mutex_lock(&isst_hash_lock);
	hash_for_each_possible(isst_hash, sst_cmd, hnode, full_cmd) {
		if (sst_cmd->cmd == full_cmd && sst_cmd->cpu == cpu &&
		    sst_cmd->mbox_cmd_type == mbox_cmd_type) {
			sst_cmd->param = param;
			sst_cmd->data = data;
			mutex_unlock(&isst_hash_lock);
			return 0;
		}
	}

	ret = isst_store_new_cmd(full_cmd, cpu, mbox_cmd_type, param, data);
	mutex_unlock(&isst_hash_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(isst_store_cmd);
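
/*
 * Illustrative sketch (an assumption, not code from this driver): an
 * interface driver would typically record each successful set request so
 * it can be replayed on resume. mbox_cmd here is a filled-in
 * struct isst_if_mbox_cmd; mbox_cmd_type 1 marks a mailbox command:
 *
 *	if (isst_if_mbox_cmd_set_req(&mbox_cmd)) {
 *		ret = isst_store_cmd(mbox_cmd.command, mbox_cmd.sub_command,
 *				     mbox_cmd.logical_cpu, 1,
 *				     mbox_cmd.parameter, mbox_cmd.req_data);
 *		if (ret)
 *			return ret;
 *	}
 */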

static void isst_mbox_resume_command(struct isst_if_cmd_cb *cb,
				     struct isst_cmd *sst_cmd)
{
	struct isst_if_mbox_cmd mbox_cmd;
	int wr_only;

	mbox_cmd.command = (sst_cmd->cmd & GENMASK_ULL(31, 16)) >> 16;
	mbox_cmd.sub_command = sst_cmd->cmd & GENMASK_ULL(15, 0);
	mbox_cmd.parameter = sst_cmd->param;
	mbox_cmd.req_data = sst_cmd->data;
	mbox_cmd.logical_cpu = sst_cmd->cpu;
	(cb->cmd_callback)((u8 *)&mbox_cmd, &wr_only, 1);
}


/**
 * isst_resume_common() - Process resume request
 *
 * On resume, replay all stored mailbox commands and MSR writes.
 *
 * Return: None.
 */
void isst_resume_common(void)
{
	struct isst_cmd *sst_cmd;
	int i;

	hash_for_each(isst_hash, i, sst_cmd, hnode) {
		struct isst_if_cmd_cb *cb;

		if (sst_cmd->mbox_cmd_type) {
			cb = &punit_callbacks[ISST_IF_DEV_MBOX];
			if (cb->registered)
				isst_mbox_resume_command(cb, sst_cmd);
		} else {
			wrmsrl_safe_on_cpu(sst_cmd->cpu, sst_cmd->cmd,
					   sst_cmd->data);
		}
	}
}
EXPORT_SYMBOL_GPL(isst_resume_common);
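
/*
 * Illustrative sketch (an assumption): an interface driver would call
 * this from its PM resume callback, for example:
 *
 *	static int __maybe_unused isst_if_resume(struct device *dev)
 *	{
 *		isst_resume_common();
 *		return 0;
 *	}
 */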

static void isst_restore_msr_local(int cpu)
{
	struct isst_cmd *sst_cmd;
	int i;

	mutex_lock(&isst_hash_lock);
	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (!punit_msr_white_list[i])
			break;

		hash_for_each_possible(isst_hash, sst_cmd, hnode,
				       punit_msr_white_list[i]) {
			if (!sst_cmd->mbox_cmd_type && sst_cmd->cpu == cpu)
				wrmsrl_safe(sst_cmd->cmd, sst_cmd->data);
		}
	}
	mutex_unlock(&isst_hash_lock);
}

/**
 * isst_if_mbox_cmd_invalid() - Check for invalid mailbox commands
 * @cmd: Pointer to the command structure to verify.
 *
 * An invalid command to the PUNIT may result in instability of the
 * platform, so this function checks commands against a whitelist of
 * allowed (command, sub-command) ranges.
 *
 * Return: Return true if the command is invalid, else false.
 */
bool isst_if_mbox_cmd_invalid(struct isst_if_mbox_cmd *cmd)
{
	int i;

	if (cmd->logical_cpu >= nr_cpu_ids)
		return true;

	for (i = 0; i < ARRAY_SIZE(isst_valid_cmds); ++i) {
		if (cmd->command == isst_valid_cmds[i].cmd &&
		    (cmd->sub_command >= isst_valid_cmds[i].sub_cmd_beg &&
		     cmd->sub_command <= isst_valid_cmds[i].sub_cmd_end)) {
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_invalid);

/**
 * isst_if_mbox_cmd_set_req() - Check if a mailbox command is a set request
 * @cmd: Pointer to the command structure to verify.
 *
 * Check whether the given mailbox command is a set request rather than a
 * get request.
 *
 * Return: Return true if the command is a set request, else false.
 */
bool isst_if_mbox_cmd_set_req(struct isst_if_mbox_cmd *cmd)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(isst_cmd_set_reqs); ++i) {
		if (cmd->command == isst_cmd_set_reqs[i].cmd &&
		    cmd->sub_command == isst_cmd_set_reqs[i].sub_cmd &&
		    cmd->parameter == isst_cmd_set_reqs[i].param) {
			return true;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(isst_if_mbox_cmd_set_req);
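
/*
 * Illustrative sketch (an assumption): an interface driver's command
 * callback would typically combine both checks before forwarding a
 * request to the hardware:
 *
 *	if (isst_if_mbox_cmd_invalid(mbox_cmd))
 *		return -EINVAL;
 *
 *	if (isst_if_mbox_cmd_set_req(mbox_cmd) && !capable(CAP_SYS_ADMIN))
 *		return -EPERM;
 */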

static int isst_if_get_platform_info(void __user *argp)
{
	struct isst_if_platform_info info;

	info.api_version = ISST_IF_API_VERSION;
	info.driver_version = ISST_IF_DRIVER_VERSION;
	info.max_cmds_per_ioctl = ISST_IF_CMD_LIMIT;
	info.mbox_supported = punit_callbacks[ISST_IF_DEV_MBOX].registered;
	info.mmio_supported = punit_callbacks[ISST_IF_DEV_MMIO].registered;

	if (copy_to_user(argp, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

#define ISST_MAX_BUS_NUMBER	2

struct isst_if_cpu_info {
	/* For BUS 0 and BUS 1 only, which we need for the PUNIT interface */
	int bus_info[ISST_MAX_BUS_NUMBER];
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
	int punit_cpu_id;
	int numa_node;
};

struct isst_if_pkg_info {
	struct pci_dev *pci_dev[ISST_MAX_BUS_NUMBER];
};

static struct isst_if_cpu_info *isst_cpu_info;
static struct isst_if_pkg_info *isst_pkg_info;

#define ISST_MAX_PCI_DOMAINS	8

static struct pci_dev *_isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *matched_pci_dev = NULL;
	struct pci_dev *pci_dev = NULL;
	int no_matches = 0, pkg_id;
	int i, bus_number;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pkg_id = topology_physical_package_id(cpu);

	bus_number = isst_cpu_info[cpu].bus_info[bus_no];
	if (bus_number < 0)
		return NULL;

	for (i = 0; i < ISST_MAX_PCI_DOMAINS; ++i) {
		struct pci_dev *_pci_dev;
		int node;

		_pci_dev = pci_get_domain_bus_and_slot(i, bus_number, PCI_DEVFN(dev, fn));
		if (!_pci_dev)
			continue;

		++no_matches;
		if (!matched_pci_dev)
			matched_pci_dev = _pci_dev;

		node = dev_to_node(&_pci_dev->dev);
		if (node == NUMA_NO_NODE) {
			pr_info("Failed to get numa node for CPU:%d bus:%d dev:%d fn:%d\n",
				cpu, bus_no, dev, fn);
			continue;
		}

		if (node == isst_cpu_info[cpu].numa_node) {
			isst_pkg_info[pkg_id].pci_dev[bus_no] = _pci_dev;

			pci_dev = _pci_dev;
			break;
		}
	}

	/*
	 * If there is no NUMA-matched pci_dev, the following cases are
	 * possible:
	 * 1. CONFIG_NUMA is not defined: in this case, if there is only a
	 *    single device match, we don't need NUMA information. Simply
	 *    return the last match. Otherwise return NULL.
	 * 2. NUMA information is not exposed via the _SEG method. This is
	 *    similar to case 1.
	 * 3. The NUMA information doesn't match the CPU's NUMA node and
	 *    there is more than one match: return NULL.
	 */
	if (!pci_dev && no_matches == 1)
		pci_dev = matched_pci_dev;

	/* Return the pci_dev pointer for any matched CPU in the package */
	if (!pci_dev)
		pci_dev = isst_pkg_info[pkg_id].pci_dev[bus_no];

	return pci_dev;
}


/**
 * isst_if_get_pci_dev() - Get the PCI device instance for a CPU
 * @cpu: Logical CPU number.
 * @bus_no: The bus number assigned by the hardware.
 * @dev: The device number assigned by the hardware.
 * @fn: The function number assigned by the hardware.
 *
 * Using cached bus information, find the PCI device for a given bus
 * number, device and function.
 *
 * Return: Return pci_dev pointer or NULL.
 */
struct pci_dev *isst_if_get_pci_dev(int cpu, int bus_no, int dev, int fn)
{
	struct pci_dev *pci_dev;

	if (bus_no < 0 || bus_no >= ISST_MAX_BUS_NUMBER || cpu < 0 ||
	    cpu >= nr_cpu_ids || cpu >= num_possible_cpus())
		return NULL;

	pci_dev = isst_cpu_info[cpu].pci_dev[bus_no];

	if (pci_dev && pci_dev->devfn == PCI_DEVFN(dev, fn))
		return pci_dev;

	return _isst_if_get_pci_dev(cpu, bus_no, dev, fn);
}
EXPORT_SYMBOL_GPL(isst_if_get_pci_dev);
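
/*
 * Illustrative sketch (an assumption): an MMIO interface driver could
 * resolve the PUNIT device on bus 0, device 0, function 1 as:
 *
 *	struct pci_dev *pdev = isst_if_get_pci_dev(cpu, 0, 0, 1);
 *
 *	if (!pdev)
 *		return -ENODEV;
 */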

static int isst_if_cpu_online(unsigned int cpu)
{
	u64 data;
	int ret;

	isst_cpu_info[cpu].numa_node = cpu_to_node(cpu);

	ret = rdmsrl_safe(MSR_CPU_BUS_NUMBER, &data);
	if (ret) {
		/* This is not a fatal error on an MSR-mailbox-only I/F */
		isst_cpu_info[cpu].bus_info[0] = -1;
		isst_cpu_info[cpu].bus_info[1] = -1;
	} else {
		isst_cpu_info[cpu].bus_info[0] = data & 0xff;
		isst_cpu_info[cpu].bus_info[1] = (data >> 8) & 0xff;
		isst_cpu_info[cpu].pci_dev[0] = _isst_if_get_pci_dev(cpu, 0, 0, 1);
		isst_cpu_info[cpu].pci_dev[1] = _isst_if_get_pci_dev(cpu, 1, 30, 1);
	}

	ret = rdmsrl_safe(MSR_THREAD_ID_INFO, &data);
	if (ret) {
		isst_cpu_info[cpu].punit_cpu_id = -1;
		return ret;
	}
	isst_cpu_info[cpu].punit_cpu_id = data;

	isst_restore_msr_local(cpu);

	return 0;
}

static int isst_if_online_id;

static int isst_if_cpu_info_init(void)
{
	int ret;

	isst_cpu_info = kcalloc(num_possible_cpus(),
				sizeof(*isst_cpu_info),
				GFP_KERNEL);
	if (!isst_cpu_info)
		return -ENOMEM;

	isst_pkg_info = kcalloc(topology_max_packages(),
				sizeof(*isst_pkg_info),
				GFP_KERNEL);
	if (!isst_pkg_info) {
		kfree(isst_cpu_info);
		return -ENOMEM;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/isst-if:online",
				isst_if_cpu_online, NULL);
	if (ret < 0) {
		kfree(isst_pkg_info);
		kfree(isst_cpu_info);
		return ret;
	}

	isst_if_online_id = ret;

	return 0;
}

static void isst_if_cpu_info_exit(void)
{
	cpuhp_remove_state(isst_if_online_id);
	kfree(isst_pkg_info);
	kfree(isst_cpu_info);
}

static long isst_if_proc_phyid_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_cpu_map *cpu_map;

	cpu_map = (struct isst_if_cpu_map *)cmd_ptr;
	if (cpu_map->logical_cpu >= nr_cpu_ids ||
	    cpu_map->logical_cpu >= num_possible_cpus())
		return -EINVAL;

	*write_only = 0;
	cpu_map->physical_cpu = isst_cpu_info[cpu_map->logical_cpu].punit_cpu_id;

	return 0;
}

static bool match_punit_msr_white_list(int msr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(punit_msr_white_list); ++i) {
		if (punit_msr_white_list[i] == msr)
			return true;
	}

	return false;
}

static long isst_if_msr_cmd_req(u8 *cmd_ptr, int *write_only, int resume)
{
	struct isst_if_msr_cmd *msr_cmd;
	int ret;

	msr_cmd = (struct isst_if_msr_cmd *)cmd_ptr;

	if (!match_punit_msr_white_list(msr_cmd->msr))
		return -EINVAL;

	if (msr_cmd->logical_cpu >= nr_cpu_ids)
		return -EINVAL;

	if (msr_cmd->read_write) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		ret = wrmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr,
					 msr_cmd->data);
		*write_only = 1;
		if (!ret && !resume)
			ret = isst_store_cmd(0, msr_cmd->msr,
					     msr_cmd->logical_cpu,
					     0, 0, msr_cmd->data);
	} else {
		u64 data;

		ret = rdmsrl_safe_on_cpu(msr_cmd->logical_cpu,
					 msr_cmd->msr, &data);
		if (!ret) {
			msr_cmd->data = data;
			*write_only = 0;
		}
	}

	return ret;
}

static long isst_if_exec_multi_cmd(void __user *argp, struct isst_if_cmd_cb *cb)
{
	unsigned char __user *ptr;
	u32 cmd_count;
	u8 *cmd_ptr;
	long ret;
	int i;

	/* Each multi command has a u32 command count as the first field */
	if (copy_from_user(&cmd_count, argp, sizeof(cmd_count)))
		return -EFAULT;

	if (!cmd_count || cmd_count > ISST_IF_CMD_LIMIT)
		return -EINVAL;

	cmd_ptr = kmalloc(cb->cmd_size, GFP_KERNEL);
	if (!cmd_ptr)
		return -ENOMEM;

	/* cb->offset points to the start of the first command, after the command count */
	ptr = argp + cb->offset;

	for (i = 0; i < cmd_count; ++i) {
		int wr_only;

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (copy_from_user(cmd_ptr, ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ret = cb->cmd_callback(cmd_ptr, &wr_only, 0);
		if (ret)
			break;

		if (!wr_only && copy_to_user(ptr, cmd_ptr, cb->cmd_size)) {
			ret = -EFAULT;
			break;
		}

		ptr += cb->cmd_size;
	}

	kfree(cmd_ptr);

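	/*
	 * If at least one command was processed, report the count so user
	 * space can distinguish partial success from total failure;
	 * otherwise propagate the error code.
	 */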
	return i ? i : ret;
}

static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct isst_if_cmd_cb cmd_cb;
	struct isst_if_cmd_cb *cb;
	long ret = -ENOTTY;

	switch (cmd) {
	case ISST_IF_GET_PLATFORM_INFO:
		ret = isst_if_get_platform_info(argp);
		break;
	case ISST_IF_GET_PHY_ID:
		cmd_cb.cmd_size = sizeof(struct isst_if_cpu_map);
		cmd_cb.offset = offsetof(struct isst_if_cpu_maps, cpu_map);
		cmd_cb.cmd_callback = isst_if_proc_phyid_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	case ISST_IF_IO_CMD:
		cb = &punit_callbacks[ISST_IF_DEV_MMIO];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MBOX_COMMAND:
		cb = &punit_callbacks[ISST_IF_DEV_MBOX];
		if (cb->registered)
			ret = isst_if_exec_multi_cmd(argp, cb);
		break;
	case ISST_IF_MSR_COMMAND:
		cmd_cb.cmd_size = sizeof(struct isst_if_msr_cmd);
		cmd_cb.offset = offsetof(struct isst_if_msr_cmds, msr_cmd);
		cmd_cb.cmd_callback = isst_if_msr_cmd_req;
		ret = isst_if_exec_multi_cmd(argp, &cmd_cb);
		break;
	default:
		break;
	}

	return ret;
}

/* Lock to prevent module registration when already opened by user space */
static DEFINE_MUTEX(punit_misc_dev_open_lock);
/* Lock to allow one shared misc device for all ISST interfaces */
static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;

static int isst_if_open(struct inode *inode, struct file *file)
{
	int i, ret = 0;

	/* Fail the open if a module is going away */
	mutex_lock(&punit_misc_dev_open_lock);
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered && !try_module_get(cb->owner)) {
			ret = -ENODEV;
			break;
		}
	}
	if (ret) {
		int j;

		for (j = 0; j < i; ++j) {
			struct isst_if_cmd_cb *cb;

			cb = &punit_callbacks[j];
			if (cb->registered)
				module_put(cb->owner);
		}
	} else {
		misc_device_open++;
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return ret;
}

static int isst_if_release(struct inode *inode, struct file *f)
{
	int i;

	mutex_lock(&punit_misc_dev_open_lock);
	misc_device_open--;
	for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
		struct isst_if_cmd_cb *cb = &punit_callbacks[i];

		if (cb->registered)
			module_put(cb->owner);
	}
	mutex_unlock(&punit_misc_dev_open_lock);

	return 0;
}

static const struct file_operations isst_if_char_driver_ops = {
	.open = isst_if_open,
	.unlocked_ioctl = isst_if_def_ioctl,
	.release = isst_if_release,
};

static struct miscdevice isst_if_char_driver = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "isst_interface",
	.fops		= &isst_if_char_driver_ops,
};

static int isst_misc_reg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_device_ret)
		goto unlock_exit;

	if (!misc_usage_count) {
		misc_device_ret = isst_if_cpu_info_init();
		if (misc_device_ret)
			goto unlock_exit;

		misc_device_ret = misc_register(&isst_if_char_driver);
		if (misc_device_ret) {
			isst_if_cpu_info_exit();
			goto unlock_exit;
		}
	}
	misc_usage_count++;

unlock_exit:
	mutex_unlock(&punit_misc_dev_reg_lock);

	return misc_device_ret;
}

static void isst_misc_unreg(void)
{
	mutex_lock(&punit_misc_dev_reg_lock);
	if (misc_usage_count)
		misc_usage_count--;
	if (!misc_usage_count && !misc_device_ret) {
		misc_deregister(&isst_if_char_driver);
		isst_if_cpu_info_exit();
	}
	mutex_unlock(&punit_misc_dev_reg_lock);
}

/**
 * isst_if_cdev_register() - Register callback for IOCTL
 * @device_type: The device type this callback handles.
 * @cb:	Callback structure.
 *
 * This function registers a callback for a device type. On the very first
 * call it registers a misc device, which is used for the user/kernel
 * interface. Subsequent calls simply increment a reference count.
 * Registration fails if the user has already opened the misc device for
 * operation. Also, if the misc device creation failed, it will not try
 * again and all callers will get a failure code.
 *
 * Return: Return the result of misc device creation or -EINVAL for an
 * unsupported device type.
 */
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
	int ret;

	if (device_type >= ISST_IF_DEV_MAX)
		return -EINVAL;

	mutex_lock(&punit_misc_dev_open_lock);
	/* Device is already open, we don't want to add new callbacks */
	if (misc_device_open) {
		mutex_unlock(&punit_misc_dev_open_lock);
		return -EAGAIN;
	}
	memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
	punit_callbacks[device_type].registered = 1;
	mutex_unlock(&punit_misc_dev_open_lock);

	ret = isst_misc_reg();
	if (ret) {
		/*
		 * No need for a mutex as the misc device registration failed,
		 * so no one can open the device yet. Hence no contention.
		 */
		punit_callbacks[device_type].registered = 0;
		return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
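
/*
 * Illustrative sketch (an assumption, based on the fields this file uses
 * from struct isst_if_cmd_cb): an interface driver such as the mailbox
 * driver might register and unregister its callback like this, where
 * isst_if_mbox_proc_cmd is a hypothetical command handler:
 *
 *	static struct isst_if_cmd_cb cb = {
 *		.cmd_size = sizeof(struct isst_if_mbox_cmd),
 *		.offset = offsetof(struct isst_if_mbox_cmds, mbox_cmd),
 *		.cmd_callback = isst_if_mbox_proc_cmd,
 *		.owner = THIS_MODULE,
 *	};
 *
 *	ret = isst_if_cdev_register(ISST_IF_DEV_MBOX, &cb);
 *	...
 *	isst_if_cdev_unregister(ISST_IF_DEV_MBOX);
 */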

/**
 * isst_if_cdev_unregister() - Unregister callback for IOCTL
 * @device_type: The device type to unregister.
 *
 * This function unregisters the previously registered callback. If this
 * is the last callback being unregistered, the misc device is removed.
 *
 * Return: None.
 */
void isst_if_cdev_unregister(int device_type)
{
	isst_misc_unreg();
	mutex_lock(&punit_misc_dev_open_lock);
	punit_callbacks[device_type].registered = 0;
	if (device_type == ISST_IF_DEV_MBOX)
		isst_delete_hash();
	mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);

MODULE_LICENSE("GPL v2");