1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * processor thermal device mailbox driver for Workload type hints
4 * Copyright (c) 2020, Intel Corporation.
5 */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/pci.h>
10 #include <linux/io-64-nonatomic-lo-hi.h>
11 #include "processor_thermal_device.h"
12
13 #define MBOX_CMD_WORKLOAD_TYPE_READ 0x0E
14 #define MBOX_CMD_WORKLOAD_TYPE_WRITE 0x0F
15
16 #define MBOX_OFFSET_DATA 0x5810
17 #define MBOX_OFFSET_INTERFACE 0x5818
18
19 #define MBOX_BUSY_BIT 31
20 #define MBOX_RETRY_COUNT 100
21
22 #define MBOX_DATA_BIT_VALID 31
23 #define MBOX_DATA_BIT_AC_DC 30
24
25 static DEFINE_MUTEX(mbox_lock);
26
/*
 * send_mbox_cmd - issue one command through the processor thermal mailbox.
 * @pdev:	processor thermal PCI device (drvdata holds the mapped MMIO base)
 * @cmd_id:	mailbox command (MBOX_CMD_WORKLOAD_TYPE_READ/WRITE)
 * @cmd_data:	payload written to the data register for WRITE commands
 * @cmd_resp:	optional out-param for the data register contents on success
 *
 * Sequence: wait for the busy bit to clear, optionally write the payload,
 * write the command with the busy bit set, then wait for completion.
 * Serialized by mbox_lock since the data/interface registers are shared.
 *
 * Returns 0 on success, -EBUSY if the mailbox never became idle within
 * MBOX_RETRY_COUNT polls, or -ENXIO if the hardware reported an error code.
 */
static int send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u64 *cmd_resp)
{
	struct proc_thermal_device *proc_priv;
	u32 retries, data;
	int ret;

	mutex_lock(&mbox_lock);
	proc_priv = pci_get_drvdata(pdev);

	/* Poll for rb bit == 0 (mailbox idle before submitting) */
	retries = MBOX_RETRY_COUNT;
	do {
		data = readl((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));
		if (data & BIT_ULL(MBOX_BUSY_BIT)) {
			ret = -EBUSY;
			continue;
		}
		ret = 0;
		break;
	} while (--retries);

	if (ret)
		goto unlock_mbox;

	/* Payload must be in the data register before the command is issued */
	if (cmd_id == MBOX_CMD_WORKLOAD_TYPE_WRITE)
		writel(cmd_data, (void __iomem *) ((proc_priv->mmio_base + MBOX_OFFSET_DATA)));

	/* Write command register; setting the busy bit kicks off execution */
	data = BIT_ULL(MBOX_BUSY_BIT) | cmd_id;
	writel(data, (void __iomem *) ((proc_priv->mmio_base + MBOX_OFFSET_INTERFACE)));

	/* Poll for rb bit == 0 (command completed) */
	retries = MBOX_RETRY_COUNT;
	do {
		data = readl((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_INTERFACE));
		if (data & BIT_ULL(MBOX_BUSY_BIT)) {
			ret = -EBUSY;
			continue;
		}

		/* Busy cleared but non-zero status: firmware rejected the command */
		if (data) {
			ret = -ENXIO;
			goto unlock_mbox;
		}

		ret = 0;

		if (!cmd_resp)
			break;

		/* READ responses are 32-bit; other commands return 64 bits */
		if (cmd_id == MBOX_CMD_WORKLOAD_TYPE_READ)
			*cmd_resp = readl((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_DATA));
		else
			*cmd_resp = readq((void __iomem *) (proc_priv->mmio_base + MBOX_OFFSET_DATA));

		break;
	} while (--retries);

unlock_mbox:
	mutex_unlock(&mbox_lock);
	return ret;
}
89
/*
 * processor_thermal_send_mbox_cmd - exported entry point for sibling
 * processor-thermal modules to issue mailbox commands; delegates to the
 * locked internal helper.
 */
int processor_thermal_send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u64 *cmd_resp)
{
	int status;

	status = send_mbox_cmd(pdev, cmd_id, cmd_data, cmd_resp);

	return status;
}
EXPORT_SYMBOL_GPL(processor_thermal_send_mbox_cmd);
95
96 /* List of workload types */
97 static const char * const workload_types[] = {
98 "none",
99 "idle",
100 "semi_active",
101 "bursty",
102 "sustained",
103 "battery_life",
104 NULL
105 };
106
107
workload_available_types_show(struct device * dev,struct device_attribute * attr,char * buf)108 static ssize_t workload_available_types_show(struct device *dev,
109 struct device_attribute *attr,
110 char *buf)
111 {
112 int i = 0;
113 int ret = 0;
114
115 while (workload_types[i] != NULL)
116 ret += sprintf(&buf[ret], "%s ", workload_types[i++]);
117
118 ret += sprintf(&buf[ret], "\n");
119
120 return ret;
121 }
122
123 static DEVICE_ATTR_RO(workload_available_types);
124
workload_type_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t count)125 static ssize_t workload_type_store(struct device *dev,
126 struct device_attribute *attr,
127 const char *buf, size_t count)
128 {
129 struct pci_dev *pdev = to_pci_dev(dev);
130 char str_preference[15];
131 u32 data = 0;
132 ssize_t ret;
133
134 ret = sscanf(buf, "%14s", str_preference);
135 if (ret != 1)
136 return -EINVAL;
137
138 ret = match_string(workload_types, -1, str_preference);
139 if (ret < 0)
140 return ret;
141
142 ret &= 0xff;
143
144 if (ret)
145 data = BIT(MBOX_DATA_BIT_VALID) | BIT(MBOX_DATA_BIT_AC_DC);
146
147 data |= ret;
148
149 ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_WRITE, data, NULL);
150 if (ret)
151 return false;
152
153 return count;
154 }
155
workload_type_show(struct device * dev,struct device_attribute * attr,char * buf)156 static ssize_t workload_type_show(struct device *dev,
157 struct device_attribute *attr,
158 char *buf)
159 {
160 struct pci_dev *pdev = to_pci_dev(dev);
161 u64 cmd_resp;
162 int ret;
163
164 ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, 0, &cmd_resp);
165 if (ret)
166 return false;
167
168 cmd_resp &= 0xff;
169
170 if (cmd_resp > ARRAY_SIZE(workload_types) - 1)
171 return -EINVAL;
172
173 return sprintf(buf, "%s\n", workload_types[cmd_resp]);
174 }
175
176 static DEVICE_ATTR_RW(workload_type);
177
/* Attributes published under the "workload_request" sysfs group */
static struct attribute *workload_req_attrs[] = {
	&dev_attr_workload_available_types.attr,
	&dev_attr_workload_type.attr,
	NULL
};

static const struct attribute_group workload_req_attribute_group = {
	.attrs = workload_req_attrs,
	.name = "workload_request"
};

/*
 * Set once proc_thermal_mbox_add() creates the sysfs group, so that
 * proc_thermal_mbox_remove() only tears down what was actually created
 * (the group is skipped when the mailbox probe fails).
 */
static bool workload_req_created;
192
proc_thermal_mbox_add(struct pci_dev * pdev,struct proc_thermal_device * proc_priv)193 int proc_thermal_mbox_add(struct pci_dev *pdev, struct proc_thermal_device *proc_priv)
194 {
195 u64 cmd_resp;
196 int ret;
197
198 /* Check if there is a mailbox support, if fails return success */
199 ret = send_mbox_cmd(pdev, MBOX_CMD_WORKLOAD_TYPE_READ, 0, &cmd_resp);
200 if (ret)
201 return 0;
202
203 ret = sysfs_create_group(&pdev->dev.kobj, &workload_req_attribute_group);
204 if (ret)
205 return ret;
206
207 workload_req_created = true;
208
209 return 0;
210 }
211 EXPORT_SYMBOL_GPL(proc_thermal_mbox_add);
212
proc_thermal_mbox_remove(struct pci_dev * pdev)213 void proc_thermal_mbox_remove(struct pci_dev *pdev)
214 {
215 if (workload_req_created)
216 sysfs_remove_group(&pdev->dev.kobj, &workload_req_attribute_group);
217
218 workload_req_created = false;
219
220 }
221 EXPORT_SYMBOL_GPL(proc_thermal_mbox_remove);
222
223 MODULE_LICENSE("GPL v2");
224