// SPDX-License-Identifier: GPL-2.0
/*
 * Author: Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
 * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004 Thiemo Seufer
 * Copyright (C) 2013 Imagination Technologies Ltd.
 */
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/nmi.h>

#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/loongarch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/unwind.h>
#include <asm/vdso.h>

#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Idle related variables and functions
 */

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	play_dead();
}
#endif

asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);

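/*
 * Set up the register state for a thread that is about to return to
 * user space (e.g. at exec time): drop CRMD/PRMD to PLV_USER, disable
 * the FPU in EUEN, forget any live FP/SIMD context, and point the
 * exception return address (ERA) and $r3 (stack pointer) at the new
 * program.
 */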
void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
{
	unsigned long crmd;
	unsigned long prmd;
	unsigned long euen;

	/* New thread loses kernel privileges. */
	crmd = regs->csr_crmd & ~(PLV_MASK);
	crmd |= PLV_USER;
	regs->csr_crmd = crmd;

	prmd = regs->csr_prmd & ~(PLV_MASK);
	prmd |= PLV_USER;
	regs->csr_prmd = prmd;

	euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
	regs->csr_euen = euen;
	lose_fpu(0);

	clear_thread_flag(TIF_LSX_CTX_LIVE);
	clear_thread_flag(TIF_LASX_CTX_LIVE);
	clear_used_math();
	regs->csr_era = pc;
	regs->regs[3] = sp;
}

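/*
 * Called on exec: discard any ptrace hardware breakpoints inherited
 * from the old program image.
 */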
void flush_thread(void)
{
	flush_ptrace_hw_breakpoint(current);
}

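/* No architecture-specific thread resources to release on LoongArch. */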
void exit_thread(struct task_struct *tsk)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save any process state which is live in hardware registers to the
	 * parent context prior to duplication. This prevents the new child
	 * state becoming stale if the parent is preempted before copy_thread()
	 * gets a chance to save the parent's live hardware registers to the
	 * child context.
	 */
	preempt_disable();

	if (is_fpu_owner())
		save_fp(current);

	preempt_enable();

	if (used_math())
		memcpy(dst, src, sizeof(struct task_struct));
	else
		memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));

	return 0;
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long childksp;
	unsigned long tls = args->tls;
	unsigned long usp = args->stack;
	unsigned long clone_flags = args->flags;
	struct pt_regs *childregs, *regs = current_pt_regs();

	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* set up new TSS. */
	childregs = (struct pt_regs *) childksp - 1;
	/* Put the stack after the struct pt_regs. */
	childksp = (unsigned long) childregs;
	p->thread.sched_cfa = 0;
	p->thread.csr_euen = 0;
	p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
	p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
	p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
	if (unlikely(args->fn)) {
		/* kernel thread */
		p->thread.reg03 = childksp;
		p->thread.reg23 = (unsigned long)args->fn;
		p->thread.reg24 = (unsigned long)args->fn_arg;
		p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
		p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->csr_euen = p->thread.csr_euen;
		childregs->csr_crmd = p->thread.csr_crmd;
		childregs->csr_prmd = p->thread.csr_prmd;
		childregs->csr_ecfg = p->thread.csr_ecfg;
		goto out;
	}

	/* user thread */
	*childregs = *regs;
	childregs->regs[4] = 0; /* Child gets zero as return value */
	if (usp)
		childregs->regs[3] = usp;

	p->thread.reg03 = (unsigned long) childregs;
	p->thread.reg01 = (unsigned long) ret_from_fork;
	p->thread.sched_ra = (unsigned long) ret_from_fork;

	/*
	 * New tasks lose permission to use the fpu. This accelerates context
	 * switching for most programs since they don't use the fpu.
	 */
	childregs->csr_euen = 0;

	if (clone_flags & CLONE_SETTLS)
		childregs->regs[2] = tls;

out:
	ptrace_hw_copy_thread(p);
	clear_tsk_thread_flag(p, TIF_USEDFPU);
	clear_tsk_thread_flag(p, TIF_USEDSIMD);
	clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
	clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);

	return 0;
}

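/*
 * Return the program counter at which a sleeping task is blocked, i.e.
 * the first return address outside the scheduler. Used for
 * /proc/<pid>/wchan.
 */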
unsigned long __get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;
	struct unwind_state state;

	if (!try_get_task_stack(task))
		return 0;

	for (unwind_start(&state, task, NULL);
	     !unwind_done(&state); unwind_next_frame(&state)) {
		pc = unwind_get_return_address(&state);
		if (!pc)
			break;
		if (in_sched_functions(pc))
			continue;
		break;
	}

	put_task_stack(task);

	return pc;
}

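/*
 * Check whether @stack lies on the current CPU's IRQ stack. On success,
 * fill in @info with the stack bounds and with the previous stack
 * pointer saved at the top of the IRQ stack, so the unwinder can hop
 * back to the interrupted task stack.
 */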
bool in_irq_stack(unsigned long stack, struct stack_info *info)
{
	unsigned long nextsp;
	unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
	unsigned long end = begin + IRQ_STACK_START;

	if (stack < begin || stack >= end)
		return false;

	nextsp = *(unsigned long *)end;
	if (nextsp & (SZREG - 1))
		return false;

	info->begin = begin;
	info->end = end;
	info->next_sp = nextsp;
	info->type = STACK_TYPE_IRQ;

	return true;
}

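/*
 * Check whether @stack lies within @task's kernel stack and, if so,
 * record its bounds in @info. A task stack has no parent stack to
 * unwind to, so next_sp is left as zero.
 */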
bool in_task_stack(unsigned long stack, struct task_struct *task,
			struct stack_info *info)
{
	unsigned long begin = (unsigned long)task_stack_page(task);
	unsigned long end = begin + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->begin = begin;
	info->end = end;
	info->next_sp = 0;
	info->type = STACK_TYPE_TASK;

	return true;
}

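/*
 * Classify @stack: reject NULL or misaligned pointers, then try the
 * task stack and (for the current task only) the per-CPU IRQ stack.
 * Returns 0 with @info filled in, or -EINVAL for an unknown address.
 */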
int get_stack_info(unsigned long stack, struct task_struct *task,
		   struct stack_info *info)
{
	task = task ? : current;

	if (!stack || stack & (SZREG - 1))
		goto unknown;

	if (in_task_stack(stack, task, info))
		return 0;

	if (task != current)
		goto unknown;

	if (in_irq_stack(stack, info))
		return 0;

unknown:
	info->type = STACK_TYPE_UNKNOWN;
	return -EINVAL;
}

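/*
 * Highest usable address for the user stack: leave room below TASK_SIZE
 * for the vDSO image and its data page, plus a gap used to randomize
 * the vDSO base when address-space randomization is enabled.
 */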
unsigned long stack_top(void)
{
	unsigned long top = TASK_SIZE & PAGE_MASK;

	/* Space for the VDSO & data page */
	top -= PAGE_ALIGN(current->thread.vdso->size);
	top -= PAGE_SIZE;

	/* Space to randomize the VDSO base */
	if (current->flags & PF_RANDOMIZE)
		top -= VDSO_RANDOMIZE_SIZE;

	return top;
}

/*
 * Don't forget that the stack pointer must be aligned on an 8-byte
 * boundary for the 32-bit ABI and on a 16-byte boundary for the 64-bit ABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);

	return sp & STACK_ALIGN;
}

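/*
 * Per-CPU call_single_data used to ask remote CPUs to dump their
 * backtrace; the busy mask guards against reusing a CSD whose previous
 * IPI has not been handled yet.
 */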
static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
static struct cpumask backtrace_csd_busy;

static void handle_backtrace(void *info)
{
	nmi_cpu_backtrace(get_irq_regs());
	cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
}

static void raise_backtrace(cpumask_t *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		/*
		 * If we previously sent an IPI to the target CPU & it hasn't
		 * cleared its bit in the busy cpumask then it didn't handle
		 * our previous IPI & it's not safe for us to reuse the
		 * call_single_data_t.
		 */
		if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
			pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
				cpu);
			continue;
		}

		csd = &per_cpu(backtrace_csd, cpu);
		csd->func = handle_backtrace;
		smp_call_function_single_async(cpu, csd);
	}
}

void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}

#ifdef CONFIG_64BIT
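/*
 * Flatten a struct pt_regs into the LOONGARCH_EF_* register layout used
 * by the ELF core-dump and ptrace interfaces.
 */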
void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
{
	unsigned int i;

	for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
		uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
	}

	uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
	uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
	uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
	uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
	uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
	uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
	uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
	uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
}
#endif /* CONFIG_64BIT */