// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC fault.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/extable.h>
#include <linux/sched/signal.h>
#include <linux/perf_event.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/siginfo.h>
#include <asm/signal.h>

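/*
 * Geometry of the on-chip TLBs; a minimal annotation, assuming the
 * usual OpenRISC 1000 configuration of 64 sets per TLB way.
 * TLB_OFFSET() yields the TLB set that a given virtual address maps to.
 */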
#define NUM_TLB_ENTRIES 64
#define TLB_OFFSET(add) (((add) >> PAGE_SHIFT) & (NUM_TLB_ENTRIES-1))

/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
 *            - also look into include/asm/mmu_context.h
 */
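/*
 * Per-CPU pointer to the active page directory; the low-level TLB-miss
 * handlers walk this rather than current->mm->pgd so that a valid PGD
 * is always reachable, even from interrupt context.
 */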
volatile pgd_t *current_pgd[NR_CPUS];

extern void __noreturn die(char *, struct pt_regs *, long);

/*
 * This routine handles page faults. It determines the address and the
 * problem, and then passes it off to one of the appropriate routines.
 */

asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
			      unsigned long vector, int write_acc)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	vm_fault_t fault;
	unsigned int flags = FAULT_FLAG_DEFAULT;

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */

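	/*
	 * Vectors 0x300 and 0x400 are the OpenRISC data/instruction
	 * page-fault (protection) exceptions; only faults that arrive
	 * via the TLB-miss vectors may be satisfied from init_mm here.
	 */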
	if (address >= VMALLOC_START &&
	    (vector != 0x300 && vector != 0x400) &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* If exceptions were enabled, we can reenable them here */
	if (user_mode(regs)) {
		/* Exception was in userspace: reenable interrupts */
		local_irq_enable();
		flags |= FAULT_FLAG_USER;
	} else {
		/* If the exception was in a syscall, then IRQs may have
		 * been enabled or disabled. If they were enabled,
		 * reenable them.
		 */
		if (regs->sr & (SPR_SR_IEE | SPR_SR_TEE))
			local_irq_enable();
	}

	mm = tsk->mm;
	si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */

	if (in_interrupt() || !mm)
		goto no_context;

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

retry:
	mmap_read_lock(mm);
	vma = find_vma(mm, address);

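	/*
	 * find_vma() returns the first VMA that ends above 'address'.
	 * The faulting address may still lie below vma->vm_start; such
	 * an access is valid only if the VMA can grow down (a stack).
	 */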
	if (!vma)
		goto bad_area;

	if (vma->vm_start <= address)
		goto good_area;

	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it.
	 */

good_area:
	si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */

	if (write_acc) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		flags |= FAULT_FLAG_WRITE;
	} else {
		/* not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/* are we trying to execute in a non-executable area? */
	if ((vector == 0x400) && !(vma->vm_page_prot.pgprot & _PAGE_EXEC))
		goto bad_area;

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */

	fault = handle_mm_fault(vma, address, flags, regs);

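	/*
	 * A pending fatal signal can abort the wait inside
	 * handle_mm_fault(); in that case the mmap lock has already
	 * been dropped, so only kernel faults need the fixup path.
	 */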
	if (fault_signal_pending(fault, regs)) {
		if (!user_mode(regs))
			goto no_context;
		return;
	}

	/* The fault is fully completed (including releasing mmap lock) */
	if (fault & VM_FAULT_COMPLETED)
		return;

	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}

	/* RGD: modeled on CRIS */
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;

		/* No need to mmap_read_unlock(mm) as we would
		 * have already released it in __lock_page_or_retry
		 * in mm/filemap.c.
		 */

		goto retry;
	}

	mmap_read_unlock(mm);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first.
	 */

bad_area:
	mmap_read_unlock(mm);

bad_area_nosemaphore:

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
		return;
	}

no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 * when it accesses user-memory. When it fails in one
	 * of those points, we find it in a table and do a jump
	 * to some fixup code that loads an appropriate error
	 * code)
	 */

	{
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->pc);
		if (entry) {
			/* Adjust the instruction pointer in the stackframe */
			regs->pc = entry->fixup;
			return;
		}
	}

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	if ((unsigned long)(address) < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT "Unable to handle kernel access");
	printk(KERN_CONT " at virtual address 0x%08lx\n", address);

	die("Oops", regs, write_acc);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

out_of_memory:
	mmap_read_unlock(mm);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	mmap_read_unlock(mm);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an unfortunately timed irq
		 * (like inside schedule() between switch_mm and
		 * switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		p4d_t *p4d, *p4d_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		/*
		phx_warn("do_page_fault(): vmalloc_fault will not work, "
			 "since current_pgd assign a proper value somewhere\n"
			 "anyhow we don't need this at the moment\n");

		phx_mmu("vmalloc_fault");
		*/
		pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs; it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		p4d = p4d_offset(pgd, address);
		p4d_k = p4d_offset(pgd_k, address);
		if (!p4d_present(*p4d_k))
			goto no_context;

		pud = pud_offset(p4d, address);
		pud_k = pud_offset(p4d_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}