1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/mm.h>
4 #include <linux/smp.h>
5 #include <linux/sched.h>
6 #include <asm/sbi.h>
7 #include <asm/mmu_context.h>
8 #include <asm/tlbflush.h>
9
/*
 * Flush the entire TLB on all harts.
 *
 * A NULL hart mask tells the SBI RFENCE call to target every hart.
 * start = 0 with a -1 size covers the whole address space
 * (NOTE(review): assumes the size parameter is unsigned long so -1
 * becomes all-ones — confirm against the prototype in asm/sbi.h).
 */
void flush_tlb_all(void)
{
	sbi_remote_sfence_vma(NULL, 0, -1);
}
14
/*
 * Flush the TLB entries of @mm covering [@start, @start + @size) via SBI.
 *
 * @mm:     address space whose mappings are being invalidated
 * @start:  first virtual address of the range
 * @size:   length of the range in bytes (-1/all-ones means "everything")
 * @stride: granule of the flush; when @size <= @stride only a single
 *          entry needs invalidating, so the cheaper per-page flush is used
 *
 * When the ASID allocator is active, harts that are NOT currently running
 * this mm are not flushed here; they are recorded in
 * mm->context.tlb_stale_mask so the flush can be deferred until the mm
 * migrates onto them (see the comment below).
 */
static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
				  unsigned long size, unsigned long stride)
{
	struct cpumask *pmask = &mm->context.tlb_stale_mask;
	struct cpumask *cmask = mm_cpumask(mm);
	unsigned int cpuid;
	bool broadcast;

	/* No hart has ever run this mm: nothing can be cached for it. */
	if (cpumask_empty(cmask))
		return;

	/* get_cpu() disables preemption so cpuid stays valid until put_cpu(). */
	cpuid = get_cpu();
	/* check if the tlbflush needs to be sent to other CPUs */
	broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	if (static_branch_unlikely(&use_asid_allocator)) {
		unsigned long asid = atomic_long_read(&mm->context.id);

		/*
		 * TLB will be immediately flushed on harts concurrently
		 * executing this MM context. TLB flush on other harts
		 * is deferred until this MM context migrates there.
		 */
		/* pmask = all harts, minus us, minus harts flushed below. */
		cpumask_setall(pmask);
		cpumask_clear_cpu(cpuid, pmask);
		cpumask_andnot(pmask, pmask, cmask);

		if (broadcast) {
			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
		} else if (size <= stride) {
			/* Range fits in one granule: flush just that entry. */
			local_flush_tlb_page_asid(start, asid);
		} else {
			local_flush_tlb_all_asid(asid);
		}
	} else {
		/* No ASIDs: same shape, but flushes are not tagged. */
		if (broadcast) {
			sbi_remote_sfence_vma(cmask, start, size);
		} else if (size <= stride) {
			local_flush_tlb_page(start);
		} else {
			local_flush_tlb_all();
		}
	}

	put_cpu();
}
60
flush_tlb_mm(struct mm_struct * mm)61 void flush_tlb_mm(struct mm_struct *mm)
62 {
63 __sbi_tlb_flush_range(mm, 0, -1, PAGE_SIZE);
64 }
65
flush_tlb_page(struct vm_area_struct * vma,unsigned long addr)66 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
67 {
68 __sbi_tlb_flush_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
69 }
70
flush_tlb_range(struct vm_area_struct * vma,unsigned long start,unsigned long end)71 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
72 unsigned long end)
73 {
74 __sbi_tlb_flush_range(vma->vm_mm, start, end - start, PAGE_SIZE);
75 }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * Invalidate PMD-level (huge page) mappings of @vma's address space in
 * [@start, @end).  A PMD_SIZE stride means a single huge page hits the
 * helper's single-entry flush path instead of a full flush.
 */
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	unsigned long span = end - start;

	__sbi_tlb_flush_range(vma->vm_mm, start, span, PMD_SIZE);
}
#endif
83