/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);

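/* Low-level per-page flush primitives, implemented in assembly in
 * arch/parisc/kernel/pacache.S. */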
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);

/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);

#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif

struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

/* Virtual address of pfn.  */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))

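/* Called after a PTE is installed: if the kernel left the page's
 * dcache lines dirty (the deferred flush in flush_dcache_page()),
 * flush the kernel mapping now so the user mapping sees current data. */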
void
__update_cache(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000.  */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, sizeof(buf), "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

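/* Query cache and TLB geometry from PDC firmware and derive the
 * strides used by the assembly flush loops. */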
void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
			"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
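	/* Worked example (illustrative PDC values, not from real hardware):
	 * cc_line = 4, cc_block = 1, cc_shift = 0 gives a stride of
	 * 4 << (3 + 1 + 0) = 64 bytes, matching Jim Hull's original form:
	 * (1 << (1-1)) * (4 << (4+0)) = 64. */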

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof(btlb_info));
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

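/* Disable space-register hashing for this CPU type, then use PDC to
 * verify that space-id hashing really is off; the kernel expects it
 * to be disabled and panics otherwise. */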
void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

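/* Flush one user page through a kernel alias congruent with vmaddr;
 * also flush the icache if the mapping is executable.  Preemption is
 * disabled so the temporary alias stays local to this CPU. */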
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

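/* As above, but purge (invalidate without writeback) the dcache lines
 * instead of flushing them. */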
static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page_addr(page_address(page));

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent. */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course). */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

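/* Above these sizes a full cache/TLB flush is cheaper than a range
 * flush; both thresholds are re-measured at boot by
 * parisc_setup_cache_timing(). */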
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;

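/* Size the flush thresholds empirically: time a full-cache (and
 * full-TLB) flush against a range flush over the kernel image, using
 * the CR16 interval timer read via mfctl(16). */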
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* Calculate the TLB flush threshold. */

	/* On SMP machines, skip the TLB measurement of kernel text, which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;

	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

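/* Flush a page through its kernel mapping, then purge the kernel TLB
 * entry so the CPU cannot speculatively pull the lines back in. */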
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);

/* __flush_tlb_range()
 *
 * Returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs.  */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

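/* Total size, in bytes, of this mm's user mappings. */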
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}

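/* Walk pgd -> p4d -> pud -> pmd down to the PTE for addr; returns
 * NULL if any intermediate level is empty. */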
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}

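/* Flush [start, end) page by page, looking up each page's pfn in the
 * page tables and skipping pages with no PTE. */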
static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
			      unsigned long start, unsigned long end)
{
	unsigned long addr, pfn;
	pte_t *ptep;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		ptep = get_ptep(mm->pgd, addr);
		if (ptep) {
			pfn = pte_pfn(*ptep);
			flush_cache_page(vma, addr, pfn);
		}
	}
}

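/* Fast path for when the mm is live on this CPU (its space id is
 * loaded in sr3): flush the user range directly, then its TLB entries. */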
static void flush_user_cache_tlb(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end)
{
	flush_user_dcache_range_asm(start, end);
	if (vma->vm_flags & VM_EXEC)
		flush_user_icache_range_asm(start, end);
	flush_tlb_range(vma, start, end);
}

void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	preempt_disable();
	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
		preempt_enable();
		return;
	}

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
	preempt_enable();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	preempt_disable();
	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_cache_tlb(vma, start, end);
		preempt_enable();
		return;
	}

	/* Flush only the requested range, not the whole VMA. */
	flush_cache_pages(vma, vma->vm_mm, start, end);
	preempt_enable();
}

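/* Flush a single user page.  If the mm has no space id ("context"),
 * its TLB entries cannot be targeted, so purge the page through a
 * kernel alias instead. */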
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}

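/* Write back a vmap()/vmalloc() range, e.g. before the memory is
 * handed to a device; above the threshold a full dcache flush is
 * cheaper than a range flush. */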
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);

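/* Discard (purge) cache lines for a vmap range, e.g. after a device
 * has DMA'd fresh data into it, so stale lines are not read back. */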
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);