/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/compiler.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>

#if CONFIG_PGTABLE_LEVELS == 2
#include <asm-generic/pgtable-nopmd.h>
#elif CONFIG_PGTABLE_LEVELS == 3
#include <asm-generic/pgtable-nopud.h>
#else
#include <asm-generic/pgtable-nop4d.h>
#endif
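
/*
 * The p4d level is always folded via pgtable-nop4d.h; with three levels
 * the pud is folded as well, and with two levels so is the pmd.
 */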

#if CONFIG_PGTABLE_LEVELS == 2
#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#elif CONFIG_PGTABLE_LEVELS == 4
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT - 3))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT - 3))
#endif

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define VA_BITS		(PGDIR_SHIFT + (PAGE_SHIFT - 3))
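
/*
 * Each table level holds PAGE_SIZE / 8 entries and thus translates
 * PAGE_SHIFT - 3 bits of the virtual address. As a worked example
 * (assuming the default 16KB pages, i.e. PAGE_SHIFT == 14, and
 * CONFIG_PGTABLE_LEVELS == 3): PMD_SHIFT = 25, PGDIR_SHIFT = 36 and
 * VA_BITS = 47.
 */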

#define PTRS_PER_PGD	(PAGE_SIZE >> 3)
#if CONFIG_PGTABLE_LEVELS > 3
#define PTRS_PER_PUD	(PAGE_SIZE >> 3)
#endif
#if CONFIG_PGTABLE_LEVELS > 2
#define PTRS_PER_PMD	(PAGE_SIZE >> 3)
#endif
#define PTRS_PER_PTE	(PAGE_SIZE >> 3)

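/*
 * Number of PGD slots available to user space; at least one even when
 * TASK_SIZE64 is smaller than a single PGDIR_SIZE span.
 */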
#define USER_PTRS_PER_PGD	((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <asm/fixmap.h>
#include <asm/sparsemem.h>

struct mm_struct;
struct vm_area_struct;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/*
 * TLB refill handlers may also map the vmalloc area into xkvrange.
 * Avoid the first couple of pages so NULL pointer dereferences will
 * still reliably trap.
 */
#define MODULES_VADDR	(vm_map_base + PCI_IOSIZE + (2 * PAGE_SIZE))
#define MODULES_END	(MODULES_VADDR + SZ_256M)

#define VMALLOC_START	MODULES_END
#define VMALLOC_END	\
	(vm_map_base +	\
	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)

#define vmemmap		((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END	((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
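
/*
 * The region above vm_map_base is thus carved up, in order, into the PCI
 * I/O window, a small guard gap, a 256MB module area, the vmalloc area,
 * and finally a PMD-aligned vmemmap; VMALLOC_END is capped at whichever
 * is smaller of the full page-table span and 1UL << cpu_vabits.
 */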

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#endif
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#endif
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

extern pte_t invalid_pte_table[PTRS_PER_PTE];

#ifndef __PAGETABLE_PUD_FOLDED

typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })

extern pud_t invalid_pud_table[PTRS_PER_PUD];

/*
 * Empty pgd/p4d entries point to the invalid_pud_table.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}

static inline int p4d_bad(p4d_t p4d)
{
	return p4d_val(p4d) & ~PAGE_MASK;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}

static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}

static inline pud_t *p4d_pgtable(p4d_t p4d)
{
	return (pud_t *)p4d_val(p4d);
}

static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}

#define p4d_phys(p4d)		PHYSADDR(p4d_val(p4d))
#define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))

#endif

#ifndef __PAGETABLE_PMD_FOLDED

typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

extern pmd_t invalid_pmd_table[PTRS_PER_PMD];

/*
 * Empty pud entries point to the invalid_pmd_table.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long)invalid_pmd_table;
}

static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}

static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long)invalid_pmd_table;
}

static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
}

static inline pmd_t *pud_pgtable(pud_t pud)
{
	return (pmd_t *)pud_val(pud);
}

#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)

#define pud_phys(pud)		PHYSADDR(pud_val(pud))
#define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))

#endif

/*
 * Empty pmd entries point to the invalid_pte_table.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long)invalid_pte_table;
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & ~PAGE_MASK);
}

static inline int pmd_present(pmd_t pmd)
{
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return !!(pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE));

	return pmd_val(pmd) != (unsigned long)invalid_pte_table;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
}

#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#define pmd_phys(pmd)		PHYSADDR(pmd_val(pmd))

#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp, pmd_t pmd);

#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_pfn(x)		((unsigned long)(((x).pte & _PFN_MASK) >> _PFN_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))

/*
 * Initialize a new pgd / pud / pmd table with invalid pointers.
 */
extern void pgd_init(void *addr);
extern void pud_init(void *addr);
extern void pmd_init(void *addr);

/*
 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
 * are !pte_none() && !pte_present().
 *
 * Format of swap PTEs:
 *
 *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
 *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
 *   <--------------------------- offset ---------------------------
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   --------------> E <--- type ---> <---------- zeroes ---------->
 *
 *   E is the exclusive marker that is not stored in swap entries.
 *   The zero'ed bits include _PAGE_PRESENT and _PAGE_PROTNONE.
 */
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{ pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }

#define __swp_type(x)		(((x).val >> 16) & 0x7f)
#define __swp_offset(x)		((x).val >> 24)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
#define __swp_entry_to_pmd(x)	((pmd_t) { (x).val | _PAGE_HUGE })
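
/*
 * A round-trip sketch: mk_swap_pte() packs the (at most 7-bit) type into
 * bits 16-22 and the offset into bits 24 and up, so for any type <= 0x7f,
 * __swp_type(__swp_entry(t, o)) == t and __swp_offset(__swp_entry(t, o)) == o,
 * while the low 16 bits stay zero and the entry is neither none nor present.
 */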

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
	return pte;
}

extern void paging_init(void);

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)
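
/*
 * As on MIPS, each TLB entry maps an even/odd pair of adjacent virtual
 * pages, so the global bit must agree across a pair: set_pte() propagates
 * _PAGE_GLOBAL to the buddy slot (returned by ptep_buddy()), and pte_none()
 * above deliberately ignores _PAGE_GLOBAL, since a cleared PTE may keep it.
 */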
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
#ifdef CONFIG_SMP
		/*
		 * For SMP, multiple CPUs can race, so we need to do
		 * this atomically.
		 */
		unsigned long page_global = _PAGE_GLOBAL;
		unsigned long tmp;

		__asm__ __volatile__(
		"1:"	__LL	"%[tmp], %[buddy]		\n"
		"	bnez	%[tmp], 2f			\n"
		"	 or	%[tmp], %[tmp], %[global]	\n"
			__SC	"%[tmp], %[buddy]		\n"
		"	beqz	%[tmp], 1b			\n"
		"	nop					\n"
		"2:						\n"
		__WEAK_LLSC_MB
		: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
		: [global] "r" (page_global));
#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
	}
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
		set_pte_at(mm, addr, ptep, __pte(0));
}

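/*
 * Base-2 logarithms of the table entry sizes; presumably consumed by the
 * assembly TLB refill handlers (as in the MIPS code this is derived from)
 * to turn a table index into a byte offset with a single shift.
 */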
#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)

extern pgd_t swapper_pg_dir[];
extern pgd_t invalid_pg_dir[];

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
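/*
 * Note on the dirty-bit scheme (inherited from MIPS): _PAGE_DIRTY is the
 * bit the hardware actually honours for writes, while _PAGE_WRITE and
 * _PAGE_MODIFIED are software bits. An entry therefore only becomes
 * hardware-writable once it is both writable and dirty, which is how the
 * helpers below emulate dirty tracking without a hardware dirty bit.
 */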
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}

static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}

#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_mkspecial(pte_t pte)	{ pte_val(pte) |= _PAGE_SPECIAL; return pte; }
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

#define pte_accessible pte_accessible
static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_val(a) & _PAGE_PRESENT)
		return true;

	if ((pte_val(a) & _PAGE_PROTNONE) &&
			atomic_read(&mm->tlb_flush_pending))
		return true;

	return false;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
		     (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}

extern void __update_tlb(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep);

static inline void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep)
{
	__update_tlb(vma, address, ptep);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmdp)
{
	__update_tlb(vma, address, (pte_t *)pmdp);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/
#define pmdp_establish generic_pmdp_establish

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE) && pmd_present(pmd);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) = (pmd_val(pmd) & ~(_PAGE_GLOBAL)) |
		((pmd_val(pmd) & _PAGE_GLOBAL) << (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}
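
/*
 * For huge pages the hardware global bit lives at a different position
 * (_PAGE_HGLOBAL), so pmd_mkhuge() above migrates any _PAGE_GLOBAL bit
 * up to the huge-global slot before marking the entry huge.
 */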

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_DIRTY;
	return pmd;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~_PAGE_ACCESSED;
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;
	return pmd;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & _HPAGE_CHG_MASK) |
				(pgprot_val(newprot) & ~_HPAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY | _PAGE_PROTNONE);

	return pmd;
}

/*
 * The generic version pmdp_huge_get_and_clear uses a version of pmd_clear() with a
 * different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_NUMA_BALANCING
static inline long pte_protnone(pte_t pte)
{
	return (pte_val(pte) & _PAGE_PROTNONE);
}

static inline long pmd_protnone(pmd_t pmd)
{
	return (pmd_val(pmd) & _PAGE_PROTNONE);
}
#endif /* CONFIG_NUMA_BALANCING */
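
/*
 * For automatic NUMA balancing, PROT_NONE-style entries are assumed to
 * keep _PAGE_PROTNONE set with _PAGE_PRESENT clear, which is why
 * pte_present()/pmd_present() above also accept _PAGE_PROTNONE.
 */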

/*
 * We provide our own get_unmapped area to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */