/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MIGRATE_H
#define _LINUX_MIGRATE_H

#include <linux/mm.h>
#include <linux/mempolicy.h>
#include <linux/migrate_mode.h>
#include <linux/hugetlb.h>

typedef struct page *new_page_t(struct page *page, unsigned long private);
typedef void free_page_t(struct page *page, unsigned long private);

struct migration_target_control;

/*
 * Return values from address_space_operations.migratepage():
 * - negative errno on page migration failure;
 * - zero on page migration success;
 */
#define MIGRATEPAGE_SUCCESS		0
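
/*
 * A minimal sketch of wiring this up: a filesystem whose pages carry no
 * private state can often point its migratepage method at the generic
 * migrate_page() helper declared below. "foo_aops" is a placeholder name:
 *
 *	static const struct address_space_operations foo_aops = {
 *		...
 *		.migratepage	= migrate_page,
 *	};
 */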

/* Defined in mm/debug.c: */
extern const char *migrate_reason_names[MR_TYPES];

#ifdef CONFIG_MIGRATION

extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping,
			struct page *newpage, struct page *page,
			enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
		unsigned long private, enum migrate_mode mode, int reason,
		unsigned int *ret_succeeded);
extern struct page *alloc_migration_target(struct page *page, unsigned long private);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
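
/*
 * Typical use, as a hedged sketch rather than a definitive recipe: isolate
 * pages onto a private list, then hand them to migrate_pages() with
 * alloc_migration_target() as the allocation callback and a
 * struct migration_target_control (defined in mm/internal.h, passed through
 * @private) describing the destination. "target_nid" and the isolation step
 * are placeholders:
 *
 *	struct migration_target_control mtc = { .nid = target_nid, ... };
 *	LIST_HEAD(pagelist);
 *	int ret;
 *
 *	// ... isolate pages onto &pagelist ...
 *	ret = migrate_pages(&pagelist, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC,
 *			    MR_MEMORY_HOTPLUG, NULL);
 *	if (ret)
 *		putback_movable_pages(&pagelist);
 */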

extern void migrate_page_states(struct page *newpage, struct page *page);
extern void migrate_page_copy(struct page *newpage, struct page *page);
extern int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
		struct page *newpage, struct page *page, int extra_count);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
		struct folio *newfolio, struct folio *folio, int extra_count);

extern bool numa_demotion_enabled;
#else

static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t new,
		free_page_t free, unsigned long private, enum migrate_mode mode,
		int reason, unsigned int *ret_succeeded)
	{ return -ENOSYS; }
static inline struct page *alloc_migration_target(struct page *page,
		unsigned long private)
	{ return NULL; }
static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
	{ return -EBUSY; }

static inline void migrate_page_states(struct page *newpage, struct page *page)
{
}

static inline void migrate_page_copy(struct page *newpage,
				     struct page *page) {}

static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
				  struct page *newpage, struct page *page)
{
	return -ENOSYS;
}

#define numa_demotion_enabled	false
#endif /* CONFIG_MIGRATION */

#ifdef CONFIG_COMPACTION
extern int PageMovable(struct page *page);
extern void __SetPageMovable(struct page *page, struct address_space *mapping);
extern void __ClearPageMovable(struct page *page);
#else
static inline int PageMovable(struct page *page) { return 0; }
static inline void __SetPageMovable(struct page *page,
				struct address_space *mapping)
{
}
static inline void __ClearPageMovable(struct page *page)
{
}
#endif

#ifdef CONFIG_NUMA_BALANCING
extern int migrate_misplaced_page(struct page *page,
				  struct vm_area_struct *vma, int node);
#else
static inline int migrate_misplaced_page(struct page *page,
					 struct vm_area_struct *vma, int node)
{
	return -EAGAIN; /* can't migrate now */
}
#endif /* CONFIG_NUMA_BALANCING */

#ifdef CONFIG_MIGRATION

/*
 * Watch out for PAE architectures, where an unsigned long might not have
 * enough bits to store the full physical address plus the flags. So far we
 * have enough room for all our flags.
 */
#define MIGRATE_PFN_VALID	(1UL << 0)
#define MIGRATE_PFN_MIGRATE	(1UL << 1)
#define MIGRATE_PFN_WRITE	(1UL << 3)
#define MIGRATE_PFN_SHIFT	6

static inline struct page *migrate_pfn_to_page(unsigned long mpfn)
{
	if (!(mpfn & MIGRATE_PFN_VALID))
		return NULL;
	return pfn_to_page(mpfn >> MIGRATE_PFN_SHIFT);
}

static inline unsigned long migrate_pfn(unsigned long pfn)
{
	return (pfn << MIGRATE_PFN_SHIFT) | MIGRATE_PFN_VALID;
}
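
/*
 * A minimal sketch of the encoding round trip used for migrate_vma src[]
 * and dst[] entries; "src", "dst", "i" and "dpage" are placeholder names:
 *
 *	// Decode a source entry and skip it if it cannot be migrated.
 *	struct page *spage = migrate_pfn_to_page(src[i]);
 *	if (!(src[i] & MIGRATE_PFN_MIGRATE))
 *		continue;
 *
 *	// Publish a freshly allocated destination page back to the core.
 *	dst[i] = migrate_pfn(page_to_pfn(dpage));
 */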

enum migrate_vma_direction {
	MIGRATE_VMA_SELECT_SYSTEM = 1 << 0,
	MIGRATE_VMA_SELECT_DEVICE_PRIVATE = 1 << 1,
};

struct migrate_vma {
	struct vm_area_struct	*vma;
	/*
	 * Both the src and dst arrays must be big enough for
	 * (end - start) >> PAGE_SHIFT entries.
	 *
	 * The caller must not modify the src array after migrate_vma_setup()
	 * returns, and must not modify the dst array after
	 * migrate_vma_pages() returns.
	 */
	unsigned long		*dst;
	unsigned long		*src;
	unsigned long		cpages;
	unsigned long		npages;
	unsigned long		start;
	unsigned long		end;

	/*
	 * Set to the owner value also stored in page->pgmap->owner for
	 * migrating out of device private memory. The flags field also needs
	 * to include MIGRATE_VMA_SELECT_DEVICE_PRIVATE.
	 * The caller should always set this field when using mmu notifier
	 * callbacks to avoid device MMU invalidations for device private
	 * pages that are not being migrated.
	 */
	void			*pgmap_owner;
	unsigned long		flags;
};
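
/*
 * Hedged sketch of the overall migrate_vma flow for a driver migrating
 * system memory into device private memory; "src_pfns", "dst_pfns" and
 * "my_pgmap_owner" are hypothetical driver-side names:
 *
 *	struct migrate_vma args = {
 *		.vma		= vma,
 *		.src		= src_pfns,
 *		.dst		= dst_pfns,
 *		.start		= start,
 *		.end		= end,
 *		.pgmap_owner	= my_pgmap_owner,
 *		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
 *	};
 *
 *	if (migrate_vma_setup(&args))
 *		return -EFAULT;
 *
 *	// For each src entry with MIGRATE_PFN_MIGRATE set: allocate a
 *	// destination page, copy the data, and store
 *	// migrate_pfn(page_to_pfn(newpage)) into the matching dst entry.
 *
 *	migrate_vma_pages(&args);
 *	// Entries that still have MIGRATE_PFN_MIGRATE set actually migrated.
 *	migrate_vma_finalize(&args);
 */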

int migrate_vma_setup(struct migrate_vma *args);
void migrate_vma_pages(struct migrate_vma *migrate);
void migrate_vma_finalize(struct migrate_vma *migrate);
int next_demotion_node(int node);

#else /* CONFIG_MIGRATION disabled: */

static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}

#endif /* CONFIG_MIGRATION */

#endif /* _LINUX_MIGRATE_H */