/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fsnotify_backend.h>
#include <linux/path.h>
#include <linux/slab.h>
#include <linux/exportfs.h>
#include <linux/hashtable.h>

extern struct kmem_cache *fanotify_mark_cache;
extern struct kmem_cache *fanotify_fid_event_cachep;
extern struct kmem_cache *fanotify_path_event_cachep;
extern struct kmem_cache *fanotify_perm_event_cachep;

/* Possible states of the permission event */
enum {
	FAN_EVENT_INIT,
	FAN_EVENT_REPORTED,
	FAN_EVENT_ANSWERED,
	FAN_EVENT_CANCELED,
};
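
/*
 * Rough lifecycle sketch for the states above (the actual transitions live
 * in the fanotify .c files): INIT when the event is queued, REPORTED once
 * it has been read by userspace, ANSWERED when userspace writes a response
 * back, and CANCELED if the event is torn down without an answer.
 */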

/*
 * 3 dwords are sufficient for most local fs (64bit ino, 32bit generation).
 * fh buf should be dword aligned. On 64bit arch, the ext_buf pointer is
 * stored in either the first or last 2 dwords.
 */
#define FANOTIFY_INLINE_FH_LEN	(3 << 2)
#define FANOTIFY_FH_HDR_LEN	offsetof(struct fanotify_fh, buf)

/* Fixed size struct for file handle */
struct fanotify_fh {
	u8 type;
	u8 len;
#define FANOTIFY_FH_FLAG_EXT_BUF 1
	u8 flags;
	u8 pad;
	unsigned char buf[];
} __aligned(4);

/* Variable size struct for dir file handle + child file handle + name */
struct fanotify_info {
	/* size of dir_fh/file_fh including fanotify_fh hdr size */
	u8 dir_fh_totlen;
	u8 file_fh_totlen;
	u8 name_len;
	u8 pad;
	unsigned char buf[];
	/*
	 * (struct fanotify_fh) dir_fh starts at buf[0]
	 * (optional) file_fh starts at buf[dir_fh_totlen]
	 * name starts at buf[dir_fh_totlen + file_fh_totlen]
	 */
} __aligned(4);
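
/*
 * Worked example (assuming an 8-byte directory file handle, no child file
 * handle, and the name "foo"): FANOTIFY_FH_HDR_LEN is 4, so
 * dir_fh_totlen = 4 + 8 = 12, file_fh_totlen = 0, name_len = 3, and the
 * name is stored starting at buf[12].
 */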

static inline bool fanotify_fh_has_ext_buf(struct fanotify_fh *fh)
{
	return (fh->flags & FANOTIFY_FH_FLAG_EXT_BUF);
}

static inline char **fanotify_fh_ext_buf_ptr(struct fanotify_fh *fh)
{
	BUILD_BUG_ON(FANOTIFY_FH_HDR_LEN % 4);
	BUILD_BUG_ON(__alignof__(char *) - 4 + sizeof(char *) >
		     FANOTIFY_INLINE_FH_LEN);
	return (char **)ALIGN((unsigned long)(fh->buf), __alignof__(char *));
}

static inline void *fanotify_fh_ext_buf(struct fanotify_fh *fh)
{
	return *fanotify_fh_ext_buf_ptr(fh);
}

static inline void *fanotify_fh_buf(struct fanotify_fh *fh)
{
	return fanotify_fh_has_ext_buf(fh) ? fanotify_fh_ext_buf(fh) : fh->buf;
}
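
/*
 * Rough usage sketch (the allocation itself lives in fanotify.c and the
 * details may differ): when an encoded handle does not fit in the inline
 * buffer, the caller is expected to attach an external buffer roughly like
 *
 *	char *ext_buf = kmalloc(fh_len, gfp);	// hypothetical names
 *
 *	if (ext_buf) {
 *		*fanotify_fh_ext_buf_ptr(fh) = ext_buf;
 *		fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF;
 *	}
 *
 * after which fanotify_fh_buf() transparently returns the external buffer.
 */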

static inline int fanotify_info_dir_fh_len(struct fanotify_info *info)
{
	if (!info->dir_fh_totlen ||
	    WARN_ON_ONCE(info->dir_fh_totlen < FANOTIFY_FH_HDR_LEN))
		return 0;

	return info->dir_fh_totlen - FANOTIFY_FH_HDR_LEN;
}

static inline struct fanotify_fh *fanotify_info_dir_fh(struct fanotify_info *info)
{
	BUILD_BUG_ON(offsetof(struct fanotify_info, buf) % 4);

	return (struct fanotify_fh *)info->buf;
}

static inline int fanotify_info_file_fh_len(struct fanotify_info *info)
{
	if (!info->file_fh_totlen ||
	    WARN_ON_ONCE(info->file_fh_totlen < FANOTIFY_FH_HDR_LEN))
		return 0;

	return info->file_fh_totlen - FANOTIFY_FH_HDR_LEN;
}

static inline struct fanotify_fh *fanotify_info_file_fh(struct fanotify_info *info)
{
	return (struct fanotify_fh *)(info->buf + info->dir_fh_totlen);
}

static inline const char *fanotify_info_name(struct fanotify_info *info)
{
	return info->buf + info->dir_fh_totlen + info->file_fh_totlen;
}

static inline void fanotify_info_init(struct fanotify_info *info)
{
	info->dir_fh_totlen = 0;
	info->file_fh_totlen = 0;
	info->name_len = 0;
}

static inline unsigned int fanotify_info_len(struct fanotify_info *info)
{
	return info->dir_fh_totlen + info->file_fh_totlen + info->name_len;
}

static inline void fanotify_info_copy_name(struct fanotify_info *info,
					   const struct qstr *name)
{
	info->name_len = name->len;
	strcpy(info->buf + info->dir_fh_totlen + info->file_fh_totlen,
	       name->name);
}

/*
 * Common structure for fanotify events. Concrete structs are allocated in
 * fanotify_handle_event() and freed when the information is retrieved by
 * userspace. The type of event determines how it was allocated, how it will
 * be freed and which concrete struct it may be cast to.
 */
enum fanotify_event_type {
	FANOTIFY_EVENT_TYPE_FID, /* fixed length */
	FANOTIFY_EVENT_TYPE_FID_NAME, /* variable length */
	FANOTIFY_EVENT_TYPE_PATH,
	FANOTIFY_EVENT_TYPE_PATH_PERM,
	FANOTIFY_EVENT_TYPE_OVERFLOW, /* struct fanotify_event */
	FANOTIFY_EVENT_TYPE_FS_ERROR, /* struct fanotify_error_event */
	__FANOTIFY_EVENT_TYPE_NUM
};
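
/*
 * Mapping from event type to the concrete struct it may be cast to (via the
 * FANOTIFY_*() container_of() helpers below):
 *
 *	FID		-> struct fanotify_fid_event
 *	FID_NAME	-> struct fanotify_name_event
 *	PATH		-> struct fanotify_path_event
 *	PATH_PERM	-> struct fanotify_perm_event
 *	OVERFLOW	-> struct fanotify_event (no concrete struct)
 *	FS_ERROR	-> struct fanotify_error_event
 */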

#define FANOTIFY_EVENT_TYPE_BITS \
	(ilog2(__FANOTIFY_EVENT_TYPE_NUM - 1) + 1)
#define FANOTIFY_EVENT_HASH_BITS \
	(32 - FANOTIFY_EVENT_TYPE_BITS)
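
/*
 * With the six event types above, __FANOTIFY_EVENT_TYPE_NUM == 6, so
 * FANOTIFY_EVENT_TYPE_BITS == ilog2(5) + 1 == 3 and
 * FANOTIFY_EVENT_HASH_BITS == 29.
 */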

struct fanotify_event {
	struct fsnotify_event fse;
	struct hlist_node merge_list;	/* List for hashed merge */
	u32 mask;
	struct {
		unsigned int type : FANOTIFY_EVENT_TYPE_BITS;
		unsigned int hash : FANOTIFY_EVENT_HASH_BITS;
	};
	struct pid *pid;
};

static inline void fanotify_init_event(struct fanotify_event *event,
				       unsigned int hash, u32 mask)
{
	fsnotify_init_event(&event->fse);
	INIT_HLIST_NODE(&event->merge_list);
	event->hash = hash;
	event->mask = mask;
	event->pid = NULL;
}

#define FANOTIFY_INLINE_FH(name, size)					\
struct {								\
	struct fanotify_fh (name);					\
	/* Space for object_fh.buf[] - access with fanotify_fh_buf() */	\
	unsigned char _inline_fh_buf[(size)];				\
}
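
/*
 * The wrapper struct is needed because struct fanotify_fh ends in a
 * flexible array: embedding it directly would reserve no space for the
 * handle bytes. The adjacent _inline_fh_buf[] provides that space and is
 * never accessed directly - use fanotify_fh_buf(), as noted above. It is
 * used below by fanotify_fid_event and fanotify_error_event.
 */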

struct fanotify_fid_event {
	struct fanotify_event fae;
	__kernel_fsid_t fsid;

	FANOTIFY_INLINE_FH(object_fh, FANOTIFY_INLINE_FH_LEN);
};

static inline struct fanotify_fid_event *
FANOTIFY_FE(struct fanotify_event *event)
{
	return container_of(event, struct fanotify_fid_event, fae);
}

struct fanotify_name_event {
	struct fanotify_event fae;
	__kernel_fsid_t fsid;
	struct fanotify_info info;
};

static inline struct fanotify_name_event *
FANOTIFY_NE(struct fanotify_event *event)
{
	return container_of(event, struct fanotify_name_event, fae);
}

struct fanotify_error_event {
	struct fanotify_event fae;
	s32 error; /* Error reported by the Filesystem. */
	u32 err_count; /* Suppressed errors count */

	__kernel_fsid_t fsid; /* FSID this error refers to. */

	FANOTIFY_INLINE_FH(object_fh, MAX_HANDLE_SZ);
};

static inline struct fanotify_error_event *
FANOTIFY_EE(struct fanotify_event *event)
{
	return container_of(event, struct fanotify_error_event, fae);
}

static inline __kernel_fsid_t *fanotify_event_fsid(struct fanotify_event *event)
{
	if (event->type == FANOTIFY_EVENT_TYPE_FID)
		return &FANOTIFY_FE(event)->fsid;
	else if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME)
		return &FANOTIFY_NE(event)->fsid;
	else if (event->type == FANOTIFY_EVENT_TYPE_FS_ERROR)
		return &FANOTIFY_EE(event)->fsid;
	else
		return NULL;
}

static inline struct fanotify_fh *fanotify_event_object_fh(
						struct fanotify_event *event)
{
	if (event->type == FANOTIFY_EVENT_TYPE_FID)
		return &FANOTIFY_FE(event)->object_fh;
	else if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME)
		return fanotify_info_file_fh(&FANOTIFY_NE(event)->info);
	else if (event->type == FANOTIFY_EVENT_TYPE_FS_ERROR)
		return &FANOTIFY_EE(event)->object_fh;
	else
		return NULL;
}

static inline struct fanotify_info *fanotify_event_info(
						struct fanotify_event *event)
{
	if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME)
		return &FANOTIFY_NE(event)->info;
	else
		return NULL;
}

static inline int fanotify_event_object_fh_len(struct fanotify_event *event)
{
	struct fanotify_info *info = fanotify_event_info(event);
	struct fanotify_fh *fh = fanotify_event_object_fh(event);

	/* For name events the child (object) fh is optional */
	if (info)
		return info->file_fh_totlen ? fh->len : 0;
	else
		return fh ? fh->len : 0;
}

static inline int fanotify_event_dir_fh_len(struct fanotify_event *event)
{
	struct fanotify_info *info = fanotify_event_info(event);

	return info ? fanotify_info_dir_fh_len(info) : 0;
}

static inline bool fanotify_event_has_object_fh(struct fanotify_event *event)
{
	/* For error events, even zeroed fh are reported. */
	if (event->type == FANOTIFY_EVENT_TYPE_FS_ERROR)
		return true;
	return fanotify_event_object_fh_len(event) > 0;
}

static inline bool fanotify_event_has_dir_fh(struct fanotify_event *event)
{
	return fanotify_event_dir_fh_len(event) > 0;
}

struct fanotify_path_event {
	struct fanotify_event fae;
	struct path path;
};

static inline struct fanotify_path_event *
FANOTIFY_PE(struct fanotify_event *event)
{
	return container_of(event, struct fanotify_path_event, fae);
}

/*
 * Structure for permission fanotify events. It gets allocated and freed in
 * fanotify_handle_event() since we wait there for user response. When the
 * information is retrieved by userspace the structure is moved from
 * group->notification_list to group->fanotify_data.access_list to wait for
 * user response.
 */
struct fanotify_perm_event {
	struct fanotify_event fae;
	struct path path;
	unsigned short response;	/* userspace answer to the event */
	unsigned short state;		/* state of the event */
	int fd;		/* fd we passed to userspace for this event */
};

static inline struct fanotify_perm_event *
FANOTIFY_PERM(struct fanotify_event *event)
{
	return container_of(event, struct fanotify_perm_event, fae);
}

static inline bool fanotify_is_perm_event(u32 mask)
{
	return IS_ENABLED(CONFIG_FANOTIFY_ACCESS_PERMISSIONS) &&
		mask & FANOTIFY_PERM_EVENTS;
}

static inline struct fanotify_event *FANOTIFY_E(struct fsnotify_event *fse)
{
	return container_of(fse, struct fanotify_event, fse);
}

static inline bool fanotify_is_error_event(u32 mask)
{
	return mask & FAN_FS_ERROR;
}

static inline bool fanotify_event_has_path(struct fanotify_event *event)
{
	return event->type == FANOTIFY_EVENT_TYPE_PATH ||
		event->type == FANOTIFY_EVENT_TYPE_PATH_PERM;
}

static inline struct path *fanotify_event_path(struct fanotify_event *event)
{
	if (event->type == FANOTIFY_EVENT_TYPE_PATH)
		return &FANOTIFY_PE(event)->path;
	else if (event->type == FANOTIFY_EVENT_TYPE_PATH_PERM)
		return &FANOTIFY_PERM(event)->path;
	else
		return NULL;
}

/*
 * Use a 128-bucket hash table to speed up merging of events.
 */
#define FANOTIFY_HTABLE_BITS	(7)
#define FANOTIFY_HTABLE_SIZE	(1 << FANOTIFY_HTABLE_BITS)
#define FANOTIFY_HTABLE_MASK	(FANOTIFY_HTABLE_SIZE - 1)
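
/*
 * Candidate events for merging are looked up per bucket:
 * fanotify_event_hash_bucket() below selects the bucket as
 * event->hash & FANOTIFY_HTABLE_MASK, i.e. the low 7 bits of the 29-bit
 * hash stored in struct fanotify_event.
 */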

/*
 * Permission events and overflow event do not get merged - don't hash them.
 */
static inline bool fanotify_is_hashed_event(u32 mask)
{
	return !(fanotify_is_perm_event(mask) ||
		 fsnotify_is_overflow_event(mask));
}

static inline unsigned int fanotify_event_hash_bucket(
						struct fsnotify_group *group,
						struct fanotify_event *event)
{
	return event->hash & FANOTIFY_HTABLE_MASK;
}