// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/miscdevice.h>
#include <linux/poll.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/slab.h>

#include "dlm_internal.h"
#include "lockspace.h"

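/*
 * POSIX lock (plock) requests are not resolved in the kernel; each
 * operation is packed into a struct dlm_plock_info and handed to the
 * cluster lock manager daemon (dlm_controld) through the
 * DLM_PLOCK_MISC_NAME misc device:
 *
 *   send_op()   - queue the op on send_list and wake poll() waiters
 *   dev_read()  - copy one queued op to userspace, move it to recv_list
 *   dev_write() - match the daemon's reply against recv_list, then wake
 *                 the waiting caller or run the async callback
 */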
static DEFINE_SPINLOCK(ops_lock);
static LIST_HEAD(send_list);
static LIST_HEAD(recv_list);
static DECLARE_WAIT_QUEUE_HEAD(send_wq);
static DECLARE_WAIT_QUEUE_HEAD(recv_wq);

struct plock_async_data {
	void *fl;
	void *file;
	struct file_lock flc;
	int (*callback)(struct file_lock *fl, int result);
};

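/* One in-flight request to the daemon.  A synchronous caller sleeps on
 * recv_wq until dev_write() marks the op done; an async (lockd) request
 * carries a plock_async_data and is completed by dlm_plock_callback()
 * instead. */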
struct plock_op {
	struct list_head list;
	int done;
	/* set if the lock op was interrupted while waiting for the
	   dlm_controld reply */
	bool sigint;
	struct dlm_plock_info info;
	/* if set, indicates async handling */
	struct plock_async_data *data;
};

static inline void set_version(struct dlm_plock_info *info)
{
	info->version[0] = DLM_PLOCK_VERSION_MAJOR;
	info->version[1] = DLM_PLOCK_VERSION_MINOR;
	info->version[2] = DLM_PLOCK_VERSION_PATCH;
}

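/* A reply from userspace is only usable if the daemon's major version
 * matches ours and its minor version is not newer than ours. */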
static int check_version(struct dlm_plock_info *info)
{
	if ((DLM_PLOCK_VERSION_MAJOR != info->version[0]) ||
	    (DLM_PLOCK_VERSION_MINOR < info->version[1])) {
		log_print("plock device version mismatch: "
			  "kernel (%u.%u.%u), user (%u.%u.%u)",
			  DLM_PLOCK_VERSION_MAJOR,
			  DLM_PLOCK_VERSION_MINOR,
			  DLM_PLOCK_VERSION_PATCH,
			  info->version[0],
			  info->version[1],
			  info->version[2]);
		return -EINVAL;
	}
	return 0;
}

static void dlm_release_plock_op(struct plock_op *op)
{
	kfree(op->data);
	kfree(op);
}

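/* Queue an op on send_list and wake anyone (i.e. the daemon) waiting in
 * poll() on the misc device. */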
static void send_op(struct plock_op *op)
{
	set_version(&op->info);
	spin_lock(&ops_lock);
	list_add_tail(&op->list, &send_list);
	spin_unlock(&ops_lock);
	wake_up(&send_wq);
}

/* If a process was killed while waiting for the only plock on a file,
   locks_remove_posix will not see any lock on the file so it won't
   send an unlock-close to us to pass on to userspace to clean up the
   abandoned waiter. So, we have to insert the unlock-close when the
   lock call is interrupted. */

static void do_unlock_close(const struct dlm_plock_info *info)
{
	struct plock_op *op;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op)
		return;

	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	op->info.pid = info->pid;
	op->info.fsid = info->fsid;
	op->info.number = info->number;
	op->info.start = 0;
	op->info.end = OFFSET_MAX;
	op->info.owner = info->owner;

	op->info.flags |= DLM_PLOCK_FL_CLOSE;
	send_op(op);
}

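/* Ask the daemon for a POSIX lock.  Without an lm_grant callback the
 * caller sleeps (interruptibly) for the reply; with one (lockd acting
 * for an NFS client) the request is queued, FILE_LOCK_DEFERRED is
 * returned, and dlm_plock_callback() completes the lock later. */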
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		   int cmd, struct file_lock *fl)
{
	struct plock_async_data *op_data;
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_LOCK;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.wait = IS_SETLKW(cmd);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	/* async handling */
	if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
		op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
		if (!op_data) {
			dlm_release_plock_op(op);
			rv = -ENOMEM;
			goto out;
		}

		/* fl_owner is lockd, which doesn't distinguish
		   processes on the nfs client */
		op->info.owner = (__u64) fl->fl_pid;
		op_data->callback = fl->fl_lmops->lm_grant;
		locks_init_lock(&op_data->flc);
		locks_copy_lock(&op_data->flc, fl);
		op_data->fl = fl;
		op_data->file = file;

		op->data = op_data;

		send_op(op);
		rv = FILE_LOCK_DEFERRED;
		goto out;
	} else {
		op->info.owner = (__u64)(long) fl->fl_owner;
	}

	send_op(op);

	rv = wait_event_interruptible(recv_wq, (op->done != 0));
	if (rv == -ERESTARTSYS) {
		spin_lock(&ops_lock);
		/* recheck op->done under ops_lock; if the reply arrived in
		 * the meantime, ignore the interruption
		 */
		if (op->done != 0) {
			spin_unlock(&ops_lock);
			goto do_lock_wait;
		}

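		/* Leave the op on recv_list; when the reply arrives,
		 * dev_write() sees the sigint flag, sends an unlock-close
		 * on our behalf and frees the op. */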
		op->sigint = true;
		spin_unlock(&ops_lock);
		log_debug(ls, "%s: wait interrupted %x %llx pid %d",
			  __func__, ls->ls_global_id,
			  (unsigned long long)number, op->info.pid);
		goto out;
	}

do_lock_wait:

	WARN_ON(!list_empty(&op->list));

	rv = op->info.rv;

	if (!rv) {
		if (locks_lock_file_wait(file, fl) < 0)
			log_error(ls, "dlm_posix_lock: vfs lock error %llx",
				  (unsigned long long)number);
	}

	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_lock);

/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
	struct plock_async_data *op_data = op->data;
	struct file *file;
	struct file_lock *fl;
	struct file_lock *flc;
	int (*notify)(struct file_lock *fl, int result) = NULL;
	int rv = 0;

	WARN_ON(!list_empty(&op->list));

	/* check if the following 2 are still valid or make a copy */
	file = op_data->file;
	flc = &op_data->flc;
	fl = op_data->fl;
	notify = op_data->callback;

	if (op->info.rv) {
		notify(fl, op->info.rv);
		goto out;
	}

	/* got fs lock; bookkeep locally as well: */
	flc->fl_flags &= ~FL_SLEEP;
	if (posix_lock_file(file, flc, NULL)) {
		/*
		 * This can only happen in the case of kmalloc() failure.
		 * The filesystem's own lock is the authoritative lock,
		 * so a failure to get the lock locally is not a disaster.
		 * As long as the fs cannot reliably cancel locks (especially
		 * in a low-memory situation), we're better off ignoring
		 * this failure than trying to recover.
		 */
		log_print("dlm_plock_callback: vfs lock error %llx file %p fl %p",
			  (unsigned long long)op->info.number, file, fl);
	}

	rv = notify(fl, 0);
	if (rv) {
		/* XXX: We need to cancel the fs lock here: */
		log_print("dlm_plock_callback: lock granted after lock request "
			  "failed; dangling lock!\n");
		goto out;
	}

out:
	dlm_release_plock_op(op);
	return rv;
}

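/* Release a POSIX lock: drop the local VFS lock first, then tell the
 * daemon.  Unlocks generated by a close (FL_CLOSE) are fire-and-forget;
 * no reply is awaited and dev_read() frees the op. */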
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		     struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;
	unsigned char fl_flags = fl->fl_flags;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	/* cause the vfs unlock to return ENOENT if lock is not found */
	fl->fl_flags |= FL_EXISTS;

	rv = locks_lock_file_wait(file, fl);
	if (rv == -ENOENT) {
		rv = 0;
		goto out_free;
	}
	if (rv < 0) {
		log_error(ls, "dlm_posix_unlock: vfs unlock error %d %llx",
			  rv, (unsigned long long)number);
	}

	op->info.optype = DLM_PLOCK_OP_UNLOCK;
	op->info.pid = fl->fl_pid;
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	if (fl->fl_flags & FL_CLOSE) {
		op->info.flags |= DLM_PLOCK_FL_CLOSE;
		send_op(op);
		rv = 0;
		goto out;
	}

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	WARN_ON(!list_empty(&op->list));

	rv = op->info.rv;

	if (rv == -ENOENT)
		rv = 0;

out_free:
	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	fl->fl_flags = fl_flags;
	return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_unlock);

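/* F_GETLK: ask the daemon whether anything conflicts with the caller's
 * lock.  On a conflict the daemon replies with 1 and the blocking lock
 * is copied into fl; otherwise fl_type is left as F_UNLCK. */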
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
		  struct file_lock *fl)
{
	struct dlm_ls *ls;
	struct plock_op *op;
	int rv;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;

	op = kzalloc(sizeof(*op), GFP_NOFS);
	if (!op) {
		rv = -ENOMEM;
		goto out;
	}

	op->info.optype = DLM_PLOCK_OP_GET;
	op->info.pid = fl->fl_pid;
	op->info.ex = (fl->fl_type == F_WRLCK);
	op->info.fsid = ls->ls_global_id;
	op->info.number = number;
	op->info.start = fl->fl_start;
	op->info.end = fl->fl_end;
	if (fl->fl_lmops && fl->fl_lmops->lm_grant)
		op->info.owner = (__u64) fl->fl_pid;
	else
		op->info.owner = (__u64)(long) fl->fl_owner;

	send_op(op);
	wait_event(recv_wq, (op->done != 0));

	WARN_ON(!list_empty(&op->list));

	/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
	   -ENOENT if there are no locks on the file */

	rv = op->info.rv;

	fl->fl_type = F_UNLCK;
	if (rv == -ENOENT)
		rv = 0;
	else if (rv > 0) {
		locks_init_lock(fl);
		fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
		fl->fl_flags = FL_POSIX;
		fl->fl_pid = -op->info.pid;
		fl->fl_start = op->info.start;
		fl->fl_end = op->info.end;
		rv = 0;
	}

	dlm_release_plock_op(op);
out:
	dlm_put_lockspace(ls);
	return rv;
}
EXPORT_SYMBOL_GPL(dlm_posix_get);

/* a read copies out one plock request from the send list */
static ssize_t dev_read(struct file *file, char __user *u, size_t count,
			loff_t *ppos)
{
	struct dlm_plock_info info;
	struct plock_op *op = NULL;

	if (count < sizeof(info))
		return -EINVAL;

	spin_lock(&ops_lock);
	if (!list_empty(&send_list)) {
		op = list_first_entry(&send_list, struct plock_op, list);
		if (op->info.flags & DLM_PLOCK_FL_CLOSE)
			list_del(&op->list);
		else
			list_move(&op->list, &recv_list);
		memcpy(&info, &op->info, sizeof(info));
	}
	spin_unlock(&ops_lock);

	if (!op)
		return -EAGAIN;

	/* there is no need to get a reply from userspace for unlocks
	   that were generated by the vfs cleaning up for a close
	   (the process did not make an unlock call). */

	if (op->info.flags & DLM_PLOCK_FL_CLOSE)
		dlm_release_plock_op(op);

	if (copy_to_user(u, &info, sizeof(info)))
		return -EFAULT;
	return sizeof(info);
}

/* a write copies in one plock result that should match a plock_op
   on the recv list */
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
			 loff_t *ppos)
{
	struct plock_op *op = NULL, *iter;
	struct dlm_plock_info info;
	int do_callback = 0;

	if (count != sizeof(info))
		return -EINVAL;

	if (copy_from_user(&info, u, sizeof(info)))
		return -EFAULT;

	if (check_version(&info))
		return -EINVAL;

	spin_lock(&ops_lock);
	list_for_each_entry(iter, &recv_list, list) {
		if (iter->info.fsid == info.fsid &&
		    iter->info.number == info.number &&
		    iter->info.owner == info.owner) {
			if (iter->sigint) {
				list_del(&iter->list);
				spin_unlock(&ops_lock);

				pr_debug("%s: sigint cleanup %x %llx pid %d",
					 __func__, iter->info.fsid,
					 (unsigned long long)iter->info.number,
					 iter->info.pid);
				do_unlock_close(&iter->info);
				memcpy(&iter->info, &info, sizeof(info));
				dlm_release_plock_op(iter);
				return count;
			}
			list_del_init(&iter->list);
			memcpy(&iter->info, &info, sizeof(info));
			if (iter->data)
				do_callback = 1;
			else
				iter->done = 1;
			op = iter;
			break;
		}
	}
	spin_unlock(&ops_lock);

	if (op) {
		if (do_callback)
			dlm_plock_callback(op);
		else
			wake_up(&recv_wq);
	} else
		log_print("%s: no op %x %llx", __func__,
			  info.fsid, (unsigned long long)info.number);
	return count;
}

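/* The daemon polls the misc device for work; report it readable whenever
 * ops are queued on send_list. */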
static __poll_t dev_poll(struct file *file, poll_table *wait)
{
	__poll_t mask = 0;

	poll_wait(file, &send_wq, wait);

	spin_lock(&ops_lock);
	if (!list_empty(&send_list))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&ops_lock);

	return mask;
}

static const struct file_operations dev_fops = {
	.read    = dev_read,
	.write   = dev_write,
	.poll    = dev_poll,
	.owner   = THIS_MODULE,
	.llseek  = noop_llseek,
};

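/* Registered as the DLM_PLOCK_MISC_NAME misc device, which the userspace
 * daemon opens to service plock requests. */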
static struct miscdevice plock_dev_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = DLM_PLOCK_MISC_NAME,
	.fops = &dev_fops
};

int dlm_plock_init(void)
{
	int rv;

	rv = misc_register(&plock_dev_misc);
	if (rv)
		log_print("dlm_plock_init: misc_register failed %d", rv);
	return rv;
}

void dlm_plock_exit(void)
{
	misc_deregister(&plock_dev_misc);
}