fs/ceph/locks.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/filelock.h>
#include <linux/ceph/pagelist.h>

static u64 lock_secret;
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req);

static inline u64 secure_addr(void *addr)
{
        u64 v = lock_secret ^ (u64)(unsigned long)addr;
        /*
         * Set the most significant bit so that the MDS knows 'owner'
         * alone is sufficient to identify the owner of the lock (old
         * code used both 'owner' and 'pid').
         */
        v |= (1ULL << 63);
        return v;
}

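/* Generate the per-boot secret that secure_addr() mixes into lock owners. */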
void __init ceph_flock_init(void)
{
        get_random_bytes(&lock_secret, sizeof(lock_secret));
}

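/* Take an i_filelock_ref and an inode reference on behalf of the copied lock. */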
static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct inode *inode = file_inode(dst->fl_file);
        atomic_inc(&ceph_inode(inode)->i_filelock_ref);
        dst->fl_u.ceph.inode = igrab(inode);
}

/*
 * Do not use 'fl->fl_file' in the release function; it may already
 * have been released by another thread.
 */
static void ceph_fl_release_lock(struct file_lock *fl)
{
        struct inode *inode = fl->fl_u.ceph.inode;
        struct ceph_inode_info *ci;

        /*
         * If inode is NULL, this should be a request file_lock and
         * there is nothing to do.
         */
        if (!inode)
                return;

        ci = ceph_inode(inode);
        if (atomic_dec_and_test(&ci->i_filelock_ref)) {
                /* clear error when all locks are released */
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
                spin_unlock(&ci->i_ceph_lock);
        }
        fl->fl_u.ceph.inode = NULL;
        iput(inode);
}

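/* Callbacks used by the generic VFS locking code to manage our lock state. */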
static const struct file_lock_operations ceph_fl_lock_ops = {
        .fl_copy_lock = ceph_fl_copy_lock,
        .fl_release_private = ceph_fl_release_lock,
};

/*
 * Implement fcntl and flock locking functions.
 */
static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
        u64 owner;

        if (operation == CEPH_MDS_OP_SETFILELOCK) {
                /*
                 * Increasing i_filelock_ref closes the race window
                 * between handling the request reply and adding the
                 * file_lock struct to the inode. Otherwise, auth caps
                 * may get trimmed in that window. The caller will
                 * decrease the counter.
                 */
                fl->fl_ops = &ceph_fl_lock_ops;
                fl->fl_ops->fl_copy_lock(fl, NULL);
        }

        if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
                wait = 0;

        req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
                length = 0;
        else
                length = fl->fl_end - fl->fl_start + 1;

        owner = secure_addr(fl->fl_owner);

        dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
             "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
             (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
             wait, fl->fl_type);

        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
        req->r_args.filelock_change.owner = cpu_to_le64(owner);
        req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
        req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
        req->r_args.filelock_change.length = cpu_to_le64(length);
        req->r_args.filelock_change.wait = wait;

        err = ceph_mdsc_submit_request(mdsc, inode, req);
        if (!err)
                err = ceph_mdsc_wait_request(mdsc, req, wait ?
                                        ceph_lock_wait_for_completion : NULL);
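        /* translate the MDS reply for a GETFILELOCK back into *fl */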
        if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_RDLCK;
                else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_WRLCK;
                else
                        fl->fl_type = F_UNLCK;

                fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
                length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
                         le64_to_cpu(req->r_reply_info.filelock_reply->length);
                if (length >= 1)
                        fl->fl_end = length - 1;
                else
                        fl->fl_end = 0;
        }
        ceph_mdsc_put_request(req);
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
             "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type, err);
        return err;
}

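/*
 * Wait for a blocking SETFILELOCK request to complete. If the wait is
 * interrupted, ask the MDS to abort the pending lock request by sending
 * a matching *_INTR unlock, then wait for the original request to
 * become safe.
 */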
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req)
{
        struct ceph_mds_request *intr_req;
        struct inode *inode = req->r_inode;
        int err, lock_type;

        BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
        if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
                lock_type = CEPH_LOCK_FCNTL_INTR;
        else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
                lock_type = CEPH_LOCK_FLOCK_INTR;
        else
                BUG_ON(1);
        BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);

        err = wait_for_completion_interruptible(&req->r_completion);
        if (!err)
                return 0;

        dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
             req->r_tid);

        mutex_lock(&mdsc->mutex);
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = 0;
        } else {
                /*
                 * ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);

                if (!req->r_session) {
                        /* haven't sent the request */
                        err = 0;
                }
        }
        mutex_unlock(&mdsc->mutex);
        if (!err)
                return 0;

        intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
                                            USE_AUTH_MDS);
        if (IS_ERR(intr_req))
                return PTR_ERR(intr_req);

        intr_req->r_inode = inode;
        ihold(inode);
        intr_req->r_num_caps = 1;

        intr_req->r_args.filelock_change = req->r_args.filelock_change;
        intr_req->r_args.filelock_change.rule = lock_type;
        intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;

        err = ceph_mdsc_do_request(mdsc, inode, intr_req);
        ceph_mdsc_put_request(intr_req);

        if (err && err != -ERESTARTSYS)
                return err;

        wait_for_completion_killable(&req->r_safe_completion);
        return 0;
}

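/*
 * Try to drop the lock locally first. Returns 1 when the MDS should
 * also be told to unlock, 0 when there was nothing to unlock, or a
 * negative error code.
 */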
static int try_unlock_file(struct file *file, struct file_lock *fl)
{
        int err;
        unsigned int orig_flags = fl->fl_flags;

        fl->fl_flags |= FL_EXISTS;
        err = locks_lock_file_wait(file, fl);
        fl->fl_flags = orig_flags;
        if (err == -ENOENT) {
                if (!(orig_flags & FL_EXISTS))
                        err = 0;
                return err;
        }
        return 1;
}

/*
 * Attempt to set an fcntl lock.
 * For now, this just goes away to the server. Later it may be more awesome.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;

        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;

        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);

        /* set the wait bit as appropriate, then encode the command as Ceph expects it */
        if (IS_GETLK(cmd))
                op = CEPH_MDS_OP_GETFILELOCK;
        else if (IS_SETLKW(cmd))
                wait = 1;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
                err = -EIO;
        }
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
                        posix_lock_file(file, fl, NULL);
                return err;
        }

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

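        /*
         * For unlock, drop any matching local lock first; if there was
         * none, there is nothing to ask the MDS to release.
         */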
        if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
                        dout("mds locked, locking locally\n");
                        err = posix_lock_file(file, fl, NULL);
                        if (err) {
                                /*
                                 * undo! This should only happen if
                                 * the kernel detects local deadlock.
                                 */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
                                dout("got %d on posix_lock_file, undid lock\n",
                                     err);
                        }
                }
        }
        return err;
}

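/*
 * Attempt to set an advisory flock lock. As with ceph_lock(), the lock
 * is taken on the MDS first and then mirrored locally.
 */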
int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;

        if (ceph_inode_is_shutdown(inode))
                return -ESTALE;

        dout("ceph_flock, fl_file: %p\n", fl->fl_file);

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
                err = -EIO;
        }
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (F_UNLCK == fl->fl_type)
                        locks_lock_file_wait(file, fl);
                return err;
        }

        if (IS_SETLKW(cmd))
                wait = 1;

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        if (F_UNLCK == fl->fl_type) {
                err = try_unlock_file(file, fl);
                if (err <= 0)
                        return err;
        }

        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
                                inode, lock_cmd, wait, fl);
        if (!err && F_UNLCK != fl->fl_type) {
                err = locks_lock_file_wait(file, fl);
                if (err) {
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          inode, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on locks_lock_file_wait, undid lock\n", err);
                }
        }
        return err;
}

/*
 * Fill in the passed counter variables, so you can prepare pagelist metadata
 * before calling ceph_encode_locks_to_buffer().
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
        struct file_lock *lock;
        struct file_lock_context *ctx;

        *fcntl_count = 0;
        *flock_count = 0;

        ctx = locks_inode_context(inode);
        if (ctx) {
                spin_lock(&ctx->flc_lock);
                list_for_each_entry(lock, &ctx->flc_posix, fl_list)
                        ++(*fcntl_count);
                list_for_each_entry(lock, &ctx->flc_flock, fl_list)
                        ++(*flock_count);
                spin_unlock(&ctx->flc_lock);
        }
        dout("counted %d flock locks and %d fcntl locks\n",
             *flock_count, *fcntl_count);
}

/*
 * Given a pointer to a lock, convert it to a ceph filelock
 */
static int lock_to_ceph_filelock(struct file_lock *lock,
                                 struct ceph_filelock *cephlock)
{
        int err = 0;

        cephlock->start = cpu_to_le64(lock->fl_start);
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
        cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));

        switch (lock->fl_type) {
        case F_RDLCK:
                cephlock->type = CEPH_LOCK_SHARED;
                break;
        case F_WRLCK:
                cephlock->type = CEPH_LOCK_EXCL;
                break;
        case F_UNLCK:
                cephlock->type = CEPH_LOCK_UNLOCK;
                break;
        default:
                dout("Have unknown lock type %d\n", lock->fl_type);
                err = -EINVAL;
        }

        return err;
}

/*
 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. Must be called with inode->i_lock already held.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
 */
int ceph_encode_locks_to_buffer(struct inode *inode,
                                struct ceph_filelock *flocks,
                                int num_fcntl_locks, int num_flock_locks)
{
        struct file_lock *lock;
        struct file_lock_context *ctx = locks_inode_context(inode);
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
        int l = 0;

        dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
             num_fcntl_locks);

        if (!ctx)
                return 0;

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
                ++seen_flock;
                if (seen_flock > num_flock_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
fail:
        spin_unlock(&ctx->flc_lock);
        return err;
}

/*
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
                           struct ceph_pagelist *pagelist,
                           int num_fcntl_locks, int num_flock_locks)
{
        int err = 0;
        __le32 nlocks;

        nlocks = cpu_to_le32(num_fcntl_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_fcntl_locks > 0) {
                err = ceph_pagelist_append(pagelist, flocks,
                                           num_fcntl_locks * sizeof(*flocks));
                if (err)
                        goto out_fail;
        }

        nlocks = cpu_to_le32(num_flock_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_flock_locks > 0) {
                err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
                                           num_flock_locks * sizeof(*flocks));
        }
out_fail:
        return err;
}