ksmbd: separately allocate ci per dentry
fs/smb/server/vfs_cache.c (platform/kernel/linux-rpi.git)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"

#define S_DEL_PENDING                   1
#define S_DEL_ON_CLS                    2
#define S_DEL_ON_CLS_STREAM             8

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

void ksmbd_set_fd_limit(unsigned long limit)
{
        limit = min(limit, get_max_files());
        atomic_long_set(&fd_limit, limit);
}

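/*
 * fd_limit_depleted() reserves one slot against the configured fd limit
 * (set by ksmbd_set_fd_limit()); it restores the counter and returns true
 * when no slot is left. fd_limit_close() releases a previously reserved
 * slot when an fd is closed or id allocation fails.
 */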
static bool fd_limit_depleted(void)
{
        long v = atomic_long_dec_return(&fd_limit);

        if (v >= 0)
                return false;
        atomic_long_inc(&fd_limit);
        return true;
}

static void fd_limit_close(void)
{
        atomic_long_inc(&fd_limit);
}

/*
 * INODE hash
 */

static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
        return tmp & inode_hash_mask;
}

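/*
 * Look up the ksmbd_inode cached for @de in the inode hash table and take
 * a reference on it with atomic_inc_not_zero(), so an entry whose last
 * reference is being dropped is never returned. Callers must hold
 * inode_hash_lock (read or write).
 */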
static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
        struct hlist_head *head = inode_hashtable +
                inode_hash(d_inode(de)->i_sb, (unsigned long)de);
        struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

        hlist_for_each_entry(ci, head, m_hash) {
                if (ci->m_de == de) {
                        if (atomic_inc_not_zero(&ci->m_count))
                                ret_ci = ci;
                        break;
                }
        }
        return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
        return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}

int ksmbd_query_inode_status(struct dentry *dentry)
{
        struct ksmbd_inode *ci;
        int ret = KSMBD_INODE_STATUS_UNKNOWN;

        read_lock(&inode_hash_lock);
        ci = __ksmbd_inode_lookup(dentry);
        if (ci) {
                ret = KSMBD_INODE_STATUS_OK;
                if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
                        ret = KSMBD_INODE_STATUS_PENDING_DELETE;
                atomic_dec(&ci->m_count);
        }
        read_unlock(&inode_hash_lock);
        return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
        return (fp->f_ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
        fp->f_ci->m_flags |= S_DEL_PENDING;
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
        fp->f_ci->m_flags &= ~S_DEL_PENDING;
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
                                  int file_info)
{
        if (ksmbd_stream_fd(fp)) {
                fp->f_ci->m_flags |= S_DEL_ON_CLS_STREAM;
                return;
        }

        fp->f_ci->m_flags |= S_DEL_ON_CLS;
}

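/*
 * Hash-table insertion/removal helpers. ksmbd_inode_hash() expects the
 * caller to hold inode_hash_lock for writing; ksmbd_inode_unhash() takes
 * the write lock itself.
 */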
static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
        struct hlist_head *b = inode_hashtable +
                inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);

        hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
        write_lock(&inode_hash_lock);
        hlist_del_init(&ci->m_hash);
        write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
        atomic_set(&ci->m_count, 1);
        atomic_set(&ci->op_count, 0);
        atomic_set(&ci->sop_count, 0);
        ci->m_flags = 0;
        ci->m_fattr = 0;
        INIT_LIST_HEAD(&ci->m_fp_list);
        INIT_LIST_HEAD(&ci->m_op_list);
        rwlock_init(&ci->m_lock);
        ci->m_de = fp->filp->f_path.dentry;
        return 0;
}

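/*
 * Find the ksmbd_inode for the dentry behind @fp, or allocate and hash a
 * new one. The lookup is repeated under the write lock so that two opens
 * racing on the same dentry end up sharing a single ksmbd_inode; the
 * loser frees its freshly allocated copy.
 */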
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
        struct ksmbd_inode *ci, *tmpci;
        int rc;

        read_lock(&inode_hash_lock);
        ci = ksmbd_inode_lookup(fp);
        read_unlock(&inode_hash_lock);
        if (ci)
                return ci;

        ci = kmalloc(sizeof(struct ksmbd_inode), GFP_KERNEL);
        if (!ci)
                return NULL;

        rc = ksmbd_inode_init(ci, fp);
        if (rc) {
                pr_err("inode initialization failed\n");
                kfree(ci);
                return NULL;
        }

        write_lock(&inode_hash_lock);
        tmpci = ksmbd_inode_lookup(fp);
        if (!tmpci) {
                ksmbd_inode_hash(ci);
        } else {
                kfree(ci);
                ci = tmpci;
        }
        write_unlock(&inode_hash_lock);
        return ci;
}

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
        ksmbd_inode_unhash(ci);
        kfree(ci);
}

static void ksmbd_inode_put(struct ksmbd_inode *ci)
{
        if (atomic_dec_and_test(&ci->m_count))
                ksmbd_inode_free(ci);
}

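/*
 * The inode hash table is sized once at module init: 16384 buckets
 * (inode_hash_shift = 14), allocated with vmalloc() and freed again in
 * ksmbd_release_inode_hash().
 */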
int __init ksmbd_inode_hash_init(void)
{
        unsigned int loop;
        unsigned long numentries = 16384;
        unsigned long bucketsize = sizeof(struct hlist_head);
        unsigned long size;

        inode_hash_shift = ilog2(numentries);
        inode_hash_mask = (1 << inode_hash_shift) - 1;

        size = bucketsize << inode_hash_shift;

        /* init inode hash table */
        inode_hashtable = vmalloc(size);
        if (!inode_hashtable)
                return -ENOMEM;

        for (loop = 0; loop < (1U << inode_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
        return 0;
}

void ksmbd_release_inode_hash(void)
{
        vfree(inode_hashtable);
}

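/*
 * Drop the fd's reference on its ksmbd_inode. For stream fds with
 * delete-on-close set, the stream xattr is removed first. When the last
 * reference goes away, a pending delete (S_DEL_ON_CLS/S_DEL_PENDING)
 * unlinks the file before the ksmbd_inode is unhashed and freed.
 */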
static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
        struct ksmbd_inode *ci = fp->f_ci;
        int err;
        struct file *filp;

        filp = fp->filp;
        if (ksmbd_stream_fd(fp) && (ci->m_flags & S_DEL_ON_CLS_STREAM)) {
                ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
                err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
                                             &filp->f_path,
                                             fp->stream.name);
                if (err)
                        pr_err("remove xattr failed: %s\n",
                               fp->stream.name);
        }

        if (atomic_dec_and_test(&ci->m_count)) {
                write_lock(&ci->m_lock);
                if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
                        ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
                        write_unlock(&ci->m_lock);
                        ksmbd_vfs_unlink(filp);
                        write_lock(&ci->m_lock);
                }
                write_unlock(&ci->m_lock);

                ksmbd_inode_free(ci);
        }
}

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
        if (!has_file_id(fp->persistent_id))
                return;

        write_lock(&global_ft.lock);
        idr_remove(global_ft.idr, fp->persistent_id);
        write_unlock(&global_ft.lock);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
        if (!has_file_id(fp->volatile_id))
                return;

        write_lock(&fp->f_ci->m_lock);
        list_del_init(&fp->node);
        write_unlock(&fp->f_ci->m_lock);

        write_lock(&ft->lock);
        idr_remove(ft->idr, fp->volatile_id);
        write_unlock(&ft->lock);
}

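/*
 * Final teardown of a ksmbd_file once its refcount has dropped to zero:
 * release the fd-limit slot, remove the fd from the durable and passed-in
 * tables, tear down oplock state, drop the inode reference (which may
 * unlink the file), fput() the VFS file and free any remaining byte-range
 * locks before returning the ksmbd_file to its slab cache.
 */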
static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
        struct file *filp;
        struct ksmbd_lock *smb_lock, *tmp_lock;

        fd_limit_close();
        __ksmbd_remove_durable_fd(fp);
        __ksmbd_remove_fd(ft, fp);

        close_id_del_oplock(fp);
        filp = fp->filp;

        __ksmbd_inode_close(fp);
        if (!IS_ERR_OR_NULL(filp))
                fput(filp);

        /* Because the reference count of fp is 0, it is guaranteed that
         * there are no accesses to fp->lock_list.
         */
        list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
                spin_lock(&fp->conn->llist_lock);
                list_del(&smb_lock->clist);
                spin_unlock(&fp->conn->llist_lock);

                list_del(&smb_lock->flist);
                locks_free_lock(smb_lock->fl);
                kfree(smb_lock);
        }

        if (ksmbd_stream_fd(fp))
                kfree(fp->stream.name);
        kmem_cache_free(filp_cache, fp);
}

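/*
 * Take a reference on @fp for a lookup. Files that are not in FP_INITED
 * state (still being opened, or already closed) and files whose refcount
 * has already hit zero are not handed out.
 */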
static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
        if (fp->f_state != FP_INITED)
                return NULL;

        if (!atomic_inc_not_zero(&fp->refcount))
                return NULL;
        return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
                                            u64 id)
{
        struct ksmbd_file *fp;

        if (!has_file_id(id))
                return NULL;

        read_lock(&ft->lock);
        fp = idr_find(ft->idr, id);
        if (fp)
                fp = ksmbd_fp_get(fp);
        read_unlock(&ft->lock);
        return fp;
}

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
        __ksmbd_close_fd(&work->sess->file_table, fp);
        atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
        struct ksmbd_work *cancel_work;

        spin_lock(&fp->f_lock);
        list_for_each_entry(cancel_work, &fp->blocked_works,
                                 fp_entry) {
                cancel_work->state = KSMBD_WORK_CLOSED;
                cancel_work->cancel_fn(cancel_work->cancel_argv);
        }
        spin_unlock(&fp->f_lock);
}

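/*
 * Close the volatile fd @id in the work's session file table. Under the
 * table lock the fd is marked FP_CLOSED and any works blocked on it are
 * cancelled; if this drops the last reference, __put_fd_final() performs
 * the actual teardown. Returns -EINVAL when the id is unknown, the fd is
 * not in FP_INITED state, or other references are still outstanding.
 */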
int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
        struct ksmbd_file       *fp;
        struct ksmbd_file_table *ft;

        if (!has_file_id(id))
                return 0;

        ft = &work->sess->file_table;
        write_lock(&ft->lock);
        fp = idr_find(ft->idr, id);
        if (fp) {
                set_close_state_blocked_works(fp);

                if (fp->f_state != FP_INITED)
                        fp = NULL;
                else {
                        fp->f_state = FP_CLOSED;
                        if (!atomic_dec_and_test(&fp->refcount))
                                fp = NULL;
                }
        }
        write_unlock(&ft->lock);

        if (!fp)
                return -EINVAL;

        __put_fd_final(work, fp);
        return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
        if (!fp)
                return;

        if (!atomic_dec_and_test(&fp->refcount))
                return;
        __put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
        if (!fp)
                return false;
        if (fp->tcon != tcon)
                return false;
        return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
        return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
        struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

        if (__sanity_check(work->tcon, fp))
                return fp;

        ksmbd_fd_put(work, fp);
        return NULL;
}

struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
                                        u64 pid)
{
        struct ksmbd_file *fp;

        if (!has_file_id(id)) {
                id = work->compound_fid;
                pid = work->compound_pfid;
        }

        fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
        if (!__sanity_check(work->tcon, fp)) {
                ksmbd_fd_put(work, fp);
                return NULL;
        }
        if (fp->persistent_id != pid) {
                ksmbd_fd_put(work, fp);
                return NULL;
        }
        return fp;
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
        return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
        struct ksmbd_file       *fp = NULL;
        unsigned int            id;

        read_lock(&global_ft.lock);
        idr_for_each_entry(global_ft.idr, fp, id) {
                if (!memcmp(fp->create_guid,
                            cguid,
                            SMB2_CREATE_GUID_SIZE)) {
                        fp = ksmbd_fp_get(fp);
                        break;
                }
        }
        read_unlock(&global_ft.lock);

        return fp;
}

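/*
 * Find an open ksmbd_file for @dentry by walking the ksmbd_inode's
 * m_fp_list under ci->m_lock. The temporary reference taken on the
 * ksmbd_inode by the hash lookup is dropped again before returning.
 */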
struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
        struct ksmbd_file       *lfp;
        struct ksmbd_inode      *ci;
        struct inode            *inode = d_inode(dentry);

        read_lock(&inode_hash_lock);
        ci = __ksmbd_inode_lookup(dentry);
        read_unlock(&inode_hash_lock);
        if (!ci)
                return NULL;

        read_lock(&ci->m_lock);
        list_for_each_entry(lfp, &ci->m_fp_list, node) {
                if (inode == file_inode(lfp->filp)) {
                        atomic_dec(&ci->m_count);
                        lfp = ksmbd_fp_get(lfp);
                        read_unlock(&ci->m_lock);
                        return lfp;
                }
        }
        atomic_dec(&ci->m_count);
        read_unlock(&ci->m_lock);
        return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID        (0)
#define OPEN_ID_TYPE_PERSISTENT_ID      (1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
        if (type == OPEN_ID_TYPE_VOLATILE_ID)
                fp->volatile_id = id;
        if (type == OPEN_ID_TYPE_PERSISTENT_ID)
                fp->persistent_id = id;
}

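/*
 * Allocate an id for @fp in @ft with idr_alloc_cyclic() and record it as
 * the volatile or persistent id. Opening a volatile id first reserves a
 * slot against the fd limit (returning -EMFILE when depleted); on idr
 * allocation failure the slot is released again with fd_limit_close().
 */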
static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
                     int type)
{
        u64                     id = 0;
        int                     ret;

        if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
                __open_id_set(fp, KSMBD_NO_FID, type);
                return -EMFILE;
        }

        idr_preload(GFP_KERNEL);
        write_lock(&ft->lock);
        ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
        if (ret >= 0) {
                id = ret;
                ret = 0;
        } else {
                id = KSMBD_NO_FID;
                fd_limit_close();
        }

        __open_id_set(fp, id, type);
        write_unlock(&ft->lock);
        idr_preload_end();
        return ret;
}

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
        __open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
        return fp->persistent_id;
}

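/*
 * Allocate and initialise a ksmbd_file for an opened struct file: take the
 * initial refcount, attach (or create) the backing ksmbd_inode and allocate
 * a volatile id in the session's file table. The fp starts in FP_NEW and is
 * only visible to lookups once ksmbd_update_fstate() moves it to FP_INITED.
 */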
struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
        struct ksmbd_file *fp;
        int ret;

        fp = kmem_cache_zalloc(filp_cache, GFP_KERNEL);
        if (!fp) {
                pr_err("Failed to allocate memory\n");
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&fp->blocked_works);
        INIT_LIST_HEAD(&fp->node);
        INIT_LIST_HEAD(&fp->lock_list);
        spin_lock_init(&fp->f_lock);
        atomic_set(&fp->refcount, 1);

        fp->filp                = filp;
        fp->conn                = work->conn;
        fp->tcon                = work->tcon;
        fp->volatile_id         = KSMBD_NO_FID;
        fp->persistent_id       = KSMBD_NO_FID;
        fp->f_state             = FP_NEW;
        fp->f_ci                = ksmbd_inode_get(fp);

        if (!fp->f_ci) {
                ret = -ENOMEM;
                goto err_out;
        }

        ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
        if (ret) {
                ksmbd_inode_put(fp->f_ci);
                goto err_out;
        }

        atomic_inc(&work->conn->stats.open_files_count);
        return fp;

err_out:
        kmem_cache_free(filp_cache, fp);
        return ERR_PTR(ret);
}

void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
                         unsigned int state)
{
        if (!fp)
                return;

        write_lock(&ft->lock);
        fp->f_state = state;
        write_unlock(&ft->lock);
}

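/*
 * Walk every fd in @ft and close those for which @skip returns false.
 * Used both to close all fds of a tree connection (tree_conn_fd_check)
 * and to close everything in a table (session_fd_check always returns
 * false). Returns the number of fds that were actually torn down.
 */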
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
                       struct ksmbd_tree_connect *tcon,
                       bool (*skip)(struct ksmbd_tree_connect *tcon,
                                    struct ksmbd_file *fp))
{
        unsigned int                    id;
        struct ksmbd_file               *fp;
        int                             num = 0;

        idr_for_each_entry(ft->idr, fp, id) {
                if (skip(tcon, fp))
                        continue;

                set_close_state_blocked_works(fp);

                if (!atomic_dec_and_test(&fp->refcount))
                        continue;
                __ksmbd_close_fd(ft, fp);
                num++;
        }
        return num;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
                               struct ksmbd_file *fp)
{
        return fp->tcon != tcon;
}

static bool session_fd_check(struct ksmbd_tree_connect *tcon,
                             struct ksmbd_file *fp)
{
        return false;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
        int num = __close_file_table_ids(&work->sess->file_table,
                                         work->tcon,
                                         tree_conn_fd_check);

        atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
        int num = __close_file_table_ids(&work->sess->file_table,
                                         work->tcon,
                                         session_fd_check);

        atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
        return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
        struct ksmbd_file       *fp = NULL;
        unsigned int            id;

        idr_for_each_entry(global_ft.idr, fp, id) {
                __ksmbd_remove_durable_fd(fp);
                kmem_cache_free(filp_cache, fp);
        }

        ksmbd_destroy_file_table(&global_ft);
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
        ft->idr = kzalloc(sizeof(struct idr), GFP_KERNEL);
        if (!ft->idr)
                return -ENOMEM;

        idr_init(ft->idr);
        rwlock_init(&ft->lock);
        return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
        if (!ft->idr)
                return;

        __close_file_table_ids(ft, NULL, session_fd_check);
        idr_destroy(ft->idr);
        kfree(ft->idr);
        ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
        filp_cache = kmem_cache_create("ksmbd_file_cache",
                                       sizeof(struct ksmbd_file), 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
        if (!filp_cache)
                goto out;

        return 0;

out:
        pr_err("failed to allocate file cache\n");
        return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
        kmem_cache_destroy(filp_cache);
}