ceph: add new mount option to enable sparse reads
fs/ceph/addr.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/backing-dev.h>
5 #include <linux/fs.h>
6 #include <linux/mm.h>
7 #include <linux/swap.h>
8 #include <linux/pagemap.h>
9 #include <linux/slab.h>
10 #include <linux/pagevec.h>
11 #include <linux/task_io_accounting_ops.h>
12 #include <linux/signal.h>
13 #include <linux/iversion.h>
14 #include <linux/ktime.h>
15 #include <linux/netfs.h>
16
17 #include "super.h"
18 #include "mds_client.h"
19 #include "cache.h"
20 #include "metric.h"
21 #include <linux/ceph/osd_client.h>
22 #include <linux/ceph/striper.h>
23
24 /*
25  * Ceph address space ops.
26  *
27  * There are a few funny things going on here.
28  *
29  * The page->private field is used to reference a struct
30  * ceph_snap_context for _every_ dirty page.  This indicates which
31  * snapshot the page was logically dirtied in, and thus which snap
32  * context needs to be associated with the osd write during writeback.
33  *
34  * Similarly, struct ceph_inode_info maintains a set of counters to
35  * count dirty pages on the inode.  In the absence of snapshots,
36  * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
37  *
38  * When a snapshot is taken (that is, when the client receives
39  * notification that a snapshot was taken), each inode with caps and
40  * with dirty pages (dirty pages implies there is a cap) gets a new
41  * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
42  * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
43  * moved to capsnap->dirty. (Unless a sync write is currently in
44  * progress.  In that case, the capsnap is said to be "pending", new
45  * writes cannot start, and the capsnap isn't "finalized" until the
46  * write completes (or fails) and a final size/mtime for the inode for
47  * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
48  *
49  * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
50  * we look for the first capsnap in i_cap_snaps and write out pages in
51  * that snap context _only_.  Then we move on to the next capsnap,
52  * eventually reaching the "live" or "head" context (i.e., pages that
53  * are not yet snapped), at which point we write the most recently dirtied
54  * pages.
55  *
56  * Invalidate and so forth must take care to ensure the dirty page
57  * accounting is preserved.
58  */
59
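/*
 * congestion_kb is in KiB; shifting right by (PAGE_SHIFT - 10) converts
 * KiB to pages (e.g., with 4 KiB pages, PAGE_SHIFT - 10 == 2, so
 * 8192 KiB maps to 2048 pages).  The OFF threshold is 3/4 of the ON
 * threshold, giving some hysteresis so writeback doesn't flap in and
 * out of the congested state.
 */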
60 #define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
61 #define CONGESTION_OFF_THRESH(congestion_kb)                            \
62         (CONGESTION_ON_THRESH(congestion_kb) -                          \
63          (CONGESTION_ON_THRESH(congestion_kb) >> 2))
64
65 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
66                                         struct folio **foliop, void **_fsdata);
67
68 static inline struct ceph_snap_context *page_snap_context(struct page *page)
69 {
70         if (PagePrivate(page))
71                 return (void *)page->private;
72         return NULL;
73 }
74
75 /*
76  * Dirty a page.  Optimistically adjust accounting, on the assumption
77  * that we won't race with invalidate.  If we do, readjust.
78  */
79 static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
80 {
81         struct inode *inode;
82         struct ceph_inode_info *ci;
83         struct ceph_snap_context *snapc;
84
85         if (folio_test_dirty(folio)) {
86                 dout("%p dirty_folio %p idx %lu -- already dirty\n",
87                      mapping->host, folio, folio->index);
88                 VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
89                 return false;
90         }
91
92         inode = mapping->host;
93         ci = ceph_inode(inode);
94
95         /* dirty the head */
96         spin_lock(&ci->i_ceph_lock);
97         BUG_ON(ci->i_wr_ref == 0); /* caller should hold Fw reference */
98         if (__ceph_have_pending_cap_snap(ci)) {
99                 struct ceph_cap_snap *capsnap =
100                                 list_last_entry(&ci->i_cap_snaps,
101                                                 struct ceph_cap_snap,
102                                                 ci_item);
103                 snapc = ceph_get_snap_context(capsnap->context);
104                 capsnap->dirty_pages++;
105         } else {
106                 BUG_ON(!ci->i_head_snapc);
107                 snapc = ceph_get_snap_context(ci->i_head_snapc);
108                 ++ci->i_wrbuffer_ref_head;
109         }
110         if (ci->i_wrbuffer_ref == 0)
111                 ihold(inode);
112         ++ci->i_wrbuffer_ref;
113         dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
114              "snapc %p seq %lld (%d snaps)\n",
115              mapping->host, folio, folio->index,
116              ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
117              ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
118              snapc, snapc->seq, snapc->num_snaps);
119         spin_unlock(&ci->i_ceph_lock);
120
121         /*
122          * Reference snap context in folio->private.  Also set
123          * PagePrivate so that we get invalidate_folio callback.
124          */
125         VM_WARN_ON_FOLIO(folio->private, folio);
126         folio_attach_private(folio, snapc);
127
128         return ceph_fscache_dirty_folio(mapping, folio);
129 }
130
131 /*
132  * If we are truncating the full folio (i.e., offset == 0 and length ==
133  * folio_size()), adjust the dirty folio counters appropriately.  Only
134  * called if there is private data on the folio.
135  */
136 static void ceph_invalidate_folio(struct folio *folio, size_t offset,
137                                 size_t length)
138 {
139         struct inode *inode;
140         struct ceph_inode_info *ci;
141         struct ceph_snap_context *snapc;
142
143         inode = folio->mapping->host;
144         ci = ceph_inode(inode);
145
146         if (offset != 0 || length != folio_size(folio)) {
147                 dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
148                      inode, folio->index, offset, length);
149                 return;
150         }
151
152         WARN_ON(!folio_test_locked(folio));
153         if (folio_test_private(folio)) {
154                 dout("%p invalidate_folio idx %lu full dirty page\n",
155                      inode, folio->index);
156
157                 snapc = folio_detach_private(folio);
158                 ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
159                 ceph_put_snap_context(snapc);
160         }
161
162         folio_wait_fscache(folio);
163 }
164
165 static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
166 {
167         struct inode *inode = folio->mapping->host;
168
169         dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
170              ceph_vinop(inode),
171              folio->index, folio_test_dirty(folio) ? "" : "not ");
172
173         if (folio_test_private(folio))
174                 return false;
175
176         if (folio_test_fscache(folio)) {
177                 if (current_is_kswapd() || !(gfp & __GFP_FS))
178                         return false;
179                 folio_wait_fscache(folio);
180         }
181         ceph_fscache_note_page_release(inode);
182         return true;
183 }
184
185 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
186 {
187         struct inode *inode = rreq->inode;
188         struct ceph_inode_info *ci = ceph_inode(inode);
189         struct ceph_file_layout *lo = &ci->i_layout;
190         unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
191         loff_t end = rreq->start + rreq->len, new_end;
192         struct ceph_netfs_request_data *priv = rreq->netfs_priv;
193         unsigned long max_len;
194         u32 blockoff;
195
196         if (priv) {
197                 /* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
198                 if (priv->file_ra_disabled)
199                         max_pages = 0;
200                 else
201                         max_pages = priv->file_ra_pages;
202         }
204
205         /* Readahead is disabled */
206         if (!max_pages)
207                 return;
208
209         max_len = max_pages << PAGE_SHIFT;
210
211         /*
212  * Try to expand the length forward by rounding it up to the next
213          * block, but do not exceed the file size, unless the original
214          * request already exceeds it.
215          */
216         new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
217         if (new_end > end && new_end <= rreq->start + max_len)
218                 rreq->len = new_end - rreq->start;
219
220         /* Try to expand the start downward */
221         div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
222         if (rreq->len + blockoff <= max_len) {
223                 rreq->start -= blockoff;
224                 rreq->len += blockoff;
225         }
226 }
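/*
 * Worked example (assuming a 4 MiB stripe_unit, a large file, and ample
 * ra_pages): a 16 KiB request at offset 6 MiB is first extended forward
 * to the 8 MiB block boundary, then extended backward to the 4 MiB
 * boundary, so the resulting read covers one whole stripe unit.
 */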
227
228 static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
229 {
230         struct inode *inode = subreq->rreq->inode;
231         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
232         struct ceph_inode_info *ci = ceph_inode(inode);
233         u64 objno, objoff;
234         u32 xlen;
235
236         /* Truncate the extent at the end of the current block */
237         ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
238                                       &objno, &objoff, &xlen);
239         subreq->len = min(xlen, fsc->mount_options->rsize);
240         return true;
241 }
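/*
 * E.g., assuming the default layout (4 MiB objects, no striping), a 1 MiB
 * subrequest starting at offset 3.5 MiB is clamped to 512 KiB so that it
 * does not cross into the next RADOS object.
 */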
242
243 static void finish_netfs_read(struct ceph_osd_request *req)
244 {
245         struct ceph_fs_client *fsc = ceph_inode_to_client(req->r_inode);
246         struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
247         struct netfs_io_subrequest *subreq = req->r_priv;
248         struct ceph_osd_req_op *op = &req->r_ops[0];
249         int num_pages;
250         int err = req->r_result;
251         bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
252
253         ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
254                                  req->r_end_latency, osd_data->length, err);
255
256         dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
257              subreq->len, i_size_read(req->r_inode));
258
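        /*
         * For a sparse read, a successful result is remapped to the end of
         * the last extent returned; anything past that point is a hole
         * that gets zeroed via NETFS_SREQ_CLEAR_TAIL below.
         */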
259         /* no object means success but no data */
260         if (sparse && err >= 0)
261                 err = ceph_sparse_ext_map_end(op);
262         else if (err == -ENOENT)
263                 err = 0;
264         else if (err == -EBLOCKLISTED)
265                 fsc->blocklisted = true;
266
267         if (err >= 0 && err < subreq->len)
268                 __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
269
270         netfs_subreq_terminated(subreq, err, false);
271
272         num_pages = calc_pages_for(osd_data->alignment, osd_data->length);
273         ceph_put_page_vector(osd_data->pages, num_pages, false);
274         iput(req->r_inode);
275 }
276
277 static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
278 {
279         struct netfs_io_request *rreq = subreq->rreq;
280         struct inode *inode = rreq->inode;
281         struct ceph_mds_reply_info_parsed *rinfo;
282         struct ceph_mds_reply_info_in *iinfo;
283         struct ceph_mds_request *req;
284         struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
285         struct ceph_inode_info *ci = ceph_inode(inode);
286         struct iov_iter iter;
287         ssize_t err = 0;
288         size_t len;
289         int mode;
290
291         __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
292         __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
293
294         if (subreq->start >= inode->i_size)
295                 goto out;
296
297         /* We need to fetch the inline data. */
298         mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
299         req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
300         if (IS_ERR(req)) {
301                 err = PTR_ERR(req);
302                 goto out;
303         }
304         req->r_ino1 = ci->i_vino;
305         req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
306         req->r_num_caps = 2;
307
308         err = ceph_mdsc_do_request(mdsc, NULL, req);
309         if (err < 0)
310                 goto out;
311
312         rinfo = &req->r_reply_info;
313         iinfo = &rinfo->targeti;
314         if (iinfo->inline_version == CEPH_INLINE_NONE) {
315                 /* The data got uninlined */
316                 ceph_mdsc_put_request(req);
317                 return false;
318         }
319
320         len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
321         iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
322         err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
323         if (err == 0)
324                 err = -EFAULT;
325
326         ceph_mdsc_put_request(req);
327 out:
328         netfs_subreq_terminated(subreq, err, false);
329         return true;
330 }
331
332 static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
333 {
334         struct netfs_io_request *rreq = subreq->rreq;
335         struct inode *inode = rreq->inode;
336         struct ceph_inode_info *ci = ceph_inode(inode);
337         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
338         struct ceph_osd_request *req = NULL;
339         struct ceph_vino vino = ceph_vino(inode);
340         struct iov_iter iter;
341         struct page **pages;
342         size_t page_off;
343         int err = 0;
344         u64 len = subreq->len;
345         bool sparse = ceph_test_mount_opt(fsc, SPARSEREAD);
346
347         if (ceph_inode_is_shutdown(inode)) {
348                 err = -EIO;
349                 goto out;
350         }
351
352         if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
353                 return;
354
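        /*
         * With the new "sparseread" mount option, issue a SPARSE_READ so
         * the OSD returns an extent map along with the data rather than
         * zero-filling holes on the wire.
         */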
355         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino, subreq->start, &len,
356                         0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
357                         CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
358                         NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
359         if (IS_ERR(req)) {
360                 err = PTR_ERR(req);
361                 req = NULL;
362                 goto out;
363         }
364
365         if (sparse) {
366                 err = ceph_alloc_sparse_ext_map(&req->r_ops[0]);
367                 if (err)
368                         goto out;
369         }
370
371         dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
372         iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
373         err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
374         if (err < 0) {
375                 dout("%s: iov_iter_get_pages_alloc2 returned %d\n", __func__, err);
376                 goto out;
377         }
378
379         /* should always give us a page-aligned read */
380         WARN_ON_ONCE(page_off);
381         len = err;
382         err = 0;
383
384         osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
385         req->r_callback = finish_netfs_read;
386         req->r_priv = subreq;
387         req->r_inode = inode;
388         ihold(inode);
389
390         ceph_osdc_start_request(req->r_osdc, req);
391 out:
392         ceph_osdc_put_request(req);
393         if (err)
394                 netfs_subreq_terminated(subreq, err, false);
395         dout("%s: result %d\n", __func__, err);
396 }
397
398 static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
399 {
400         struct inode *inode = rreq->inode;
401         int got = 0, want = CEPH_CAP_FILE_CACHE;
402         struct ceph_netfs_request_data *priv;
403         int ret = 0;
404
405         if (rreq->origin != NETFS_READAHEAD)
406                 return 0;
407
408         priv = kzalloc(sizeof(*priv), GFP_NOFS);
409         if (!priv)
410                 return -ENOMEM;
411
412         if (file) {
413                 struct ceph_rw_context *rw_ctx;
414                 struct ceph_file_info *fi = file->private_data;
415
416                 priv->file_ra_pages = file->f_ra.ra_pages;
417                 priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;
418
419                 rw_ctx = ceph_find_rw_context(fi);
420                 if (rw_ctx) {
421                         rreq->netfs_priv = priv;
422                         return 0;
423                 }
424         }
425
426         /*
427          * readahead callers do not necessarily hold Fcb caps
428          * (e.g. fadvise, madvise).
429          */
430         ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
431         if (ret < 0) {
432                 dout("start_read %p, error getting cap\n", inode);
433                 goto out;
434         }
435
436         if (!(got & want)) {
437                 dout("start_read %p, no cache cap\n", inode);
438                 ret = -EACCES;
439                 goto out;
440         }
441         if (ret == 0) {
442                 ret = -EACCES;
443                 goto out;
444         }
445
446         priv->caps = got;
447         rreq->netfs_priv = priv;
448
449 out:
450         if (ret < 0)
451                 kfree(priv);
452
453         return ret;
454 }
455
456 static void ceph_netfs_free_request(struct netfs_io_request *rreq)
457 {
458         struct ceph_netfs_request_data *priv = rreq->netfs_priv;
459
460         if (!priv)
461                 return;
462
463         if (priv->caps)
464                 ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
465         kfree(priv);
466         rreq->netfs_priv = NULL;
467 }
468
469 const struct netfs_request_ops ceph_netfs_ops = {
470         .init_request           = ceph_init_request,
471         .free_request           = ceph_netfs_free_request,
472         .begin_cache_operation  = ceph_begin_cache_operation,
473         .issue_read             = ceph_netfs_issue_read,
474         .expand_readahead       = ceph_netfs_expand_readahead,
475         .clamp_length           = ceph_netfs_clamp_length,
476         .check_write_begin      = ceph_netfs_check_write_begin,
477 };
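/*
 * Buffered read path in brief: ceph_init_request() pins caps for
 * readahead, ceph_netfs_expand_readahead() may widen the window to
 * stripe boundaries, ceph_netfs_clamp_length() splits it into
 * per-object subrequests, and ceph_netfs_issue_read() sends each to
 * the OSDs, completing in finish_netfs_read().
 */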
478
479 #ifdef CONFIG_CEPH_FSCACHE
480 static void ceph_set_page_fscache(struct page *page)
481 {
482         set_page_fscache(page);
483 }
484
485 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
486 {
487         struct inode *inode = priv;
488
489         if (IS_ERR_VALUE(error) && error != -ENOBUFS)
490                 ceph_fscache_invalidate(inode, false);
491 }
492
493 static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
494 {
495         struct ceph_inode_info *ci = ceph_inode(inode);
496         struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
497
498         fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
499                                ceph_fscache_write_terminated, inode, caching);
500 }
501 #else
502 static inline void ceph_set_page_fscache(struct page *page)
503 {
504 }
505
506 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
507 {
508 }
509 #endif /* CONFIG_CEPH_FSCACHE */
510
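/*
 * A consistent snapshot of per-inode size/truncate state, filled in by
 * get_oldest_context() under i_ceph_lock so writeback sees one view.
 */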
511 struct ceph_writeback_ctl
512 {
513         loff_t i_size;
514         u64 truncate_size;
515         u32 truncate_seq;
516         bool size_stable;
517         bool head_snapc;
518 };
519
520 /*
521  * Get ref for the oldest snapc for an inode with dirty data... that is, the
522  * only snap context we are allowed to write back.
523  */
524 static struct ceph_snap_context *
525 get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
526                    struct ceph_snap_context *page_snapc)
527 {
528         struct ceph_inode_info *ci = ceph_inode(inode);
529         struct ceph_snap_context *snapc = NULL;
530         struct ceph_cap_snap *capsnap = NULL;
531
532         spin_lock(&ci->i_ceph_lock);
533         list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
534                 dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
535                      capsnap->context, capsnap->dirty_pages);
536                 if (!capsnap->dirty_pages)
537                         continue;
538
539                 /* get i_size, truncate_{seq,size} for page_snapc? */
540                 if (snapc && capsnap->context != page_snapc)
541                         continue;
542
543                 if (ctl) {
544                         if (capsnap->writing) {
545                                 ctl->i_size = i_size_read(inode);
546                                 ctl->size_stable = false;
547                         } else {
548                                 ctl->i_size = capsnap->size;
549                                 ctl->size_stable = true;
550                         }
551                         ctl->truncate_size = capsnap->truncate_size;
552                         ctl->truncate_seq = capsnap->truncate_seq;
553                         ctl->head_snapc = false;
554                 }
555
556                 if (snapc)
557                         break;
558
559                 snapc = ceph_get_snap_context(capsnap->context);
560                 if (!page_snapc ||
561                     page_snapc == snapc ||
562                     page_snapc->seq > snapc->seq)
563                         break;
564         }
565         if (!snapc && ci->i_wrbuffer_ref_head) {
566                 snapc = ceph_get_snap_context(ci->i_head_snapc);
567                 dout(" head snapc %p has %d dirty pages\n",
568                      snapc, ci->i_wrbuffer_ref_head);
569                 if (ctl) {
570                         ctl->i_size = i_size_read(inode);
571                         ctl->truncate_size = ci->i_truncate_size;
572                         ctl->truncate_seq = ci->i_truncate_seq;
573                         ctl->size_stable = false;
574                         ctl->head_snapc = true;
575                 }
576         }
577         spin_unlock(&ci->i_ceph_lock);
578         return snapc;
579 }
580
581 static u64 get_writepages_data_length(struct inode *inode,
582                                       struct page *page, u64 start)
583 {
584         struct ceph_inode_info *ci = ceph_inode(inode);
585         struct ceph_snap_context *snapc = page_snap_context(page);
586         struct ceph_cap_snap *capsnap = NULL;
587         u64 end = i_size_read(inode);
588
589         if (snapc != ci->i_head_snapc) {
590                 bool found = false;
591                 spin_lock(&ci->i_ceph_lock);
592                 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
593                         if (capsnap->context == snapc) {
594                                 if (!capsnap->writing)
595                                         end = capsnap->size;
596                                 found = true;
597                                 break;
598                         }
599                 }
600                 spin_unlock(&ci->i_ceph_lock);
601                 WARN_ON(!found);
602         }
603         if (end > page_offset(page) + thp_size(page))
604                 end = page_offset(page) + thp_size(page);
605         return end > start ? end - start : 0;
606 }
607
608 /*
609  * Write a single page, but leave the page locked.
610  *
611  * If we get a write error, mark the mapping for error, but still adjust the
612  * dirty page accounting (i.e., page is no longer dirty).
613  */
614 static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
615 {
616         struct folio *folio = page_folio(page);
617         struct inode *inode = page->mapping->host;
618         struct ceph_inode_info *ci = ceph_inode(inode);
619         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
620         struct ceph_snap_context *snapc, *oldest;
621         loff_t page_off = page_offset(page);
622         int err;
623         loff_t len = thp_size(page);
624         struct ceph_writeback_ctl ceph_wbc;
625         struct ceph_osd_client *osdc = &fsc->client->osdc;
626         struct ceph_osd_request *req;
627         bool caching = ceph_is_cache_enabled(inode);
628
629         dout("writepage %p idx %lu\n", page, page->index);
630
631         if (ceph_inode_is_shutdown(inode))
632                 return -EIO;
633
634         /* verify this is a writeable snap context */
635         snapc = page_snap_context(page);
636         if (!snapc) {
637                 dout("writepage %p page %p not dirty?\n", inode, page);
638                 return 0;
639         }
640         oldest = get_oldest_context(inode, &ceph_wbc, snapc);
641         if (snapc->seq > oldest->seq) {
642                 dout("writepage %p page %p snapc %p not writeable - noop\n",
643                      inode, page, snapc);
644                 /* we should only noop if called by kswapd */
645                 WARN_ON(!(current->flags & PF_MEMALLOC));
646                 ceph_put_snap_context(oldest);
647                 redirty_page_for_writepage(wbc, page);
648                 return 0;
649         }
650         ceph_put_snap_context(oldest);
651
652         /* is this page entirely beyond the end of file? */
653         if (page_off >= ceph_wbc.i_size) {
654                 dout("folio at %lu beyond eof %llu\n", folio->index,
655                                 ceph_wbc.i_size);
656                 folio_invalidate(folio, 0, folio_size(folio));
657                 return 0;
658         }
659
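        /* trim a partial page at the end of file */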
660         if (ceph_wbc.i_size < page_off + len)
661                 len = ceph_wbc.i_size - page_off;
662
663         dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
664              inode, page, page->index, page_off, len, snapc, snapc->seq);
665
666         if (atomic_long_inc_return(&fsc->writeback_count) >
667             CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
668                 fsc->write_congested = true;
669
670         req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
671                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
672                                     ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
673                                     true);
674         if (IS_ERR(req)) {
675                 redirty_page_for_writepage(wbc, page);
676                 return PTR_ERR(req);
677         }
678
679         set_page_writeback(page);
680         if (caching)
681                 ceph_set_page_fscache(page);
682         ceph_fscache_write_to_cache(inode, page_off, len, caching);
683
684         /* it may be a short write due to an object boundary */
685         WARN_ON_ONCE(len > thp_size(page));
686         osd_req_op_extent_osd_data_pages(req, 0, &page, len, 0, false, false);
687         dout("writepage %llu~%llu (%llu bytes)\n", page_off, len, len);
688
689         req->r_mtime = inode->i_mtime;
690         ceph_osdc_start_request(osdc, req);
691         err = ceph_osdc_wait_request(osdc, req);
692
693         ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
694                                   req->r_end_latency, len, err);
695
696         ceph_osdc_put_request(req);
697         if (err == 0)
698                 err = len;
699
700         if (err < 0) {
701                 struct writeback_control tmp_wbc;
702                 if (!wbc)
703                         wbc = &tmp_wbc;
704                 if (err == -ERESTARTSYS) {
705                         /* killed by SIGKILL */
706                         dout("writepage interrupted page %p\n", page);
707                         redirty_page_for_writepage(wbc, page);
708                         end_page_writeback(page);
709                         return err;
710                 }
711                 if (err == -EBLOCKLISTED)
712                         fsc->blocklisted = true;
713                 dout("writepage setting page/mapping error %d %p\n",
714                      err, page);
715                 mapping_set_error(&inode->i_data, err);
716                 wbc->pages_skipped++;
717         } else {
718                 dout("writepage cleaned page %p\n", page);
719                 err = 0;  /* vfs expects us to return 0 */
720         }
721         oldest = detach_page_private(page);
722         WARN_ON_ONCE(oldest != snapc);
723         end_page_writeback(page);
724         ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
725         ceph_put_snap_context(snapc);  /* page's reference */
726
727         if (atomic_long_dec_return(&fsc->writeback_count) <
728             CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
729                 fsc->write_congested = false;
730
731         return err;
732 }
733
734 static int ceph_writepage(struct page *page, struct writeback_control *wbc)
735 {
736         int err;
737         struct inode *inode = page->mapping->host;
738         BUG_ON(!inode);
739         /* check congestion before taking an inode ref, so the early return can't leak one */
740         if (wbc->sync_mode == WB_SYNC_NONE &&
741             ceph_inode_to_client(inode)->write_congested)
742                 return AOP_WRITEPAGE_ACTIVATE;
743
744         ihold(inode);
745         wait_on_page_fscache(page);
746
747         err = writepage_nounlock(page, wbc);
748         if (err == -ERESTARTSYS) {
749                 /* direct memory reclaimer was killed by SIGKILL. return 0
750                  * to prevent caller from setting mapping/page error */
751                 err = 0;
752         }
753         unlock_page(page);
754         iput(inode);
755         return err;
756 }
757
758 /*
759  * async writeback completion handler.
760  *
761  * If we get an error, set the mapping error bit, but not the individual
762  * page error bits.
763  */
764 static void writepages_finish(struct ceph_osd_request *req)
765 {
766         struct inode *inode = req->r_inode;
767         struct ceph_inode_info *ci = ceph_inode(inode);
768         struct ceph_osd_data *osd_data;
769         struct page *page;
770         int num_pages, total_pages = 0;
771         int i, j;
772         int rc = req->r_result;
773         struct ceph_snap_context *snapc = req->r_snapc;
774         struct address_space *mapping = inode->i_mapping;
775         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
776         unsigned int len = 0;
777         bool remove_page;
778
779         dout("writepages_finish %p rc %d\n", inode, rc);
780         if (rc < 0) {
781                 mapping_set_error(mapping, rc);
782                 ceph_set_error_write(ci);
783                 if (rc == -EBLOCKLISTED)
784                         fsc->blocklisted = true;
785         } else {
786                 ceph_clear_error_write(ci);
787         }
788
789         /*
790          * We lost the cache cap, need to truncate the page before
791          * it is unlocked, otherwise we'd truncate it later in the
792          * page truncation thread, possibly losing some data that
793          * raced its way in
794          */
795         remove_page = !(ceph_caps_issued(ci) &
796                         (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
797
798         /* clean all pages */
799         for (i = 0; i < req->r_num_ops; i++) {
800                 if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
801                         pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
802                                 __func__, req->r_ops[i].op, req, i, req->r_tid);
803                         break;
804                 }
805
806                 osd_data = osd_req_op_extent_osd_data(req, i);
807                 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
808                 len += osd_data->length;
809                 num_pages = calc_pages_for((u64)osd_data->alignment,
810                                            (u64)osd_data->length);
811                 total_pages += num_pages;
812                 for (j = 0; j < num_pages; j++) {
813                         page = osd_data->pages[j];
814                         BUG_ON(!page);
815                         WARN_ON(!PageUptodate(page));
816
817                         if (atomic_long_dec_return(&fsc->writeback_count) <
818                              CONGESTION_OFF_THRESH(
819                                         fsc->mount_options->congestion_kb))
820                                 fsc->write_congested = false;
821
822                         ceph_put_snap_context(detach_page_private(page));
823                         end_page_writeback(page);
824                         dout("unlocking %p\n", page);
825
826                         if (remove_page)
827                                 generic_error_remove_page(inode->i_mapping,
828                                                           page);
829
830                         unlock_page(page);
831                 }
832                 dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
833                      inode, osd_data->length, rc >= 0 ? num_pages : 0);
834
835                 release_pages(osd_data->pages, num_pages);
836         }
837
838         ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
839                                   req->r_end_latency, len, rc);
840
841         ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);
842
843         osd_data = osd_req_op_extent_osd_data(req, 0);
844         if (osd_data->pages_from_pool)
845                 mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
846         else
847                 kfree(osd_data->pages);
848         ceph_osdc_put_request(req);
849 }
850
851 /*
852  * initiate async writeback
853  */
854 static int ceph_writepages_start(struct address_space *mapping,
855                                  struct writeback_control *wbc)
856 {
857         struct inode *inode = mapping->host;
858         struct ceph_inode_info *ci = ceph_inode(inode);
859         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
860         struct ceph_vino vino = ceph_vino(inode);
861         pgoff_t index, start_index, end = -1;
862         struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
863         struct folio_batch fbatch;
864         int rc = 0;
865         unsigned int wsize = i_blocksize(inode);
866         struct ceph_osd_request *req = NULL;
867         struct ceph_writeback_ctl ceph_wbc;
868         bool should_loop, range_whole = false;
869         bool done = false;
870         bool caching = ceph_is_cache_enabled(inode);
871         xa_mark_t tag;
872
873         if (wbc->sync_mode == WB_SYNC_NONE &&
874             fsc->write_congested)
875                 return 0;
876
877         dout("writepages_start %p (mode=%s)\n", inode,
878              wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
879              (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
880
881         if (ceph_inode_is_shutdown(inode)) {
882                 if (ci->i_wrbuffer_ref > 0) {
883                         pr_warn_ratelimited(
884                                 "writepages_start %p %lld forced umount\n",
885                                 inode, ceph_ino(inode));
886                 }
887                 mapping_set_error(mapping, -EIO);
888                 return -EIO; /* we're in a forced umount, don't write! */
889         }
890         if (fsc->mount_options->wsize < wsize)
891                 wsize = fsc->mount_options->wsize;
892
893         folio_batch_init(&fbatch);
894
895         start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
896         index = start_index;
897
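        /*
         * For integrity (or tagged) writeback, walk PAGECACHE_TAG_TOWRITE:
         * tag_pages_for_writeback() below tags the currently-dirty range
         * once, so pages redirtied while we run can't livelock us.
         */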
898         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
899                 tag = PAGECACHE_TAG_TOWRITE;
900         } else {
901                 tag = PAGECACHE_TAG_DIRTY;
902         }
903 retry:
904         /* find oldest snap context with dirty data */
905         snapc = get_oldest_context(inode, &ceph_wbc, NULL);
906         if (!snapc) {
907                 /* hmm, why does writepages get called when there
908                    is no dirty data? */
909                 dout(" no snap context with dirty data?\n");
910                 goto out;
911         }
912         dout(" oldest snapc is %p seq %lld (%d snaps)\n",
913              snapc, snapc->seq, snapc->num_snaps);
914
915         should_loop = false;
916         if (ceph_wbc.head_snapc && snapc != last_snapc) {
917                 /* where to start/end? */
918                 if (wbc->range_cyclic) {
919                         index = start_index;
920                         end = -1;
921                         if (index > 0)
922                                 should_loop = true;
923                         dout(" cyclic, start at %lu\n", index);
924                 } else {
925                         index = wbc->range_start >> PAGE_SHIFT;
926                         end = wbc->range_end >> PAGE_SHIFT;
927                         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
928                                 range_whole = true;
929                         dout(" not cyclic, %lu to %lu\n", index, end);
930                 }
931         } else if (!ceph_wbc.head_snapc) {
932                 /* Do not respect wbc->range_{start,end}. Dirty pages
933                  * in that range can be associated with a newer snapc.
934                  * They are not writeable until all dirty pages
935                  * associated with 'snapc' have been written. */
936                 if (index > 0)
937                         should_loop = true;
938                 dout(" non-head snapc, range whole\n");
939         }
940
941         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
942                 tag_pages_for_writeback(mapping, index, end);
943
944         ceph_put_snap_context(last_snapc);
945         last_snapc = snapc;
946
947         while (!done && index <= end) {
948                 int num_ops = 0, op_idx;
949                 unsigned i, nr_folios, max_pages, locked_pages = 0;
950                 struct page **pages = NULL, **data_pages;
951                 struct page *page;
952                 pgoff_t strip_unit_end = 0;
953                 u64 offset = 0, len = 0;
954                 bool from_pool = false;
955
956                 max_pages = wsize >> PAGE_SHIFT;
957
958 get_more_pages:
959                 nr_folios = filemap_get_folios_tag(mapping, &index,
960                                                    end, tag, &fbatch);
961                 dout("filemap_get_folios_tag got %d\n", nr_folios);
962                 if (!nr_folios && !locked_pages)
963                         break;
964                 for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
965                         page = &fbatch.folios[i]->page;
966                         dout("? %p idx %lu\n", page, page->index);
967                         if (locked_pages == 0)
968                                 lock_page(page);  /* first page */
969                         else if (!trylock_page(page))
970                                 break;
971
972                         /* only dirty pages, or our accounting breaks */
973                         if (unlikely(!PageDirty(page)) ||
974                             unlikely(page->mapping != mapping)) {
975                                 dout("!dirty or !mapping %p\n", page);
976                                 unlock_page(page);
977                                 continue;
978                         }
979                         /* only if matching snap context */
980                         pgsnapc = page_snap_context(page);
981                         if (pgsnapc != snapc) {
982                                 dout("page snapc %p %lld != oldest %p %lld\n",
983                                      pgsnapc, pgsnapc->seq, snapc, snapc->seq);
984                                 if (!should_loop &&
985                                     !ceph_wbc.head_snapc &&
986                                     wbc->sync_mode != WB_SYNC_NONE)
987                                         should_loop = true;
988                                 unlock_page(page);
989                                 continue;
990                         }
991                         if (page_offset(page) >= ceph_wbc.i_size) {
992                                 struct folio *folio = page_folio(page);
993
994                                 dout("folio at %lu beyond eof %llu\n",
995                                      folio->index, ceph_wbc.i_size);
996                                 if ((ceph_wbc.size_stable ||
997                                     folio_pos(folio) >= i_size_read(inode)) &&
998                                     folio_clear_dirty_for_io(folio))
999                                         folio_invalidate(folio, 0,
1000                                                         folio_size(folio));
1001                                 folio_unlock(folio);
1002                                 continue;
1003                         }
1004                         if (strip_unit_end && (page->index > strip_unit_end)) {
1005                                 dout("end of strip unit %p\n", page);
1006                                 unlock_page(page);
1007                                 break;
1008                         }
1009                         if (PageWriteback(page) || PageFsCache(page)) {
1010                                 if (wbc->sync_mode == WB_SYNC_NONE) {
1011                                         dout("%p under writeback\n", page);
1012                                         unlock_page(page);
1013                                         continue;
1014                                 }
1015                                 dout("waiting on writeback %p\n", page);
1016                                 wait_on_page_writeback(page);
1017                                 wait_on_page_fscache(page);
1018                         }
1019
1020                         if (!clear_page_dirty_for_io(page)) {
1021                                 dout("%p !clear_page_dirty_for_io\n", page);
1022                                 unlock_page(page);
1023                                 continue;
1024                         }
1025
1026                         /*
1027                          * We have something to write.  If this is
1028                          * the first locked page this time through,
1029                          * calculate the max possible write size and
1030                          * allocate a page array
1031                          */
1032                         if (locked_pages == 0) {
1033                                 u64 objnum;
1034                                 u64 objoff;
1035                                 u32 xlen;
1036
1037                                 /* prepare async write request */
1038                                 offset = (u64)page_offset(page);
1039                                 ceph_calc_file_object_mapping(&ci->i_layout,
1040                                                               offset, wsize,
1041                                                               &objnum, &objoff,
1042                                                               &xlen);
1043                                 len = xlen;
1044
1045                                 num_ops = 1;
1046                                 strip_unit_end = page->index +
1047                                         ((len - 1) >> PAGE_SHIFT);
1048
1049                                 BUG_ON(pages);
1050                                 max_pages = calc_pages_for(0, (u64)len);
1051                                 pages = kmalloc_array(max_pages,
1052                                                       sizeof(*pages),
1053                                                       GFP_NOFS);
1054                                 if (!pages) {
1055                                         from_pool = true;
1056                                         pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
1057                                         BUG_ON(!pages);
1058                                 }
1059
1060                                 len = 0;
1061                         } else if (page->index !=
1062                                    (offset + len) >> PAGE_SHIFT) {
1063                                 if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
1064                                                              CEPH_OSD_MAX_OPS)) {
1065                                         redirty_page_for_writepage(wbc, page);
1066                                         unlock_page(page);
1067                                         break;
1068                                 }
1069
1070                                 num_ops++;
1071                                 offset = (u64)page_offset(page);
1072                                 len = 0;
1073                         }
1074
1075                         /* note position of first page in fbatch */
1076                         dout("%p will write page %p idx %lu\n",
1077                              inode, page, page->index);
1078
1079                         if (atomic_long_inc_return(&fsc->writeback_count) >
1080                             CONGESTION_ON_THRESH(
1081                                     fsc->mount_options->congestion_kb))
1082                                 fsc->write_congested = true;
1083
1084                         pages[locked_pages++] = page;
1085                         fbatch.folios[i] = NULL;
1086
1087                         len += thp_size(page);
1088                 }
1089
1090                 /* did we get anything? */
1091                 if (!locked_pages)
1092                         goto release_folios;
1093                 if (i) {
1094                         unsigned j, n = 0;
1095                         /* shift unused folios to the beginning of fbatch */
1096                         for (j = 0; j < nr_folios; j++) {
1097                                 if (!fbatch.folios[j])
1098                                         continue;
1099                                 if (n < j)
1100                                         fbatch.folios[n] = fbatch.folios[j];
1101                                 n++;
1102                         }
1103                         fbatch.nr = n;
1104
1105                         if (nr_folios && i == nr_folios &&
1106                             locked_pages < max_pages) {
1107                                 dout("reached end fbatch, trying for more\n");
1108                                 folio_batch_release(&fbatch);
1109                                 goto get_more_pages;
1110                         }
1111                 }
1112
1113 new_request:
1114                 offset = page_offset(pages[0]);
1115                 len = wsize;
1116
1117                 req = ceph_osdc_new_request(&fsc->client->osdc,
1118                                         &ci->i_layout, vino,
1119                                         offset, &len, 0, num_ops,
1120                                         CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
1121                                         snapc, ceph_wbc.truncate_seq,
1122                                         ceph_wbc.truncate_size, false);
1123                 if (IS_ERR(req)) {
1124                         req = ceph_osdc_new_request(&fsc->client->osdc,
1125                                                 &ci->i_layout, vino,
1126                                                 offset, &len, 0,
1127                                                 min(num_ops,
1128                                                     CEPH_OSD_SLAB_OPS),
1129                                                 CEPH_OSD_OP_WRITE,
1130                                                 CEPH_OSD_FLAG_WRITE,
1131                                                 snapc, ceph_wbc.truncate_seq,
1132                                                 ceph_wbc.truncate_size, true);
1133                         BUG_ON(IS_ERR(req));
1134                 }
1135                 BUG_ON(len < page_offset(pages[locked_pages - 1]) +
1136                              thp_size(pages[locked_pages - 1]) - offset);
1137
1138                 req->r_callback = writepages_finish;
1139                 req->r_inode = inode;
1140
1141                 /* Format the osd request message and submit the write */
1142                 len = 0;
1143                 data_pages = pages;
1144                 op_idx = 0;
1145                 for (i = 0; i < locked_pages; i++) {
1146                         u64 cur_offset = page_offset(pages[i]);
1147                         /*
1148                          * Discontinuity in page range? Ceph can handle that by just passing
1149                          * multiple extents in the write op.
1150                          */
1151                         if (offset + len != cur_offset) {
1152                                 /* If it's full, stop here */
1153                                 if (op_idx + 1 == req->r_num_ops)
1154                                         break;
1155
1156                                 /* Kick off an fscache write with what we have so far. */
1157                                 ceph_fscache_write_to_cache(inode, offset, len, caching);
1158
1159                                 /* Start a new extent */
1160                                 osd_req_op_extent_dup_last(req, op_idx,
1161                                                            cur_offset - offset);
1162                                 dout("writepages got pages at %llu~%llu\n",
1163                                      offset, len);
1164                                 osd_req_op_extent_osd_data_pages(req, op_idx,
1165                                                         data_pages, len, 0,
1166                                                         from_pool, false);
1167                                 osd_req_op_extent_update(req, op_idx, len);
1168
1169                                 len = 0;
1170                                 offset = cur_offset;
1171                                 data_pages = pages + i;
1172                                 op_idx++;
1173                         }
1174
1175                         set_page_writeback(pages[i]);
1176                         if (caching)
1177                                 ceph_set_page_fscache(pages[i]);
1178                         len += thp_size(pages[i]);
1179                 }
1180                 ceph_fscache_write_to_cache(inode, offset, len, caching);
1181
1182                 if (ceph_wbc.size_stable) {
1183                         len = min(len, ceph_wbc.i_size - offset);
1184                 } else if (i == locked_pages) {
1185                         /* writepages_finish() clears writeback pages
1186                          * according to the data length, so make sure
1187                          * data length covers all locked pages */
1188                         u64 min_len = len + 1 - thp_size(pages[i - 1]);
1189                         len = get_writepages_data_length(inode, pages[i - 1],
1190                                                          offset);
1191                         len = max(len, min_len);
1192                 }
1193                 dout("writepages got pages at %llu~%llu\n", offset, len);
1194
1195                 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
1196                                                  0, from_pool, false);
1197                 osd_req_op_extent_update(req, op_idx, len);
1198
1199                 BUG_ON(op_idx + 1 != req->r_num_ops);
1200
1201                 from_pool = false;
1202                 if (i < locked_pages) {
1203                         BUG_ON(num_ops <= req->r_num_ops);
1204                         num_ops -= req->r_num_ops;
1205                         locked_pages -= i;
1206
1207                         /* allocate new pages array for next request */
1208                         data_pages = pages;
1209                         pages = kmalloc_array(locked_pages, sizeof(*pages),
1210                                               GFP_NOFS);
1211                         if (!pages) {
1212                                 from_pool = true;
1213                                 pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
1214                                 BUG_ON(!pages);
1215                         }
1216                         memcpy(pages, data_pages + i,
1217                                locked_pages * sizeof(*pages));
1218                         memset(data_pages + i, 0,
1219                                locked_pages * sizeof(*pages));
1220                 } else {
1221                         BUG_ON(num_ops != req->r_num_ops);
1222                         index = pages[i - 1]->index + 1;
1223                         /* request message now owns the pages array */
1224                         pages = NULL;
1225                 }
1226
1227                 req->r_mtime = inode->i_mtime;
1228                 ceph_osdc_start_request(&fsc->client->osdc, req);
1229                 req = NULL;
1230
1231                 wbc->nr_to_write -= i;
1232                 if (pages)
1233                         goto new_request;
1234
1235                 /*
1236                  * We stop writing back only if we are not doing
1237                  * integrity sync. In case of integrity sync we have to
1238                  * keep going until we have written all the pages
1239                  * we tagged for writeback prior to entering this loop.
1240                  */
1241                 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
1242                         done = true;
1243
1244 release_folios:
1245                 dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
1246                      fbatch.nr ? fbatch.folios[0] : NULL);
1247                 folio_batch_release(&fbatch);
1248         }
1249
1250         if (should_loop && !done) {
1251                 /* more to do; loop back to beginning of file */
1252                 dout("writepages looping back to beginning of file\n");
1253                 end = start_index - 1; /* OK even when start_index == 0 */
1254
1255                 /* to write dirty pages associated with next snapc,
1256                  * we need to wait until current writes complete */
1257                 if (wbc->sync_mode != WB_SYNC_NONE &&
1258                     start_index == 0 && /* all dirty pages were checked */
1259                     !ceph_wbc.head_snapc) {
1260                         struct page *page;
1261                         unsigned i, nr;
1262                         index = 0;
1263                         while ((index <= end) &&
1264                                (nr = filemap_get_folios_tag(mapping, &index,
1265                                                 (pgoff_t)-1,
1266                                                 PAGECACHE_TAG_WRITEBACK,
1267                                                 &fbatch))) {
1268                                 for (i = 0; i < nr; i++) {
1269                                         page = &fbatch.folios[i]->page;
1270                                         if (page_snap_context(page) != snapc)
1271                                                 continue;
1272                                         wait_on_page_writeback(page);
1273                                 }
1274                                 folio_batch_release(&fbatch);
1275                                 cond_resched();
1276                         }
1277                 }
1278
1279                 start_index = 0;
1280                 index = 0;
1281                 goto retry;
1282         }
1283
1284         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1285                 mapping->writeback_index = index;
1286
1287 out:
1288         ceph_osdc_put_request(req);
1289         ceph_put_snap_context(last_snapc);
1290         dout("writepages done, rc = %d\n", rc);
1291         return rc;
1292 }
1293
1294
1295
1296 /*
1297  * See if a given @snapc is either writeable, or already written.
1298  */
1299 static int context_is_writeable_or_written(struct inode *inode,
1300                                            struct ceph_snap_context *snapc)
1301 {
1302         struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
1303         int ret = !oldest || snapc->seq <= oldest->seq;
1304
1305         ceph_put_snap_context(oldest);
1306         return ret;
1307 }
1308
1309 /**
1310  * ceph_find_incompatible - find an incompatible context and return it
1311  * @page: page being dirtied
1312  *
1313  * We are only allowed to write into/dirty a page if the page is
1314  * clean, or already dirty within the same snap context. Returns a
1315  * conflicting context if there is one, NULL if there isn't, or an
1316  * ERR_PTR-encoded negative error on other errors.
1317  *
1318  * Must be called with page lock held.
1319  */
1320 static struct ceph_snap_context *
1321 ceph_find_incompatible(struct page *page)
1322 {
1323         struct inode *inode = page->mapping->host;
1324         struct ceph_inode_info *ci = ceph_inode(inode);
1325
1326         if (ceph_inode_is_shutdown(inode)) {
1327                 dout(" page %p %llx:%llx is shutdown\n", page,
1328                      ceph_vinop(inode));
1329                 return ERR_PTR(-ESTALE);
1330         }
1331
1332         for (;;) {
1333                 struct ceph_snap_context *snapc, *oldest;
1334
1335                 wait_on_page_writeback(page);
1336
1337                 snapc = page_snap_context(page);
1338                 if (!snapc || snapc == ci->i_head_snapc)
1339                         break;
1340
1341                 /*
1342                  * this page is already dirty in another (older) snap
1343                  * context!  is it writeable now?
1344                  */
1345                 oldest = get_oldest_context(inode, NULL, NULL);
1346                 if (snapc->seq > oldest->seq) {
1347                         /* not writeable -- return it for the caller to deal with */
1348                         ceph_put_snap_context(oldest);
1349                         dout(" page %p snapc %p not current or oldest\n", page, snapc);
1350                         return ceph_get_snap_context(snapc);
1351                 }
1352                 ceph_put_snap_context(oldest);
1353
1354                 /* yay, writeable, do it now (without dropping page lock) */
1355                 dout(" page %p snapc %p not current, but oldest\n", page, snapc);
1356                 if (clear_page_dirty_for_io(page)) {
1357                         int r = writepage_nounlock(page, NULL);
1358                         if (r < 0)
1359                                 return ERR_PTR(r);
1360                 }
1361         }
1362         return NULL;
1363 }
1364
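/*
 * netfs write_begin hook: if the folio is dirty in an older snap context
 * that cannot be written back yet, drop the folio, kick off writeback and
 * wait (killably) for that context to become writeable or written, then
 * return -EAGAIN so the write_begin attempt can be retried.
 */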
1365 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
1366                                         struct folio **foliop, void **_fsdata)
1367 {
1368         struct inode *inode = file_inode(file);
1369         struct ceph_inode_info *ci = ceph_inode(inode);
1370         struct ceph_snap_context *snapc;
1371
1372         snapc = ceph_find_incompatible(folio_page(*foliop, 0));
1373         if (snapc) {
1374                 int r;
1375
1376                 folio_unlock(*foliop);
1377                 folio_put(*foliop);
1378                 *foliop = NULL;
1379                 if (IS_ERR(snapc))
1380                         return PTR_ERR(snapc);
1381
1382                 ceph_queue_writeback(inode);
1383                 r = wait_event_killable(ci->i_cap_wq,
1384                                         context_is_writeable_or_written(inode, snapc));
1385                 ceph_put_snap_context(snapc);
1386                 return r == 0 ? -EAGAIN : r;
1387         }
1388         return 0;
1389 }
1390
1391 /*
1392  * We are only allowed to write into/dirty the page if the page is
1393  * clean, or already dirty within the same snap context.
1394  */
1395 static int ceph_write_begin(struct file *file, struct address_space *mapping,
1396                             loff_t pos, unsigned len,
1397                             struct page **pagep, void **fsdata)
1398 {
1399         struct inode *inode = file_inode(file);
1400         struct ceph_inode_info *ci = ceph_inode(inode);
1401         struct folio *folio = NULL;
1402         int r;
1403
1404         r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
1405         if (r < 0)
1406                 return r;
1407
1408         folio_wait_fscache(folio);
1409         WARN_ON_ONCE(!folio_test_locked(folio));
1410         *pagep = &folio->page;
1411         return 0;
1412 }
1413
1414 /*
1415  * We don't do anything in here that simple_write_end() doesn't do,
1416  * except adjust dirty page accounting.
1417  */
1418 static int ceph_write_end(struct file *file, struct address_space *mapping,
1419                           loff_t pos, unsigned len, unsigned copied,
1420                           struct page *subpage, void *fsdata)
1421 {
1422         struct folio *folio = page_folio(subpage);
1423         struct inode *inode = file_inode(file);
1424         bool check_cap = false;
1425
1426         dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
1427              inode, folio, (int)pos, (int)copied, (int)len);
1428
1429         if (!folio_test_uptodate(folio)) {
1430                 /* just return that nothing was copied on a short copy */
1431                 if (copied < len) {
1432                         copied = 0;
1433                         goto out;
1434                 }
1435                 folio_mark_uptodate(folio);
1436         }
1437
1438         /* did file size increase? */
1439         if (pos+copied > i_size_read(inode))
1440                 check_cap = ceph_inode_set_size(inode, pos+copied);
1441
1442         folio_mark_dirty(folio);
1443
1444 out:
1445         folio_unlock(folio);
1446         folio_put(folio);
1447
1448         if (check_cap)
1449                 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);
1450
1451         return copied;
1452 }
1453
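/*
 * Address space operations for regular ceph files: the read side is
 * driven by the netfs library, while the write side stays ceph-specific
 * so that dirty pages carry their snap context through writeback.
 */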
1454 const struct address_space_operations ceph_aops = {
1455         .read_folio = netfs_read_folio,
1456         .readahead = netfs_readahead,
1457         .writepage = ceph_writepage,
1458         .writepages = ceph_writepages_start,
1459         .write_begin = ceph_write_begin,
1460         .write_end = ceph_write_end,
1461         .dirty_folio = ceph_dirty_folio,
1462         .invalidate_folio = ceph_invalidate_folio,
1463         .release_folio = ceph_release_folio,
1464         .direct_IO = noop_direct_IO,
1465 };
1466
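/*
 * Block every signal except SIGKILL for the duration of a fault so the
 * cap waits below aren't aborted by non-fatal signals; the old mask is
 * saved in *oldset and restored by ceph_restore_sigs().
 */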
1467 static void ceph_block_sigs(sigset_t *oldset)
1468 {
1469         sigset_t mask;
1470         siginitsetinv(&mask, sigmask(SIGKILL));
1471         sigprocmask(SIG_BLOCK, &mask, oldset);
1472 }
1473
1474 static void ceph_restore_sigs(sigset_t *oldset)
1475 {
1476         sigprocmask(SIG_SETMASK, oldset, NULL);
1477 }
1478
1479 /*
1480  * vm ops
1481  */
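/*
 * Read fault: take Fc cap references so we may legitimately cache the
 * data, service the fault via filemap_fault(), and fall back to pulling
 * inline data directly from the MDS when the data still lives inline.
 */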
1482 static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
1483 {
1484         struct vm_area_struct *vma = vmf->vma;
1485         struct inode *inode = file_inode(vma->vm_file);
1486         struct ceph_inode_info *ci = ceph_inode(inode);
1487         struct ceph_file_info *fi = vma->vm_file->private_data;
1488         loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
1489         int want, got, err;
1490         sigset_t oldset;
1491         vm_fault_t ret = VM_FAULT_SIGBUS;
1492
1493         if (ceph_inode_is_shutdown(inode))
1494                 return ret;
1495
1496         ceph_block_sigs(&oldset);
1497
1498         dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
1499              inode, ceph_vinop(inode), off);
1500         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1501                 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1502         else
1503                 want = CEPH_CAP_FILE_CACHE;
1504
1505         got = 0;
1506         err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
1507         if (err < 0)
1508                 goto out_restore;
1509
1510         dout("filemap_fault %p %llu got cap refs on %s\n",
1511              inode, off, ceph_cap_string(got));
1512
1513         if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
1514             !ceph_has_inline_data(ci)) {
1515                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1516                 ceph_add_rw_context(fi, &rw_ctx);
1517                 ret = filemap_fault(vmf);
1518                 ceph_del_rw_context(fi, &rw_ctx);
1519                 dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
1520                      inode, off, ceph_cap_string(got), ret);
1521         } else
1522                 err = -EAGAIN;
1523
1524         ceph_put_cap_refs(ci, got);
1525
1526         if (err != -EAGAIN)
1527                 goto out_restore;
1528
1529         /* read inline data */
1530         if (off >= PAGE_SIZE) {
1531                 /* does not support inline data > PAGE_SIZE */
1532                 ret = VM_FAULT_SIGBUS;
1533         } else {
1534                 struct address_space *mapping = inode->i_mapping;
1535                 struct page *page;
1536
1537                 filemap_invalidate_lock_shared(mapping);
1538                 page = find_or_create_page(mapping, 0,
1539                                 mapping_gfp_constraint(mapping, ~__GFP_FS));
1540                 if (!page) {
1541                         ret = VM_FAULT_OOM;
1542                         goto out_inline;
1543                 }
1544                 err = __ceph_do_getattr(inode, page,
1545                                          CEPH_STAT_CAP_INLINE_DATA, true);
1546                 if (err < 0 || off >= i_size_read(inode)) {
1547                         unlock_page(page);
1548                         put_page(page);
1549                         ret = vmf_error(err);
1550                         goto out_inline;
1551                 }
1552                 if (err < PAGE_SIZE)
1553                         zero_user_segment(page, err, PAGE_SIZE);
1554                 else
1555                         flush_dcache_page(page);
1556                 SetPageUptodate(page);
1557                 vmf->page = page;
1558                 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
1559 out_inline:
1560                 filemap_invalidate_unlock_shared(mapping);
1561                 dout("filemap_fault %p %llu read inline data ret %x\n",
1562                      inode, off, ret);
1563         }
1564 out_restore:
1565         ceph_restore_sigs(&oldset);
1566         if (err < 0)
1567                 ret = vmf_error(err);
1568
1569         return ret;
1570 }
1571
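/*
 * Write fault: take Fb cap references, then dirty the page in the head
 * snap context.  If the page is already dirty in an older, not yet
 * writeable context, flush it and wait for that context to become
 * writeable or written before retrying.
 */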
1572 static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
1573 {
1574         struct vm_area_struct *vma = vmf->vma;
1575         struct inode *inode = file_inode(vma->vm_file);
1576         struct ceph_inode_info *ci = ceph_inode(inode);
1577         struct ceph_file_info *fi = vma->vm_file->private_data;
1578         struct ceph_cap_flush *prealloc_cf;
1579         struct page *page = vmf->page;
1580         loff_t off = page_offset(page);
1581         loff_t size = i_size_read(inode);
1582         size_t len;
1583         int want, got, err;
1584         sigset_t oldset;
1585         vm_fault_t ret = VM_FAULT_SIGBUS;
1586
1587         if (ceph_inode_is_shutdown(inode))
1588                 return ret;
1589
1590         prealloc_cf = ceph_alloc_cap_flush();
1591         if (!prealloc_cf)
1592                 return VM_FAULT_OOM;
1593
1594         sb_start_pagefault(inode->i_sb);
1595         ceph_block_sigs(&oldset);
1596
1597         if (off + thp_size(page) <= size)
1598                 len = thp_size(page);
1599         else
1600                 len = offset_in_thp(page, size);
1601
1602         dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
1603              inode, ceph_vinop(inode), off, len, size);
1604         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1605                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1606         else
1607                 want = CEPH_CAP_FILE_BUFFER;
1608
1609         got = 0;
1610         err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
1611         if (err < 0)
1612                 goto out_free;
1613
1614         dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
1615              inode, off, len, ceph_cap_string(got));
1616
1617         /* Update time before taking page lock */
1618         file_update_time(vma->vm_file);
1619         inode_inc_iversion_raw(inode);
1620
1621         do {
1622                 struct ceph_snap_context *snapc;
1623
1624                 lock_page(page);
1625
1626                 if (page_mkwrite_check_truncate(page, inode) < 0) {
1627                         unlock_page(page);
1628                         ret = VM_FAULT_NOPAGE;
1629                         break;
1630                 }
1631
1632                 snapc = ceph_find_incompatible(page);
1633                 if (!snapc) {
1634                         /* success.  we'll keep the page locked. */
1635                         set_page_dirty(page);
1636                         ret = VM_FAULT_LOCKED;
1637                         break;
1638                 }
1639
1640                 unlock_page(page);
1641
1642                 if (IS_ERR(snapc)) {
1643                         ret = VM_FAULT_SIGBUS;
1644                         break;
1645                 }
1646
1647                 ceph_queue_writeback(inode);
1648                 err = wait_event_killable(ci->i_cap_wq,
1649                                 context_is_writeable_or_written(inode, snapc));
1650                 ceph_put_snap_context(snapc);
1651         } while (err == 0);
1652
1653         if (ret == VM_FAULT_LOCKED) {
1654                 int dirty;
1655                 spin_lock(&ci->i_ceph_lock);
1656                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1657                                                &prealloc_cf);
1658                 spin_unlock(&ci->i_ceph_lock);
1659                 if (dirty)
1660                         __mark_inode_dirty(inode, dirty);
1661         }
1662
1663         dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
1664              inode, off, len, ceph_cap_string(got), ret);
1665         ceph_put_cap_refs_async(ci, got);
1666 out_free:
1667         ceph_restore_sigs(&oldset);
1668         sb_end_pagefault(inode->i_sb);
1669         ceph_free_cap_flush(prealloc_cf);
1670         if (err < 0)
1671                 ret = vmf_error(err);
1672         return ret;
1673 }
1674
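/*
 * Copy inline data received from the MDS into page 0 of the mapping and
 * zero the rest of the page.  If @locked_page is supplied it is used
 * directly and left locked; otherwise the page is looked up (or
 * created), marked uptodate and released here.
 */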
1675 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1676                            char *data, size_t len)
1677 {
1678         struct address_space *mapping = inode->i_mapping;
1679         struct page *page;
1680
1681         if (locked_page) {
1682                 page = locked_page;
1683         } else {
1684                 if (i_size_read(inode) == 0)
1685                         return;
1686                 page = find_or_create_page(mapping, 0,
1687                                            mapping_gfp_constraint(mapping,
1688                                            ~__GFP_FS));
1689                 if (!page)
1690                         return;
1691                 if (PageUptodate(page)) {
1692                         unlock_page(page);
1693                         put_page(page);
1694                         return;
1695                 }
1696         }
1697
1698         dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
1699              inode, ceph_vinop(inode), len, locked_page);
1700
1701         if (len > 0) {
1702                 void *kaddr = kmap_atomic(page);
1703                 memcpy(kaddr, data, len);
1704                 kunmap_atomic(kaddr);
1705         }
1706
1707         if (page != locked_page) {
1708                 if (len < PAGE_SIZE)
1709                         zero_user_segment(page, len, PAGE_SIZE);
1710                 else
1711                         flush_dcache_page(page);
1712
1713                 SetPageUptodate(page);
1714                 unlock_page(page);
1715                 put_page(page);
1716         }
1717 }
1718
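/*
 * Migrate inline data out to the first RADOS object so the file can be
 * handled like any other: create the object, write the cached data to it
 * guarded by a cmpxattr on "inline_version" (losing the race shows up as
 * -ECANCELED and is treated as success), then clear i_inline_version and
 * dirty the Fw cap.
 */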
1719 int ceph_uninline_data(struct file *file)
1720 {
1721         struct inode *inode = file_inode(file);
1722         struct ceph_inode_info *ci = ceph_inode(inode);
1723         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1724         struct ceph_osd_request *req = NULL;
1725         struct ceph_cap_flush *prealloc_cf = NULL;
1726         struct folio *folio = NULL;
1727         u64 inline_version = CEPH_INLINE_NONE;
1728         struct page *pages[1];
1729         int err = 0;
1730         u64 len;
1731
1732         spin_lock(&ci->i_ceph_lock);
1733         inline_version = ci->i_inline_version;
1734         spin_unlock(&ci->i_ceph_lock);
1735
1736         dout("uninline_data %p %llx.%llx inline_version %llu\n",
1737              inode, ceph_vinop(inode), inline_version);
1738
1739         if (ceph_inode_is_shutdown(inode)) {
1740                 err = -EIO;
1741                 goto out;
1742         }
1743
1744         if (inline_version == CEPH_INLINE_NONE)
1745                 return 0;
1746
1747         prealloc_cf = ceph_alloc_cap_flush();
1748         if (!prealloc_cf)
1749                 return -ENOMEM;
1750
1751         if (inline_version == 1) /* initial version, no data */
1752                 goto out_uninline;
1753
1754         folio = read_mapping_folio(inode->i_mapping, 0, file);
1755         if (IS_ERR(folio)) {
1756                 err = PTR_ERR(folio);
1757                 goto out;
1758         }
1759
1760         folio_lock(folio);
1761
1762         len = i_size_read(inode);
1763         if (len > folio_size(folio))
1764                 len = folio_size(folio);
1765
1766         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1767                                     ceph_vino(inode), 0, &len, 0, 1,
1768                                     CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
1769                                     NULL, 0, 0, false);
1770         if (IS_ERR(req)) {
1771                 err = PTR_ERR(req);
1772                 goto out_unlock;
1773         }
1774
1775         req->r_mtime = inode->i_mtime;
1776         ceph_osdc_start_request(&fsc->client->osdc, req);
1777         err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1778         ceph_osdc_put_request(req);
1779         if (err < 0)
1780                 goto out_unlock;
1781
1782         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1783                                     ceph_vino(inode), 0, &len, 1, 3,
1784                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
1785                                     NULL, ci->i_truncate_seq,
1786                                     ci->i_truncate_size, false);
1787         if (IS_ERR(req)) {
1788                 err = PTR_ERR(req);
1789                 goto out_unlock;
1790         }
1791
1792         pages[0] = folio_page(folio, 0);
1793         osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);
1794
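        /*
         * Op 0 compares the object's "inline_version" xattr against ours
         * before op 1 writes the data; if the guard fails, the OSD returns
         * -ECANCELED, which is treated as success further down.
         */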
1795         {
1796                 __le64 xattr_buf = cpu_to_le64(inline_version);
1797                 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
1798                                             "inline_version", &xattr_buf,
1799                                             sizeof(xattr_buf),
1800                                             CEPH_OSD_CMPXATTR_OP_GT,
1801                                             CEPH_OSD_CMPXATTR_MODE_U64);
1802                 if (err)
1803                         goto out_put_req;
1804         }
1805
1806         {
1807                 char xattr_buf[32];
1808                 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
1809                                          "%llu", inline_version);
1810                 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
1811                                             "inline_version",
1812                                             xattr_buf, xattr_len, 0, 0);
1813                 if (err)
1814                         goto out_put_req;
1815         }
1816
1817         req->r_mtime = inode->i_mtime;
1818         ceph_osdc_start_request(&fsc->client->osdc, req);
1819         err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1820
1821         ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1822                                   req->r_end_latency, len, err);
1823
1824 out_uninline:
1825         if (!err) {
1826                 int dirty;
1827
1828                 /* Set to CEPH_INLINE_NONE and dirty the caps */
1829                 down_read(&fsc->mdsc->snap_rwsem);
1830                 spin_lock(&ci->i_ceph_lock);
1831                 ci->i_inline_version = CEPH_INLINE_NONE;
1832                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
1833                 spin_unlock(&ci->i_ceph_lock);
1834                 up_read(&fsc->mdsc->snap_rwsem);
1835                 if (dirty)
1836                         __mark_inode_dirty(inode, dirty);
1837         }
1838 out_put_req:
1839         ceph_osdc_put_request(req);
1840         if (err == -ECANCELED)
1841                 err = 0;
1842 out_unlock:
1843         if (folio) {
1844                 folio_unlock(folio);
1845                 folio_put(folio);
1846         }
1847 out:
1848         ceph_free_cap_flush(prealloc_cf);
1849         dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
1850              inode, ceph_vinop(inode), inline_version, err);
1851         return err;
1852 }
1853
1854 static const struct vm_operations_struct ceph_vmops = {
1855         .fault          = ceph_filemap_fault,
1856         .page_mkwrite   = ceph_page_mkwrite,
1857 };
1858
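/* Install the cap-aware fault/mkwrite handlers for a file mapping. */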
1859 int ceph_mmap(struct file *file, struct vm_area_struct *vma)
1860 {
1861         struct address_space *mapping = file->f_mapping;
1862
1863         if (!mapping->a_ops->read_folio)
1864                 return -ENOEXEC;
1865         vma->vm_ops = &ceph_vmops;
1866         return 0;
1867 }
1868
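/* Cached pool permission bits, stored in ceph_pool_perm::perm. */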
1869 enum {
1870         POOL_READ       = 1,
1871         POOL_WRITE      = 2,
1872 };
1873
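/*
 * Probe (and cache) this client's access to a data pool/namespace: issue
 * a dummy STAT read and an exclusive-create write against the file's
 * first object and deduce POOL_READ/POOL_WRITE from the results
 * (-ENOENT still proves read access, -EEXIST write access); the answer
 * is cached in mdsc->pool_perm_tree.
 */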
1874 static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
1875                                 s64 pool, struct ceph_string *pool_ns)
1876 {
1877         struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
1878         struct ceph_mds_client *mdsc = fsc->mdsc;
1879         struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
1880         struct rb_node **p, *parent;
1881         struct ceph_pool_perm *perm;
1882         struct page **pages;
1883         size_t pool_ns_len;
1884         int err = 0, err2 = 0, have = 0;
1885
1886         down_read(&mdsc->pool_perm_rwsem);
1887         p = &mdsc->pool_perm_tree.rb_node;
1888         while (*p) {
1889                 perm = rb_entry(*p, struct ceph_pool_perm, node);
1890                 if (pool < perm->pool)
1891                         p = &(*p)->rb_left;
1892                 else if (pool > perm->pool)
1893                         p = &(*p)->rb_right;
1894                 else {
1895                         int ret = ceph_compare_string(pool_ns,
1896                                                 perm->pool_ns,
1897                                                 perm->pool_ns_len);
1898                         if (ret < 0)
1899                                 p = &(*p)->rb_left;
1900                         else if (ret > 0)
1901                                 p = &(*p)->rb_right;
1902                         else {
1903                                 have = perm->perm;
1904                                 break;
1905                         }
1906                 }
1907         }
1908         up_read(&mdsc->pool_perm_rwsem);
1909         if (*p)
1910                 goto out;
1911
1912         if (pool_ns)
1913                 dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
1914                      pool, (int)pool_ns->len, pool_ns->str);
1915         else
1916                 dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
1917
1918         down_write(&mdsc->pool_perm_rwsem);
1919         p = &mdsc->pool_perm_tree.rb_node;
1920         parent = NULL;
1921         while (*p) {
1922                 parent = *p;
1923                 perm = rb_entry(parent, struct ceph_pool_perm, node);
1924                 if (pool < perm->pool)
1925                         p = &(*p)->rb_left;
1926                 else if (pool > perm->pool)
1927                         p = &(*p)->rb_right;
1928                 else {
1929                         int ret = ceph_compare_string(pool_ns,
1930                                                 perm->pool_ns,
1931                                                 perm->pool_ns_len);
1932                         if (ret < 0)
1933                                 p = &(*p)->rb_left;
1934                         else if (ret > 0)
1935                                 p = &(*p)->rb_right;
1936                         else {
1937                                 have = perm->perm;
1938                                 break;
1939                         }
1940                 }
1941         }
1942         if (*p) {
1943                 up_write(&mdsc->pool_perm_rwsem);
1944                 goto out;
1945         }
1946
1947         rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
1948                                          1, false, GFP_NOFS);
1949         if (!rd_req) {
1950                 err = -ENOMEM;
1951                 goto out_unlock;
1952         }
1953
1954         rd_req->r_flags = CEPH_OSD_FLAG_READ;
1955         osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
1956         rd_req->r_base_oloc.pool = pool;
1957         if (pool_ns)
1958                 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
1959         ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
1960
1961         err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
1962         if (err)
1963                 goto out_unlock;
1964
1965         wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
1966                                          1, false, GFP_NOFS);
1967         if (!wr_req) {
1968                 err = -ENOMEM;
1969                 goto out_unlock;
1970         }
1971
1972         wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
1973         osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
1974         ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
1975         ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
1976
1977         err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
1978         if (err)
1979                 goto out_unlock;
1980
1981         /* one page should be large enough for STAT data */
1982         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
1983         if (IS_ERR(pages)) {
1984                 err = PTR_ERR(pages);
1985                 goto out_unlock;
1986         }
1987
1988         osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
1989                                      0, false, true);
1990         ceph_osdc_start_request(&fsc->client->osdc, rd_req);
1991
1992         wr_req->r_mtime = ci->netfs.inode.i_mtime;
1993         ceph_osdc_start_request(&fsc->client->osdc, wr_req);
1994
1995         err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
1996         err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
1997
1998         if (err >= 0 || err == -ENOENT)
1999                 have |= POOL_READ;
2000         else if (err != -EPERM) {
2001                 if (err == -EBLOCKLISTED)
2002                         fsc->blocklisted = true;
2003                 goto out_unlock;
2004         }
2005
2006         if (err2 == 0 || err2 == -EEXIST)
2007                 have |= POOL_WRITE;
2008         else if (err2 != -EPERM) {
2009                 if (err2 == -EBLOCKLISTED)
2010                         fsc->blocklisted = true;
2011                 err = err2;
2012                 goto out_unlock;
2013         }
2014
2015         pool_ns_len = pool_ns ? pool_ns->len : 0;
2016         perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
2017         if (!perm) {
2018                 err = -ENOMEM;
2019                 goto out_unlock;
2020         }
2021
2022         perm->pool = pool;
2023         perm->perm = have;
2024         perm->pool_ns_len = pool_ns_len;
2025         if (pool_ns_len > 0)
2026                 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
2027         perm->pool_ns[pool_ns_len] = 0;
2028
2029         rb_link_node(&perm->node, parent, p);
2030         rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
2031         err = 0;
2032 out_unlock:
2033         up_write(&mdsc->pool_perm_rwsem);
2034
2035         ceph_osdc_put_request(rd_req);
2036         ceph_osdc_put_request(wr_req);
2037 out:
2038         if (!err)
2039                 err = have;
2040         if (pool_ns)
2041                 dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
2042                      pool, (int)pool_ns->len, pool_ns->str, err);
2043         else
2044                 dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
2045         return err;
2046 }
2047
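/*
 * Verify pool permissions before FILE_RD/FILE_WR caps are used.  The
 * fast path checks the flags cached in i_ceph_flags; on a miss, fill the
 * cache via __ceph_pool_perm_get() and re-check in case the layout
 * changed underneath us.
 */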
2048 int ceph_pool_perm_check(struct inode *inode, int need)
2049 {
2050         struct ceph_inode_info *ci = ceph_inode(inode);
2051         struct ceph_string *pool_ns;
2052         s64 pool;
2053         int ret, flags;
2054
2055         /* Only need to do this for regular files */
2056         if (!S_ISREG(inode->i_mode))
2057                 return 0;
2058
2059         if (ci->i_vino.snap != CEPH_NOSNAP) {
2060                 /*
2061                  * Pool permission check needs to write to the first object.
2062                  * But for a snapshot, the head of the first object may already
2063                  * have been deleted, so skip the check to avoid creating an orphan.
2064                  */
2065                 return 0;
2066         }
2067
2068         if (ceph_test_mount_opt(ceph_inode_to_client(inode),
2069                                 NOPOOLPERM))
2070                 return 0;
2071
2072         spin_lock(&ci->i_ceph_lock);
2073         flags = ci->i_ceph_flags;
2074         pool = ci->i_layout.pool_id;
2075         spin_unlock(&ci->i_ceph_lock);
2076 check:
2077         if (flags & CEPH_I_POOL_PERM) {
2078                 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
2079                         dout("ceph_pool_perm_check pool %lld no read perm\n",
2080                              pool);
2081                         return -EPERM;
2082                 }
2083                 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
2084                         dout("ceph_pool_perm_check pool %lld no write perm\n",
2085                              pool);
2086                         return -EPERM;
2087                 }
2088                 return 0;
2089         }
2090
2091         pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
2092         ret = __ceph_pool_perm_get(ci, pool, pool_ns);
2093         ceph_put_string(pool_ns);
2094         if (ret < 0)
2095                 return ret;
2096
2097         flags = CEPH_I_POOL_PERM;
2098         if (ret & POOL_READ)
2099                 flags |= CEPH_I_POOL_RD;
2100         if (ret & POOL_WRITE)
2101                 flags |= CEPH_I_POOL_WR;
2102
2103         spin_lock(&ci->i_ceph_lock);
2104         if (pool == ci->i_layout.pool_id &&
2105             pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
2106                 ci->i_ceph_flags |= flags;
2107         } else {
2108                 pool = ci->i_layout.pool_id;
2109                 flags = ci->i_ceph_flags;
2110         }
2111         spin_unlock(&ci->i_ceph_lock);
2112         goto check;
2113 }
2114
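/* Free every cached pool permission entry. */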
2115 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
2116 {
2117         struct ceph_pool_perm *perm;
2118         struct rb_node *n;
2119
2120         while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
2121                 n = rb_first(&mdsc->pool_perm_tree);
2122                 perm = rb_entry(n, struct ceph_pool_perm, node);
2123                 rb_erase(n, &mdsc->pool_perm_tree);
2124                 kfree(perm);
2125         }
2126 }