// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/signal.h>
#include <linux/iversion.h>
#include <linux/ktime.h>
#include <linux/netfs.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "metric.h"
#include "crypto.h"
#include <linux/ceph/osd_client.h>
#include <linux/ceph/striper.h>

/*
 * Ceph address space ops.
 *
 * There are a few funny things going on here.
 *
 * The page->private field is used to reference a struct
 * ceph_snap_context for _every_ dirty page.  This indicates which
 * snapshot the page was logically dirtied in, and thus which snap
 * context needs to be associated with the osd write during writeback.
 *
 * Similarly, struct ceph_inode_info maintains a set of counters to
 * count dirty pages on the inode.  In the absence of snapshots,
 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
 *
 * When a snapshot is taken (that is, when the client receives
 * notification that a snapshot was taken), each inode with caps and
 * with dirty pages (dirty pages implies there is a cap) gets a new
 * ceph_cap_snap in the i_cap_snaps list (which is sorted in ascending
 * order, new snaps go to the tail).  The i_wrbuffer_ref_head count is
 * moved to capsnap->dirty. (Unless a sync write is currently in
 * progress.  In that case, the capsnap is said to be "pending", new
 * writes cannot start, and the capsnap isn't "finalized" until the
 * write completes (or fails) and a final size/mtime for the inode for
 * that snap can be settled upon.)  i_wrbuffer_ref_head is reset to 0.
 *
 * On writeback, we must submit writes to the osd IN SNAP ORDER.  So,
 * we look for the first capsnap in i_cap_snaps and write out pages in
 * that snap context _only_.  Then we move on to the next capsnap,
 * eventually reaching the "live" or "head" context (i.e., pages that
 * are not yet snapped) and end up writing the most recently dirtied
 * pages.
 *
 * Invalidate and so forth must take care to ensure the dirty page
 * accounting is preserved.
 */
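
/*
 * Worked example of the accounting above (illustrative numbers only):
 * with no snaps outstanding, dirtying three pages leaves
 * i_wrbuffer_ref == i_wrbuffer_ref_head == 3.  If a snapshot is then
 * taken, the head count moves to the new capsnap (capsnap->dirty == 3,
 * i_wrbuffer_ref_head == 0), while i_wrbuffer_ref stays at 3 until the
 * pages are written back or invalidated.
 */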

#define CONGESTION_ON_THRESH(congestion_kb) (congestion_kb >> (PAGE_SHIFT-10))
#define CONGESTION_OFF_THRESH(congestion_kb)                            \
        (CONGESTION_ON_THRESH(congestion_kb) -                          \
         (CONGESTION_ON_THRESH(congestion_kb) >> 2))
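
/*
 * Example (illustrative values): with 4 KiB pages (PAGE_SHIFT == 12)
 * and congestion_kb == 8192, CONGESTION_ON_THRESH is 8192 >> 2 == 2048
 * pages in flight and CONGESTION_OFF_THRESH is 2048 - 512 == 1536, so
 * the two thresholds give the congestion state some hysteresis.
 */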

static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
                                        struct folio **foliop, void **_fsdata);

static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
        if (PagePrivate(page))
                return (void *)page->private;
        return NULL;
}

/*
 * Dirty a page.  Optimistically adjust accounting, on the assumption
 * that we won't race with invalidate.  If we do, readjust.
 */
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_snap_context *snapc;

        if (folio_test_dirty(folio)) {
                dout("%p dirty_folio %p idx %lu -- already dirty\n",
                     mapping->host, folio, folio->index);
                VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
                return false;
        }

        inode = mapping->host;
        ci = ceph_inode(inode);

        /* dirty the head */
        spin_lock(&ci->i_ceph_lock);
        BUG_ON(ci->i_wr_ref == 0); // caller should hold Fw reference
        if (__ceph_have_pending_cap_snap(ci)) {
                struct ceph_cap_snap *capsnap =
                                list_last_entry(&ci->i_cap_snaps,
                                                struct ceph_cap_snap,
                                                ci_item);
                snapc = ceph_get_snap_context(capsnap->context);
                capsnap->dirty_pages++;
        } else {
                BUG_ON(!ci->i_head_snapc);
                snapc = ceph_get_snap_context(ci->i_head_snapc);
                ++ci->i_wrbuffer_ref_head;
        }
        if (ci->i_wrbuffer_ref == 0)
                ihold(inode);
        ++ci->i_wrbuffer_ref;
        dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
             "snapc %p seq %lld (%d snaps)\n",
             mapping->host, folio, folio->index,
             ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
             ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
             snapc, snapc->seq, snapc->num_snaps);
        spin_unlock(&ci->i_ceph_lock);

        /*
         * Reference snap context in folio->private.  Also set
         * PagePrivate so that we get invalidate_folio callback.
         */
        VM_WARN_ON_FOLIO(folio->private, folio);
        folio_attach_private(folio, snapc);

        return ceph_fscache_dirty_folio(mapping, folio);
}

/*
 * If we are truncating the full folio (i.e. offset == 0), adjust the
 * dirty folio counters appropriately.  Only called if there is private
 * data on the folio.
 */
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
                                size_t length)
{
        struct inode *inode;
        struct ceph_inode_info *ci;
        struct ceph_snap_context *snapc;

        inode = folio->mapping->host;
        ci = ceph_inode(inode);

        if (offset != 0 || length != folio_size(folio)) {
                dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
                     inode, folio->index, offset, length);
                return;
        }

        WARN_ON(!folio_test_locked(folio));
        if (folio_test_private(folio)) {
                dout("%p invalidate_folio idx %lu full dirty page\n",
                     inode, folio->index);

                snapc = folio_detach_private(folio);
                ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
                ceph_put_snap_context(snapc);
        }

        folio_wait_fscache(folio);
}

static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
{
        struct inode *inode = folio->mapping->host;

        dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
             ceph_vinop(inode),
             folio->index, folio_test_dirty(folio) ? "" : "not ");

        if (folio_test_private(folio))
                return false;

        if (folio_test_fscache(folio)) {
                if (current_is_kswapd() || !(gfp & __GFP_FS))
                        return false;
                folio_wait_fscache(folio);
        }
        ceph_fscache_note_page_release(inode);
        return true;
}

static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
{
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_file_layout *lo = &ci->i_layout;
        unsigned long max_pages = inode->i_sb->s_bdi->ra_pages;
        loff_t end = rreq->start + rreq->len, new_end;
        struct ceph_netfs_request_data *priv = rreq->netfs_priv;
        unsigned long max_len;
        u32 blockoff;

        if (priv) {
                /* Readahead is disabled by posix_fadvise POSIX_FADV_RANDOM */
                if (priv->file_ra_disabled)
                        max_pages = 0;
                else
                        max_pages = priv->file_ra_pages;
        }

        /* Readahead is disabled */
        if (!max_pages)
                return;

        max_len = max_pages << PAGE_SHIFT;

        /*
         * Try to expand the length forward by rounding it up to the next
         * block, but do not exceed the file size, unless the original
         * request already exceeds it.
         */
        new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
        if (new_end > end && new_end <= rreq->start + max_len)
                rreq->len = new_end - rreq->start;

        /* Try to expand the start downward */
        div_u64_rem(rreq->start, lo->stripe_unit, &blockoff);
        if (rreq->len + blockoff <= max_len) {
                rreq->start -= blockoff;
                rreq->len += blockoff;
        }
}
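
/*
 * For example (illustrative layout): with a 4 MiB stripe_unit, a 1 MiB
 * request starting at offset 5 MiB has its end rounded up from 6 MiB to
 * 8 MiB (capped at i_size) and its start pulled back to the 4 MiB
 * boundary, provided the expanded length still fits in max_len.
 */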

static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
{
        struct inode *inode = subreq->rreq->inode;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 objno, objoff;
        u32 xlen;

        /* Truncate the extent at the end of the current block */
        ceph_calc_file_object_mapping(&ci->i_layout, subreq->start, subreq->len,
                                      &objno, &objoff, &xlen);
        subreq->len = min(xlen, fsc->mount_options->rsize);
        return true;
}
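
/*
 * E.g., assuming the default layout (4 MiB stripe_unit), a subrequest
 * beginning 1 MiB into a block is clamped to at most 3 MiB here, and
 * further bounded by the rsize mount option.
 */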

static void finish_netfs_read(struct ceph_osd_request *req)
{
        struct inode *inode = req->r_inode;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
        struct netfs_io_subrequest *subreq = req->r_priv;
        struct ceph_osd_req_op *op = &req->r_ops[0];
        int err = req->r_result;
        bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);

        ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                 req->r_end_latency, osd_data->length, err);

        dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
             subreq->len, i_size_read(req->r_inode));

        /* no object means success but no data */
        if (err == -ENOENT)
                err = 0;
        else if (err == -EBLOCKLISTED)
                fsc->blocklisted = true;

        if (err >= 0) {
                if (sparse && err > 0)
                        err = ceph_sparse_ext_map_end(op);
                if (err < subreq->len)
                        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
                if (IS_ENCRYPTED(inode) && err > 0) {
                        err = ceph_fscrypt_decrypt_extents(inode,
                                        osd_data->pages, subreq->start,
                                        op->extent.sparse_ext,
                                        op->extent.sparse_ext_cnt);
                        if (err > subreq->len)
                                err = subreq->len;
                }
        }

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                ceph_put_page_vector(osd_data->pages,
                                     calc_pages_for(osd_data->alignment,
                                        osd_data->length), false);
        }
        netfs_subreq_terminated(subreq, err, false);
        iput(req->r_inode);
        ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_mds_reply_info_parsed *rinfo;
        struct ceph_mds_reply_info_in *iinfo;
        struct ceph_mds_request *req;
        struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct iov_iter iter;
        ssize_t err = 0;
        size_t len;
        int mode;

        __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
        __clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);

        if (subreq->start >= inode->i_size)
                goto out;

        /* We need to fetch the inline data. */
        mode = ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_INLINE_DATA);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_ino1 = ci->i_vino;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INLINE_DATA);
        req->r_num_caps = 2;

        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (err < 0)
                goto out;

        rinfo = &req->r_reply_info;
        iinfo = &rinfo->targeti;
        if (iinfo->inline_version == CEPH_INLINE_NONE) {
                /* The data got uninlined */
                ceph_mdsc_put_request(req);
                return false;
        }

        len = min_t(size_t, iinfo->inline_len - subreq->start, subreq->len);
        iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
        err = copy_to_iter(iinfo->inline_data + subreq->start, len, &iter);
        if (err == 0)
                err = -EFAULT;

        ceph_mdsc_put_request(req);
out:
        netfs_subreq_terminated(subreq, err, false);
        return true;
}

static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
{
        struct netfs_io_request *rreq = subreq->rreq;
        struct inode *inode = rreq->inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_osd_request *req = NULL;
        struct ceph_vino vino = ceph_vino(inode);
        struct iov_iter iter;
        int err = 0;
        u64 len = subreq->len;
        bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
        u64 off = subreq->start;

        if (ceph_inode_is_shutdown(inode)) {
                err = -EIO;
                goto out;
        }

        if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
                return;

        ceph_fscrypt_adjust_off_and_len(inode, &off, &len);

        req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, vino,
                        off, &len, 0, 1, sparse ? CEPH_OSD_OP_SPARSE_READ : CEPH_OSD_OP_READ,
                        CEPH_OSD_FLAG_READ | fsc->client->osdc.client->options->read_from_replica,
                        NULL, ci->i_truncate_seq, ci->i_truncate_size, false);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                req = NULL;
                goto out;
        }

        if (sparse) {
                err = ceph_alloc_sparse_ext_map(&req->r_ops[0]);
                if (err)
                        goto out;
        }

        dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);

        iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);

        /*
         * FIXME: For now, use CEPH_OSD_DATA_TYPE_PAGES instead of _ITER for
         * encrypted inodes. We'd need infrastructure that handles an iov_iter
         * instead of page arrays, and we don't have that as of yet. Once the
         * dust settles on the write helpers and encrypt/decrypt routines for
         * netfs, we should be able to rework this.
         */
        if (IS_ENCRYPTED(inode)) {
                struct page **pages;
                size_t page_off;

                err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
                if (err < 0) {
                        dout("%s: iov_iter_get_pages_alloc2 returned %d\n",
                             __func__, err);
                        goto out;
                }

                /* should always give us a page-aligned read */
                WARN_ON_ONCE(page_off);
                len = err;
                err = 0;

                osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
                                                 false);
        } else {
                osd_req_op_extent_osd_iter(req, 0, &iter);
        }
        if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
                err = -EIO;
                goto out;
        }
        req->r_callback = finish_netfs_read;
        req->r_priv = subreq;
        req->r_inode = inode;
        ihold(inode);

        ceph_osdc_start_request(req->r_osdc, req);
out:
        ceph_osdc_put_request(req);
        if (err)
                netfs_subreq_terminated(subreq, err, false);
        dout("%s: result %d\n", __func__, err);
}

static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
        struct inode *inode = rreq->inode;
        int got = 0, want = CEPH_CAP_FILE_CACHE;
        struct ceph_netfs_request_data *priv;
        int ret = 0;

        if (rreq->origin != NETFS_READAHEAD)
                return 0;

        priv = kzalloc(sizeof(*priv), GFP_NOFS);
        if (!priv)
                return -ENOMEM;

        if (file) {
                struct ceph_rw_context *rw_ctx;
                struct ceph_file_info *fi = file->private_data;

                priv->file_ra_pages = file->f_ra.ra_pages;
                priv->file_ra_disabled = file->f_mode & FMODE_RANDOM;

                rw_ctx = ceph_find_rw_context(fi);
                if (rw_ctx) {
                        rreq->netfs_priv = priv;
                        return 0;
                }
        }

        /*
         * readahead callers do not necessarily hold Fcb caps
         * (e.g. fadvise, madvise).
         */
        ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
        if (ret < 0) {
                dout("start_read %p, error getting cap\n", inode);
                goto out;
        }

        if (!(got & want)) {
                dout("start_read %p, no cache cap\n", inode);
                ret = -EACCES;
                goto out;
        }
        if (ret == 0) {
                ret = -EACCES;
                goto out;
        }

        priv->caps = got;
        rreq->netfs_priv = priv;

out:
        if (ret < 0)
                kfree(priv);

        return ret;
}

static void ceph_netfs_free_request(struct netfs_io_request *rreq)
{
        struct ceph_netfs_request_data *priv = rreq->netfs_priv;

        if (!priv)
                return;

        if (priv->caps)
                ceph_put_cap_refs(ceph_inode(rreq->inode), priv->caps);
        kfree(priv);
        rreq->netfs_priv = NULL;
}

const struct netfs_request_ops ceph_netfs_ops = {
        .init_request           = ceph_init_request,
        .free_request           = ceph_netfs_free_request,
        .begin_cache_operation  = ceph_begin_cache_operation,
        .issue_read             = ceph_netfs_issue_read,
        .expand_readahead       = ceph_netfs_expand_readahead,
        .clamp_length           = ceph_netfs_clamp_length,
        .check_write_begin      = ceph_netfs_check_write_begin,
};

#ifdef CONFIG_CEPH_FSCACHE
static void ceph_set_page_fscache(struct page *page)
{
        set_page_fscache(page);
}

static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
        struct inode *inode = priv;

        if (IS_ERR_VALUE(error) && error != -ENOBUFS)
                ceph_fscache_invalidate(inode, false);
}

static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct fscache_cookie *cookie = ceph_fscache_cookie(ci);

        fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
                               ceph_fscache_write_terminated, inode, caching);
}
#else
static inline void ceph_set_page_fscache(struct page *page)
{
}

static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
#endif /* CONFIG_CEPH_FSCACHE */

struct ceph_writeback_ctl
{
        loff_t i_size;
        u64 truncate_size;
        u32 truncate_seq;
        bool size_stable;
        bool head_snapc;
};

/*
 * Get ref for the oldest snapc for an inode with dirty data... that is, the
 * only snap context we are allowed to write back.
 */
static struct ceph_snap_context *
get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
                   struct ceph_snap_context *page_snapc)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc = NULL;
        struct ceph_cap_snap *capsnap = NULL;

        spin_lock(&ci->i_ceph_lock);
        list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
                     capsnap->context, capsnap->dirty_pages);
                if (!capsnap->dirty_pages)
                        continue;

                /* get i_size, truncate_{seq,size} for page_snapc? */
                if (snapc && capsnap->context != page_snapc)
                        continue;

                if (ctl) {
                        if (capsnap->writing) {
                                ctl->i_size = i_size_read(inode);
                                ctl->size_stable = false;
                        } else {
                                ctl->i_size = capsnap->size;
                                ctl->size_stable = true;
                        }
                        ctl->truncate_size = capsnap->truncate_size;
                        ctl->truncate_seq = capsnap->truncate_seq;
                        ctl->head_snapc = false;
                }

                if (snapc)
                        break;

                snapc = ceph_get_snap_context(capsnap->context);
                if (!page_snapc ||
                    page_snapc == snapc ||
                    page_snapc->seq > snapc->seq)
                        break;
        }
        if (!snapc && ci->i_wrbuffer_ref_head) {
                snapc = ceph_get_snap_context(ci->i_head_snapc);
                dout(" head snapc %p has %d dirty pages\n",
                     snapc, ci->i_wrbuffer_ref_head);
                if (ctl) {
                        ctl->i_size = i_size_read(inode);
                        ctl->truncate_size = ci->i_truncate_size;
                        ctl->truncate_seq = ci->i_truncate_seq;
                        ctl->size_stable = false;
                        ctl->head_snapc = true;
                }
        }
        spin_unlock(&ci->i_ceph_lock);
        return snapc;
}
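
/*
 * For example (illustrative seqs): if i_cap_snaps holds capsnaps with
 * snapc seqs 5 and 9, both with dirty pages, this returns the seq-5
 * context; pages tagged with seq 9 or with the head snapc must wait
 * until every seq-5 page is flushed, preserving the IN SNAP ORDER rule
 * described at the top of this file.
 */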

static u64 get_writepages_data_length(struct inode *inode,
                                      struct page *page, u64 start)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_snap_context *snapc;
        struct ceph_cap_snap *capsnap = NULL;
        u64 end = i_size_read(inode);
        u64 ret;

        snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
        if (snapc != ci->i_head_snapc) {
                bool found = false;
                spin_lock(&ci->i_ceph_lock);
                list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
                        if (capsnap->context == snapc) {
                                if (!capsnap->writing)
                                        end = capsnap->size;
                                found = true;
                                break;
                        }
                }
                spin_unlock(&ci->i_ceph_lock);
                WARN_ON(!found);
        }
        if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
                end = ceph_fscrypt_page_offset(page) + thp_size(page);
        ret = end > start ? end - start : 0;
        if (ret && fscrypt_is_bounce_page(page))
                ret = round_up(ret, CEPH_FSCRYPT_BLOCK_SIZE);
        return ret;
}

/*
 * Write a single page, but leave the page locked.
 *
 * If we get a write error, mark the mapping for error, but still adjust the
 * dirty page accounting (i.e., page is no longer dirty).
 */
static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
{
        struct folio *folio = page_folio(page);
        struct inode *inode = page->mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_snap_context *snapc, *oldest;
        loff_t page_off = page_offset(page);
        int err;
        loff_t len = thp_size(page);
        loff_t wlen;
        struct ceph_writeback_ctl ceph_wbc;
        struct ceph_osd_client *osdc = &fsc->client->osdc;
        struct ceph_osd_request *req;
        bool caching = ceph_is_cache_enabled(inode);
        struct page *bounce_page = NULL;

        dout("writepage %p idx %lu\n", page, page->index);

        if (ceph_inode_is_shutdown(inode))
                return -EIO;

        /* verify this is a writeable snap context */
        snapc = page_snap_context(page);
        if (!snapc) {
                dout("writepage %p page %p not dirty?\n", inode, page);
                return 0;
        }
        oldest = get_oldest_context(inode, &ceph_wbc, snapc);
        if (snapc->seq > oldest->seq) {
                dout("writepage %p page %p snapc %p not writeable - noop\n",
                     inode, page, snapc);
                /* we should only noop if called by kswapd */
                WARN_ON(!(current->flags & PF_MEMALLOC));
                ceph_put_snap_context(oldest);
                redirty_page_for_writepage(wbc, page);
                return 0;
        }
        ceph_put_snap_context(oldest);

        /* is this a partial page at end of file? */
        if (page_off >= ceph_wbc.i_size) {
                dout("folio at %lu beyond eof %llu\n", folio->index,
                                ceph_wbc.i_size);
                folio_invalidate(folio, 0, folio_size(folio));
                return 0;
        }

        if (ceph_wbc.i_size < page_off + len)
                len = ceph_wbc.i_size - page_off;

        wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
        dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
             inode, page, page->index, page_off, wlen, snapc, snapc->seq);

        if (atomic_long_inc_return(&fsc->writeback_count) >
            CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
                fsc->write_congested = true;

        req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
                                    page_off, &wlen, 0, 1, CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_WRITE, snapc,
                                    ceph_wbc.truncate_seq,
                                    ceph_wbc.truncate_size, true);
        if (IS_ERR(req)) {
                redirty_page_for_writepage(wbc, page);
                return PTR_ERR(req);
        }

        if (wlen < len)
                len = wlen;

        set_page_writeback(page);
        if (caching)
                ceph_set_page_fscache(page);
        ceph_fscache_write_to_cache(inode, page_off, len, caching);

        if (IS_ENCRYPTED(inode)) {
                bounce_page = fscrypt_encrypt_pagecache_blocks(page,
                                                    CEPH_FSCRYPT_BLOCK_SIZE, 0,
                                                    GFP_NOFS);
                if (IS_ERR(bounce_page)) {
                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        ceph_osdc_put_request(req);
                        return PTR_ERR(bounce_page);
                }
        }

        /* it may be a short write due to an object boundary */
        WARN_ON_ONCE(len > thp_size(page));
        osd_req_op_extent_osd_data_pages(req, 0,
                        bounce_page ? &bounce_page : &page, wlen, 0,
                        false, false);
        dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
             page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");

        req->r_mtime = inode->i_mtime;
        ceph_osdc_start_request(osdc, req);
        err = ceph_osdc_wait_request(osdc, req);

        ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                  req->r_end_latency, len, err);
        fscrypt_free_bounce_page(bounce_page);
        ceph_osdc_put_request(req);
        if (err == 0)
                err = len;

        if (err < 0) {
                struct writeback_control tmp_wbc;
                if (!wbc)
                        wbc = &tmp_wbc;
                if (err == -ERESTARTSYS) {
                        /* killed by SIGKILL */
                        dout("writepage interrupted page %p\n", page);
                        redirty_page_for_writepage(wbc, page);
                        end_page_writeback(page);
                        return err;
                }
                if (err == -EBLOCKLISTED)
                        fsc->blocklisted = true;
                dout("writepage setting page/mapping error %d %p\n",
                     err, page);
                mapping_set_error(&inode->i_data, err);
                wbc->pages_skipped++;
        } else {
                dout("writepage cleaned page %p\n", page);
                err = 0;  /* vfs expects us to return 0 */
        }
        oldest = detach_page_private(page);
        WARN_ON_ONCE(oldest != snapc);
        end_page_writeback(page);
        ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
        ceph_put_snap_context(snapc);  /* page's reference */

        if (atomic_long_dec_return(&fsc->writeback_count) <
            CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
                fsc->write_congested = false;

        return err;
}

static int ceph_writepage(struct page *page, struct writeback_control *wbc)
{
        int err;
        struct inode *inode = page->mapping->host;

        BUG_ON(!inode);

        /*
         * Bail out on congestion before taking the inode reference:
         * returning AOP_WRITEPAGE_ACTIVATE here would skip the iput()
         * below and leak the reference.
         */
        if (wbc->sync_mode == WB_SYNC_NONE &&
            ceph_inode_to_client(inode)->write_congested)
                return AOP_WRITEPAGE_ACTIVATE;

        ihold(inode);

        wait_on_page_fscache(page);

        err = writepage_nounlock(page, wbc);
        if (err == -ERESTARTSYS) {
                /* direct memory reclaimer was killed by SIGKILL. return 0
                 * to prevent caller from setting mapping/page error */
                err = 0;
        }
        unlock_page(page);
        iput(inode);
        return err;
}

/*
 * async writeback completion handler.
 *
 * If we get an error, set the mapping error bit, but not the individual
 * page error bits.
 */
static void writepages_finish(struct ceph_osd_request *req)
{
        struct inode *inode = req->r_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_osd_data *osd_data;
        struct page *page;
        int num_pages, total_pages = 0;
        int i, j;
        int rc = req->r_result;
        struct ceph_snap_context *snapc = req->r_snapc;
        struct address_space *mapping = inode->i_mapping;
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        unsigned int len = 0;
        bool remove_page;

        dout("writepages_finish %p rc %d\n", inode, rc);
        if (rc < 0) {
                mapping_set_error(mapping, rc);
                ceph_set_error_write(ci);
                if (rc == -EBLOCKLISTED)
                        fsc->blocklisted = true;
        } else {
                ceph_clear_error_write(ci);
        }

        /*
         * We lost the cache cap, need to truncate the page before
         * it is unlocked, otherwise we'd truncate it later in the
         * page truncation thread, possibly losing some data that
         * raced its way in
         */
        remove_page = !(ceph_caps_issued(ci) &
                        (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));

        /* clean all pages */
        for (i = 0; i < req->r_num_ops; i++) {
                if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
                        pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
                                __func__, req->r_ops[i].op, req, i, req->r_tid);
                        break;
                }

                osd_data = osd_req_op_extent_osd_data(req, i);
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
                len += osd_data->length;
                num_pages = calc_pages_for((u64)osd_data->alignment,
                                           (u64)osd_data->length);
                total_pages += num_pages;
                for (j = 0; j < num_pages; j++) {
                        page = osd_data->pages[j];
                        if (fscrypt_is_bounce_page(page)) {
                                page = fscrypt_pagecache_page(page);
                                fscrypt_free_bounce_page(osd_data->pages[j]);
                                osd_data->pages[j] = page;
                        }
                        BUG_ON(!page);
                        WARN_ON(!PageUptodate(page));

                        if (atomic_long_dec_return(&fsc->writeback_count) <
                             CONGESTION_OFF_THRESH(
                                        fsc->mount_options->congestion_kb))
                                fsc->write_congested = false;

                        ceph_put_snap_context(detach_page_private(page));
                        end_page_writeback(page);
                        dout("unlocking %p\n", page);

                        if (remove_page)
                                generic_error_remove_page(inode->i_mapping,
                                                          page);

                        unlock_page(page);
                }
                dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
                     inode, osd_data->length, rc >= 0 ? num_pages : 0);

                release_pages(osd_data->pages, num_pages);
        }

        ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
                                  req->r_end_latency, len, rc);

        ceph_put_wrbuffer_cap_refs(ci, total_pages, snapc);

        osd_data = osd_req_op_extent_osd_data(req, 0);
        if (osd_data->pages_from_pool)
                mempool_free(osd_data->pages, ceph_wb_pagevec_pool);
        else
                kfree(osd_data->pages);
        ceph_osdc_put_request(req);
        ceph_dec_osd_stopping_blocker(fsc->mdsc);
}

/*
 * initiate async writeback
 */
static int ceph_writepages_start(struct address_space *mapping,
                                 struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_vino vino = ceph_vino(inode);
        pgoff_t index, start_index, end = -1;
        struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
        struct folio_batch fbatch;
        int rc = 0;
        unsigned int wsize = i_blocksize(inode);
        struct ceph_osd_request *req = NULL;
        struct ceph_writeback_ctl ceph_wbc;
        bool should_loop, range_whole = false;
        bool done = false;
        bool caching = ceph_is_cache_enabled(inode);
        xa_mark_t tag;

        if (wbc->sync_mode == WB_SYNC_NONE &&
            fsc->write_congested)
                return 0;

        dout("writepages_start %p (mode=%s)\n", inode,
             wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
             (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));

        if (ceph_inode_is_shutdown(inode)) {
                if (ci->i_wrbuffer_ref > 0) {
                        pr_warn_ratelimited(
                                "writepage_start %p %lld forced umount\n",
                                inode, ceph_ino(inode));
                }
                mapping_set_error(mapping, -EIO);
                return -EIO; /* we're in a forced umount, don't write! */
        }
        if (fsc->mount_options->wsize < wsize)
                wsize = fsc->mount_options->wsize;

        folio_batch_init(&fbatch);

        start_index = wbc->range_cyclic ? mapping->writeback_index : 0;
        index = start_index;

        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
                tag = PAGECACHE_TAG_TOWRITE;
        } else {
                tag = PAGECACHE_TAG_DIRTY;
        }
retry:
        /* find oldest snap context with dirty data */
        snapc = get_oldest_context(inode, &ceph_wbc, NULL);
        if (!snapc) {
                /* hmm, why does writepages get called when there
                   is no dirty data? */
                dout(" no snap context with dirty data?\n");
                goto out;
        }
        dout(" oldest snapc is %p seq %lld (%d snaps)\n",
             snapc, snapc->seq, snapc->num_snaps);

        should_loop = false;
        if (ceph_wbc.head_snapc && snapc != last_snapc) {
                /* where to start/end? */
                if (wbc->range_cyclic) {
                        index = start_index;
                        end = -1;
                        if (index > 0)
                                should_loop = true;
                        dout(" cyclic, start at %lu\n", index);
                } else {
                        index = wbc->range_start >> PAGE_SHIFT;
                        end = wbc->range_end >> PAGE_SHIFT;
                        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                                range_whole = true;
                        dout(" not cyclic, %lu to %lu\n", index, end);
                }
        } else if (!ceph_wbc.head_snapc) {
                /* Do not respect wbc->range_{start,end}. Dirty pages
                 * in that range can be associated with a newer snapc.
                 * They are not writeable until all dirty pages
                 * associated with 'snapc' have been written. */
                if (index > 0)
                        should_loop = true;
                dout(" non-head snapc, range whole\n");
        }

        if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
                tag_pages_for_writeback(mapping, index, end);

        ceph_put_snap_context(last_snapc);
        last_snapc = snapc;

        while (!done && index <= end) {
                int num_ops = 0, op_idx;
                unsigned i, nr_folios, max_pages, locked_pages = 0;
                struct page **pages = NULL, **data_pages;
                struct page *page;
                pgoff_t strip_unit_end = 0;
                u64 offset = 0, len = 0;
                bool from_pool = false;

                max_pages = wsize >> PAGE_SHIFT;

get_more_pages:
                nr_folios = filemap_get_folios_tag(mapping, &index,
                                                   end, tag, &fbatch);
                dout("filemap_get_folios_tag got %d\n", nr_folios);
                if (!nr_folios && !locked_pages)
                        break;
                for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
                        page = &fbatch.folios[i]->page;
                        dout("? %p idx %lu\n", page, page->index);
                        if (locked_pages == 0)
                                lock_page(page);  /* first page */
                        else if (!trylock_page(page))
                                break;

                        /* only dirty pages, or our accounting breaks */
                        if (unlikely(!PageDirty(page)) ||
                            unlikely(page->mapping != mapping)) {
                                dout("!dirty or !mapping %p\n", page);
                                unlock_page(page);
                                continue;
                        }
                        /* only if matching snap context */
                        pgsnapc = page_snap_context(page);
                        if (pgsnapc != snapc) {
                                dout("page snapc %p %lld != oldest %p %lld\n",
                                     pgsnapc, pgsnapc->seq, snapc, snapc->seq);
                                if (!should_loop &&
                                    !ceph_wbc.head_snapc &&
                                    wbc->sync_mode != WB_SYNC_NONE)
                                        should_loop = true;
                                unlock_page(page);
                                continue;
                        }
                        if (page_offset(page) >= ceph_wbc.i_size) {
                                struct folio *folio = page_folio(page);

                                dout("folio at %lu beyond eof %llu\n",
                                     folio->index, ceph_wbc.i_size);
                                if ((ceph_wbc.size_stable ||
                                    folio_pos(folio) >= i_size_read(inode)) &&
                                    folio_clear_dirty_for_io(folio))
                                        folio_invalidate(folio, 0,
                                                        folio_size(folio));
                                folio_unlock(folio);
                                continue;
                        }
                        if (strip_unit_end && (page->index > strip_unit_end)) {
                                dout("end of strip unit %p\n", page);
                                unlock_page(page);
                                break;
                        }
                        if (PageWriteback(page) || PageFsCache(page)) {
                                if (wbc->sync_mode == WB_SYNC_NONE) {
                                        dout("%p under writeback\n", page);
                                        unlock_page(page);
                                        continue;
                                }
                                dout("waiting on writeback %p\n", page);
                                wait_on_page_writeback(page);
                                wait_on_page_fscache(page);
                        }

                        if (!clear_page_dirty_for_io(page)) {
                                dout("%p !clear_page_dirty_for_io\n", page);
                                unlock_page(page);
                                continue;
                        }

                        /*
                         * We have something to write.  If this is
                         * the first locked page this time through,
                         * calculate max possible write size and
1100                          * allocate a page array
1101                          */
1102                         if (locked_pages == 0) {
1103                                 u64 objnum;
1104                                 u64 objoff;
1105                                 u32 xlen;
1106
1107                                 /* prepare async write request */
1108                                 offset = (u64)page_offset(page);
1109                                 ceph_calc_file_object_mapping(&ci->i_layout,
1110                                                               offset, wsize,
1111                                                               &objnum, &objoff,
1112                                                               &xlen);
1113                                 len = xlen;
1114
1115                                 num_ops = 1;
1116                                 strip_unit_end = page->index +
1117                                         ((len - 1) >> PAGE_SHIFT);
1118
1119                                 BUG_ON(pages);
1120                                 max_pages = calc_pages_for(0, (u64)len);
1121                                 pages = kmalloc_array(max_pages,
1122                                                       sizeof(*pages),
1123                                                       GFP_NOFS);
1124                                 if (!pages) {
1125                                         from_pool = true;
1126                                         pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
1127                                         BUG_ON(!pages);
1128                                 }
1129
1130                                 len = 0;
1131                         } else if (page->index !=
1132                                    (offset + len) >> PAGE_SHIFT) {
1133                                 if (num_ops >= (from_pool ?  CEPH_OSD_SLAB_OPS :
1134                                                              CEPH_OSD_MAX_OPS)) {
1135                                         redirty_page_for_writepage(wbc, page);
1136                                         unlock_page(page);
1137                                         break;
1138                                 }
1139
1140                                 num_ops++;
1141                                 offset = (u64)page_offset(page);
1142                                 len = 0;
1143                         }
1144
1145                         /* note position of first page in fbatch */
1146                         dout("%p will write page %p idx %lu\n",
1147                              inode, page, page->index);
1148
1149                         if (atomic_long_inc_return(&fsc->writeback_count) >
1150                             CONGESTION_ON_THRESH(
1151                                     fsc->mount_options->congestion_kb))
1152                                 fsc->write_congested = true;
1153
1154                         if (IS_ENCRYPTED(inode)) {
1155                                 pages[locked_pages] =
1156                                         fscrypt_encrypt_pagecache_blocks(page,
1157                                                 PAGE_SIZE, 0,
1158                                                 locked_pages ? GFP_NOWAIT : GFP_NOFS);
1159                                 if (IS_ERR(pages[locked_pages])) {
1160                                         if (PTR_ERR(pages[locked_pages]) == -EINVAL)
1161                                                 pr_err("%s: inode->i_blkbits=%hhu\n",
1162                                                         __func__, inode->i_blkbits);
1163                                         /* better not fail on first page! */
1164                                         BUG_ON(locked_pages == 0);
1165                                         pages[locked_pages] = NULL;
1166                                         redirty_page_for_writepage(wbc, page);
1167                                         unlock_page(page);
1168                                         break;
1169                                 }
1170                                 ++locked_pages;
1171                         } else {
1172                                 pages[locked_pages++] = page;
1173                         }
1174
1175                         fbatch.folios[i] = NULL;
1176                         len += thp_size(page);
1177                 }
1178
1179                 /* did we get anything? */
1180                 if (!locked_pages)
1181                         goto release_folios;
1182                 if (i) {
1183                         unsigned j, n = 0;
1184                         /* shift unused page to beginning of fbatch */
1185                         for (j = 0; j < nr_folios; j++) {
1186                                 if (!fbatch.folios[j])
1187                                         continue;
1188                                 if (n < j)
1189                                         fbatch.folios[n] = fbatch.folios[j];
1190                                 n++;
1191                         }
1192                         fbatch.nr = n;
1193
1194                         if (nr_folios && i == nr_folios &&
1195                             locked_pages < max_pages) {
1196                                 dout("reached end fbatch, trying for more\n");
1197                                 folio_batch_release(&fbatch);
1198                                 goto get_more_pages;
1199                         }
1200                 }
1201
1202 new_request:
1203                 offset = ceph_fscrypt_page_offset(pages[0]);
1204                 len = wsize;
1205
1206                 req = ceph_osdc_new_request(&fsc->client->osdc,
1207                                         &ci->i_layout, vino,
1208                                         offset, &len, 0, num_ops,
1209                                         CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
1210                                         snapc, ceph_wbc.truncate_seq,
1211                                         ceph_wbc.truncate_size, false);
1212                 if (IS_ERR(req)) {
1213                         req = ceph_osdc_new_request(&fsc->client->osdc,
1214                                                 &ci->i_layout, vino,
1215                                                 offset, &len, 0,
1216                                                 min(num_ops,
1217                                                     CEPH_OSD_SLAB_OPS),
1218                                                 CEPH_OSD_OP_WRITE,
1219                                                 CEPH_OSD_FLAG_WRITE,
1220                                                 snapc, ceph_wbc.truncate_seq,
1221                                                 ceph_wbc.truncate_size, true);
1222                         BUG_ON(IS_ERR(req));
1223                 }
1224                 BUG_ON(len < ceph_fscrypt_page_offset(pages[locked_pages - 1]) +
1225                              thp_size(pages[locked_pages - 1]) - offset);
1226
1227                 if (!ceph_inc_osd_stopping_blocker(fsc->mdsc)) {
1228                         rc = -EIO;
1229                         goto release_folios;
1230                 }
1231                 req->r_callback = writepages_finish;
1232                 req->r_inode = inode;
1233
1234                 /* Format the osd request message and submit the write */
1235                 len = 0;
1236                 data_pages = pages;
1237                 op_idx = 0;
1238                 for (i = 0; i < locked_pages; i++) {
1239                         struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
1240
1241                         u64 cur_offset = page_offset(page);
1242                         /*
1243                          * Discontinuity in page range? Ceph can handle that by just passing
1244                          * multiple extents in the write op.
1245                          */
1246                         if (offset + len != cur_offset) {
1247                                 /* If it's full, stop here */
1248                                 if (op_idx + 1 == req->r_num_ops)
1249                                         break;
1250
1251                                 /* Kick off an fscache write with what we have so far. */
1252                                 ceph_fscache_write_to_cache(inode, offset, len, caching);
1253
1254                                 /* Start a new extent */
1255                                 osd_req_op_extent_dup_last(req, op_idx,
1256                                                            cur_offset - offset);
1257                                 dout("writepages got pages at %llu~%llu\n",
1258                                      offset, len);
1259                                 osd_req_op_extent_osd_data_pages(req, op_idx,
1260                                                         data_pages, len, 0,
1261                                                         from_pool, false);
1262                                 osd_req_op_extent_update(req, op_idx, len);
1263
1264                                 len = 0;
1265                                 offset = cur_offset;
1266                                 data_pages = pages + i;
1267                                 op_idx++;
1268                         }
1269
1270                         set_page_writeback(page);
1271                         if (caching)
1272                                 ceph_set_page_fscache(page);
1273                         len += thp_size(page);
1274                 }
1275                 ceph_fscache_write_to_cache(inode, offset, len, caching);
1276
1277                 if (ceph_wbc.size_stable) {
1278                         len = min(len, ceph_wbc.i_size - offset);
1279                 } else if (i == locked_pages) {
1280                         /* writepages_finish() clears writeback pages
1281                          * according to the data length, so make sure
1282                          * data length covers all locked pages */
1283                         u64 min_len = len + 1 - thp_size(page);
1284                         len = get_writepages_data_length(inode, pages[i - 1],
1285                                                          offset);
1286                         len = max(len, min_len);
1287                 }
1288                 if (IS_ENCRYPTED(inode))
1289                         len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
1290
1291                 dout("writepages got pages at %llu~%llu\n", offset, len);
1292
1293                 if (IS_ENCRYPTED(inode) &&
1294                     ((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
1295                         pr_warn("%s: bad encrypted write offset=%lld len=%llu\n",
1296                                 __func__, offset, len);
1297
1298                 osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
1299                                                  0, from_pool, false);
1300                 osd_req_op_extent_update(req, op_idx, len);
1301
1302                 BUG_ON(op_idx + 1 != req->r_num_ops);
1303
1304                 from_pool = false;
1305                 if (i < locked_pages) {
1306                         BUG_ON(num_ops <= req->r_num_ops);
1307                         num_ops -= req->r_num_ops;
1308                         locked_pages -= i;
1309
1310                         /* allocate new pages array for next request */
1311                         data_pages = pages;
1312                         pages = kmalloc_array(locked_pages, sizeof(*pages),
1313                                               GFP_NOFS);
1314                         if (!pages) {
1315                                 from_pool = true;
1316                                 pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS);
1317                                 BUG_ON(!pages);
1318                         }
1319                         memcpy(pages, data_pages + i,
1320                                locked_pages * sizeof(*pages));
1321                         memset(data_pages + i, 0,
1322                                locked_pages * sizeof(*pages));
1323                 } else {
1324                         BUG_ON(num_ops != req->r_num_ops);
1325                         index = pages[i - 1]->index + 1;
1326                         /* request message now owns the pages array */
1327                         pages = NULL;
1328                 }
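
                /*
                 * Ownership note (illustrative): whichever pages array ended
                 * up attached to the request is released by
                 * writepages_finish() when the OSD reply arrives, so the
                 * submission path below must not touch it after
                 * ceph_osdc_start_request().
                 */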
1329
1330                 req->r_mtime = inode->i_mtime;
1331                 ceph_osdc_start_request(&fsc->client->osdc, req);
1332                 req = NULL;
1333
1334                 wbc->nr_to_write -= i;
1335                 if (pages)
1336                         goto new_request;
1337
1338                 /*
1339                  * We stop writing back only if we are not doing
1340                  * integrity sync. In case of integrity sync we have to
1341                  * keep going until we have written all the pages
1342                  * we tagged for writeback prior to entering this loop.
1343                  */
1344                 if (wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE)
1345                         done = true;
1346
1347 release_folios:
1348                 dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
1349                      fbatch.nr ? fbatch.folios[0] : NULL);
1350                 folio_batch_release(&fbatch);
1351         }
1352
1353         if (should_loop && !done) {
1354                 /* more to do; loop back to beginning of file */
1355                 dout("writepages looping back to beginning of file\n");
1356                 end = start_index - 1; /* OK even when start_index == 0 */
1357
1358                 /* to write dirty pages associated with next snapc,
1359                  * we need to wait until current writes complete */
1360                 if (wbc->sync_mode != WB_SYNC_NONE &&
1361                     start_index == 0 && /* all dirty pages were checked */
1362                     !ceph_wbc.head_snapc) {
1363                         struct page *page;
1364                         unsigned i, nr;
1365                         index = 0;
1366                         while ((index <= end) &&
1367                                (nr = filemap_get_folios_tag(mapping, &index,
1368                                                 (pgoff_t)-1,
1369                                                 PAGECACHE_TAG_WRITEBACK,
1370                                                 &fbatch))) {
1371                                 for (i = 0; i < nr; i++) {
1372                                         page = &fbatch.folios[i]->page;
1373                                         if (page_snap_context(page) != snapc)
1374                                                 continue;
1375                                         wait_on_page_writeback(page);
1376                                 }
1377                                 folio_batch_release(&fbatch);
1378                                 cond_resched();
1379                         }
1380                 }
1381
1382                 start_index = 0;
1383                 index = 0;
1384                 goto retry;
1385         }
1386
1387         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1388                 mapping->writeback_index = index;
1389
1390 out:
1391         ceph_osdc_put_request(req);
1392         ceph_put_snap_context(last_snapc);
1393         dout("writepages done, rc = %d\n", rc);
1394         return rc;
1395 }
1396
1397
1398
1399 /*
1400  * See if a given @snapc is either writeable, or already written.
1401  */
1402 static int context_is_writeable_or_written(struct inode *inode,
1403                                            struct ceph_snap_context *snapc)
1404 {
1405         struct ceph_snap_context *oldest = get_oldest_context(inode, NULL, NULL);
1406         int ret = !oldest || snapc->seq <= oldest->seq;
1407
1408         ceph_put_snap_context(oldest);
1409         return ret;
1410 }
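
/*
 * A minimal usage sketch (hypothetical helper, not called in this file):
 * a caller that finds a page dirtied under an older snapc kicks off
 * writeback and sleeps until that context becomes writeable or written,
 * which is exactly the pattern ceph_netfs_check_write_begin() and
 * ceph_page_mkwrite() use below.
 */
static inline int __example_wait_snapc_writeable(struct inode *inode,
						 struct ceph_snap_context *snapc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ceph_queue_writeback(inode);
	return wait_event_killable(ci->i_cap_wq,
			context_is_writeable_or_written(inode, snapc));
}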
1411
1412 /**
1413  * ceph_find_incompatible - find an incompatible context and return it
1414  * @page: page being dirtied
1415  *
1416  * We are only allowed to write into/dirty a page if the page is
1417  * clean, or already dirty within the same snap context. Returns a
1418  * conflicting context if there is one, NULL if there isn't, or an
1419  * ERR_PTR-encoded negative error code on other errors.
1420  *
1421  * Must be called with page lock held.
1422  */
1423 static struct ceph_snap_context *
1424 ceph_find_incompatible(struct page *page)
1425 {
1426         struct inode *inode = page->mapping->host;
1427         struct ceph_inode_info *ci = ceph_inode(inode);
1428
1429         if (ceph_inode_is_shutdown(inode)) {
1430                 dout(" page %p %llx:%llx is shutdown\n", page,
1431                      ceph_vinop(inode));
1432                 return ERR_PTR(-ESTALE);
1433         }
1434
1435         for (;;) {
1436                 struct ceph_snap_context *snapc, *oldest;
1437
1438                 wait_on_page_writeback(page);
1439
1440                 snapc = page_snap_context(page);
1441                 if (!snapc || snapc == ci->i_head_snapc)
1442                         break;
1443
1444                 /*
1445                  * this page is already dirty in another (older) snap
1446                  * context!  is it writeable now?
1447                  */
1448                 oldest = get_oldest_context(inode, NULL, NULL);
1449                 if (snapc->seq > oldest->seq) {
1450                         /* not writeable -- return it for the caller to deal with */
1451                         ceph_put_snap_context(oldest);
1452                         dout(" page %p snapc %p not current or oldest\n", page, snapc);
1453                         return ceph_get_snap_context(snapc);
1454                 }
1455                 ceph_put_snap_context(oldest);
1456
1457                 /* yay, writeable, do it now (without dropping page lock) */
1458                 dout(" page %p snapc %p not current, but oldest\n", page, snapc);
1459                 if (clear_page_dirty_for_io(page)) {
1460                         int r = writepage_nounlock(page, NULL);
1461                         if (r < 0)
1462                                 return ERR_PTR(r);
1463                 }
1464         }
1465         return NULL;
1466 }
1467
1468 static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
1469                                         struct folio **foliop, void **_fsdata)
1470 {
1471         struct inode *inode = file_inode(file);
1472         struct ceph_inode_info *ci = ceph_inode(inode);
1473         struct ceph_snap_context *snapc;
1474
1475         snapc = ceph_find_incompatible(folio_page(*foliop, 0));
1476         if (snapc) {
1477                 int r;
1478
1479                 folio_unlock(*foliop);
1480                 folio_put(*foliop);
1481                 *foliop = NULL;
1482                 if (IS_ERR(snapc))
1483                         return PTR_ERR(snapc);
1484
1485                 ceph_queue_writeback(inode);
1486                 r = wait_event_killable(ci->i_cap_wq,
1487                                         context_is_writeable_or_written(inode, snapc));
1488                 ceph_put_snap_context(snapc);
1489                 return r == 0 ? -EAGAIN : r;
1490         }
1491         return 0;
1492 }
1493
1494 /*
1495  * We are only allowed to write into/dirty the page if the page is
1496  * clean, or already dirty within the same snap context.
1497  */
1498 static int ceph_write_begin(struct file *file, struct address_space *mapping,
1499                             loff_t pos, unsigned len,
1500                             struct page **pagep, void **fsdata)
1501 {
1502         struct inode *inode = file_inode(file);
1503         struct ceph_inode_info *ci = ceph_inode(inode);
1504         struct folio *folio = NULL;
1505         int r;
1506
1507         r = netfs_write_begin(&ci->netfs, file, inode->i_mapping, pos, len, &folio, NULL);
1508         if (r < 0)
1509                 return r;
1510
1511         folio_wait_fscache(folio);
1512         WARN_ON_ONCE(!folio_test_locked(folio));
1513         *pagep = &folio->page;
1514         return 0;
1515 }
1516
1517 /*
1518  * we don't do anything in here that simple_write_end doesn't do
1519  * except adjust dirty page accounting
1520  */
1521 static int ceph_write_end(struct file *file, struct address_space *mapping,
1522                           loff_t pos, unsigned len, unsigned copied,
1523                           struct page *subpage, void *fsdata)
1524 {
1525         struct folio *folio = page_folio(subpage);
1526         struct inode *inode = file_inode(file);
1527         bool check_cap = false;
1528
1529         dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
1530              inode, folio, (int)pos, (int)copied, (int)len);
1531
1532         if (!folio_test_uptodate(folio)) {
1533                 /* just return that nothing was copied on a short copy */
1534                 if (copied < len) {
1535                         copied = 0;
1536                         goto out;
1537                 }
1538                 folio_mark_uptodate(folio);
1539         }
1540
1541         /* did file size increase? */
1542         if (pos+copied > i_size_read(inode))
1543                 check_cap = ceph_inode_set_size(inode, pos+copied);
1544
1545         folio_mark_dirty(folio);
1546
1547 out:
1548         folio_unlock(folio);
1549         folio_put(folio);
1550
1551         if (check_cap)
1552                 ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);
1553
1554         return copied;
1555 }
1556
1557 const struct address_space_operations ceph_aops = {
1558         .read_folio = netfs_read_folio,
1559         .readahead = netfs_readahead,
1560         .writepage = ceph_writepage,
1561         .writepages = ceph_writepages_start,
1562         .write_begin = ceph_write_begin,
1563         .write_end = ceph_write_end,
1564         .dirty_folio = ceph_dirty_folio,
1565         .invalidate_folio = ceph_invalidate_folio,
1566         .release_folio = ceph_release_folio,
1567         .direct_IO = noop_direct_IO,
1568 };
1569
1570 static void ceph_block_sigs(sigset_t *oldset)
1571 {
1572         sigset_t mask;
1573         siginitsetinv(&mask, sigmask(SIGKILL));
1574         sigprocmask(SIG_BLOCK, &mask, oldset);
1575 }
1576
1577 static void ceph_restore_sigs(sigset_t *oldset)
1578 {
1579         sigprocmask(SIG_SETMASK, oldset, NULL);
1580 }
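
/*
 * Sketch of the intended pairing (illustrative helper, not called
 * anywhere): the fault handlers below bracket the potentially long cap
 * wait with these two calls so that everything except SIGKILL is
 * blocked while we sleep.
 */
static inline void __example_sig_bracket(void)
{
	sigset_t oldset;

	ceph_block_sigs(&oldset);
	/* ... ceph_get_caps() and the actual fault work go here ... */
	ceph_restore_sigs(&oldset);
}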
1581
1582 /*
1583  * vm ops
1584  */
1585 static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
1586 {
1587         struct vm_area_struct *vma = vmf->vma;
1588         struct inode *inode = file_inode(vma->vm_file);
1589         struct ceph_inode_info *ci = ceph_inode(inode);
1590         struct ceph_file_info *fi = vma->vm_file->private_data;
1591         loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
1592         int want, got, err;
1593         sigset_t oldset;
1594         vm_fault_t ret = VM_FAULT_SIGBUS;
1595
1596         if (ceph_inode_is_shutdown(inode))
1597                 return ret;
1598
1599         ceph_block_sigs(&oldset);
1600
1601         dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
1602              inode, ceph_vinop(inode), off);
1603         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1604                 want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1605         else
1606                 want = CEPH_CAP_FILE_CACHE;
1607
1608         got = 0;
1609         err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_RD, want, -1, &got);
1610         if (err < 0)
1611                 goto out_restore;
1612
1613         dout("filemap_fault %p %llu got cap refs on %s\n",
1614              inode, off, ceph_cap_string(got));
1615
1616         if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
1617             !ceph_has_inline_data(ci)) {
1618                 CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1619                 ceph_add_rw_context(fi, &rw_ctx);
1620                 ret = filemap_fault(vmf);
1621                 ceph_del_rw_context(fi, &rw_ctx);
1622                 dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
1623                      inode, off, ceph_cap_string(got), ret);
1624         } else
1625                 err = -EAGAIN;
1626
1627         ceph_put_cap_refs(ci, got);
1628
1629         if (err != -EAGAIN)
1630                 goto out_restore;
1631
1632         /* read inline data */
1633         if (off >= PAGE_SIZE) {
1634                 /* does not support inline data > PAGE_SIZE */
1635                 ret = VM_FAULT_SIGBUS;
1636         } else {
1637                 struct address_space *mapping = inode->i_mapping;
1638                 struct page *page;
1639
1640                 filemap_invalidate_lock_shared(mapping);
1641                 page = find_or_create_page(mapping, 0,
1642                                 mapping_gfp_constraint(mapping, ~__GFP_FS));
1643                 if (!page) {
1644                         ret = VM_FAULT_OOM;
1645                         goto out_inline;
1646                 }
1647                 err = __ceph_do_getattr(inode, page,
1648                                          CEPH_STAT_CAP_INLINE_DATA, true);
1649                 if (err < 0 || off >= i_size_read(inode)) {
1650                         unlock_page(page);
1651                         put_page(page);
1652                         ret = vmf_error(err);
1653                         goto out_inline;
1654                 }
1655                 if (err < PAGE_SIZE)
1656                         zero_user_segment(page, err, PAGE_SIZE);
1657                 else
1658                         flush_dcache_page(page);
1659                 SetPageUptodate(page);
1660                 vmf->page = page;
1661                 ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
1662 out_inline:
1663                 filemap_invalidate_unlock_shared(mapping);
1664                 dout("filemap_fault %p %llu read inline data ret %x\n",
1665                      inode, off, ret);
1666         }
1667 out_restore:
1668         ceph_restore_sigs(&oldset);
1669         if (err < 0)
1670                 ret = vmf_error(err);
1671
1672         return ret;
1673 }
1674
1675 static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
1676 {
1677         struct vm_area_struct *vma = vmf->vma;
1678         struct inode *inode = file_inode(vma->vm_file);
1679         struct ceph_inode_info *ci = ceph_inode(inode);
1680         struct ceph_file_info *fi = vma->vm_file->private_data;
1681         struct ceph_cap_flush *prealloc_cf;
1682         struct page *page = vmf->page;
1683         loff_t off = page_offset(page);
1684         loff_t size = i_size_read(inode);
1685         size_t len;
1686         int want, got, err;
1687         sigset_t oldset;
1688         vm_fault_t ret = VM_FAULT_SIGBUS;
1689
1690         if (ceph_inode_is_shutdown(inode))
1691                 return ret;
1692
1693         prealloc_cf = ceph_alloc_cap_flush();
1694         if (!prealloc_cf)
1695                 return VM_FAULT_OOM;
1696
1697         sb_start_pagefault(inode->i_sb);
1698         ceph_block_sigs(&oldset);
1699
1700         if (off + thp_size(page) <= size)
1701                 len = thp_size(page);
1702         else
1703                 len = offset_in_thp(page, size);
1704
1705         dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
1706              inode, ceph_vinop(inode), off, len, size);
1707         if (fi->fmode & CEPH_FILE_MODE_LAZY)
1708                 want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1709         else
1710                 want = CEPH_CAP_FILE_BUFFER;
1711
1712         got = 0;
1713         err = ceph_get_caps(vma->vm_file, CEPH_CAP_FILE_WR, want, off + len, &got);
1714         if (err < 0)
1715                 goto out_free;
1716
1717         dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
1718              inode, off, len, ceph_cap_string(got));
1719
1720         /* Update time before taking page lock */
1721         file_update_time(vma->vm_file);
1722         inode_inc_iversion_raw(inode);
1723
1724         do {
1725                 struct ceph_snap_context *snapc;
1726
1727                 lock_page(page);
1728
1729                 if (page_mkwrite_check_truncate(page, inode) < 0) {
1730                         unlock_page(page);
1731                         ret = VM_FAULT_NOPAGE;
1732                         break;
1733                 }
1734
1735                 snapc = ceph_find_incompatible(page);
1736                 if (!snapc) {
1737                         /* success.  we'll keep the page locked. */
1738                         set_page_dirty(page);
1739                         ret = VM_FAULT_LOCKED;
1740                         break;
1741                 }
1742
1743                 unlock_page(page);
1744
1745                 if (IS_ERR(snapc)) {
1746                         ret = VM_FAULT_SIGBUS;
1747                         break;
1748                 }
1749
1750                 ceph_queue_writeback(inode);
1751                 err = wait_event_killable(ci->i_cap_wq,
1752                                 context_is_writeable_or_written(inode, snapc));
1753                 ceph_put_snap_context(snapc);
1754         } while (err == 0);
1755
1756         if (ret == VM_FAULT_LOCKED) {
1757                 int dirty;
1758                 spin_lock(&ci->i_ceph_lock);
1759                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1760                                                &prealloc_cf);
1761                 spin_unlock(&ci->i_ceph_lock);
1762                 if (dirty)
1763                         __mark_inode_dirty(inode, dirty);
1764         }
1765
1766         dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
1767              inode, off, len, ceph_cap_string(got), ret);
1768         ceph_put_cap_refs_async(ci, got);
1769 out_free:
1770         ceph_restore_sigs(&oldset);
1771         sb_end_pagefault(inode->i_sb);
1772         ceph_free_cap_flush(prealloc_cf);
1773         if (err < 0)
1774                 ret = vmf_error(err);
1775         return ret;
1776 }
1777
1778 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1779                            char *data, size_t len)
1780 {
1781         struct address_space *mapping = inode->i_mapping;
1782         struct page *page;
1783
1784         if (locked_page) {
1785                 page = locked_page;
1786         } else {
1787                 if (i_size_read(inode) == 0)
1788                         return;
1789                 page = find_or_create_page(mapping, 0,
1790                                            mapping_gfp_constraint(mapping,
1791                                            ~__GFP_FS));
1792                 if (!page)
1793                         return;
1794                 if (PageUptodate(page)) {
1795                         unlock_page(page);
1796                         put_page(page);
1797                         return;
1798                 }
1799         }
1800
1801         dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
1802              inode, ceph_vinop(inode), len, locked_page);
1803
1804         if (len > 0) {
1805                 void *kaddr = kmap_atomic(page);
1806                 memcpy(kaddr, data, len);
1807                 kunmap_atomic(kaddr);
1808         }
1809
1810         if (page != locked_page) {
1811                 if (len < PAGE_SIZE)
1812                         zero_user_segment(page, len, PAGE_SIZE);
1813                 else
1814                         flush_dcache_page(page);
1815
1816                 SetPageUptodate(page);
1817                 unlock_page(page);
1818                 put_page(page);
1819         }
1820 }
1821
1822 int ceph_uninline_data(struct file *file)
1823 {
1824         struct inode *inode = file_inode(file);
1825         struct ceph_inode_info *ci = ceph_inode(inode);
1826         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1827         struct ceph_osd_request *req = NULL;
1828         struct ceph_cap_flush *prealloc_cf = NULL;
1829         struct folio *folio = NULL;
1830         u64 inline_version = CEPH_INLINE_NONE;
1831         struct page *pages[1];
1832         int err = 0;
1833         u64 len;
1834
1835         spin_lock(&ci->i_ceph_lock);
1836         inline_version = ci->i_inline_version;
1837         spin_unlock(&ci->i_ceph_lock);
1838
1839         dout("uninline_data %p %llx.%llx inline_version %llu\n",
1840              inode, ceph_vinop(inode), inline_version);
1841
1842         if (ceph_inode_is_shutdown(inode)) {
1843                 err = -EIO;
1844                 goto out;
1845         }
1846
1847         if (inline_version == CEPH_INLINE_NONE)
1848                 return 0;
1849
1850         prealloc_cf = ceph_alloc_cap_flush();
1851         if (!prealloc_cf)
1852                 return -ENOMEM;
1853
1854         if (inline_version == 1) /* initial version, no data */
1855                 goto out_uninline;
1856
1857         folio = read_mapping_folio(inode->i_mapping, 0, file);
1858         if (IS_ERR(folio)) {
1859                 err = PTR_ERR(folio);
1860                 goto out;
1861         }
1862
1863         folio_lock(folio);
1864
1865         len = i_size_read(inode);
1866         if (len > folio_size(folio))
1867                 len = folio_size(folio);
1868
1869         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1870                                     ceph_vino(inode), 0, &len, 0, 1,
1871                                     CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
1872                                     NULL, 0, 0, false);
1873         if (IS_ERR(req)) {
1874                 err = PTR_ERR(req);
1875                 goto out_unlock;
1876         }
1877
1878         req->r_mtime = inode->i_mtime;
1879         ceph_osdc_start_request(&fsc->client->osdc, req);
1880         err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1881         ceph_osdc_put_request(req);
1882         if (err < 0)
1883                 goto out_unlock;
1884
1885         req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1886                                     ceph_vino(inode), 0, &len, 1, 3,
1887                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
1888                                     NULL, ci->i_truncate_seq,
1889                                     ci->i_truncate_size, false);
1890         if (IS_ERR(req)) {
1891                 err = PTR_ERR(req);
1892                 goto out_unlock;
1893         }
1894
1895         pages[0] = folio_page(folio, 0);
1896         osd_req_op_extent_osd_data_pages(req, 1, pages, len, 0, false, false);
1897
1898         {
1899                 __le64 xattr_buf = cpu_to_le64(inline_version);
1900                 err = osd_req_op_xattr_init(req, 0, CEPH_OSD_OP_CMPXATTR,
1901                                             "inline_version", &xattr_buf,
1902                                             sizeof(xattr_buf),
1903                                             CEPH_OSD_CMPXATTR_OP_GT,
1904                                             CEPH_OSD_CMPXATTR_MODE_U64);
1905                 if (err)
1906                         goto out_put_req;
1907         }
1908
1909         {
1910                 char xattr_buf[32];
1911                 int xattr_len = snprintf(xattr_buf, sizeof(xattr_buf),
1912                                          "%llu", inline_version);
1913                 err = osd_req_op_xattr_init(req, 2, CEPH_OSD_OP_SETXATTR,
1914                                             "inline_version",
1915                                             xattr_buf, xattr_len, 0, 0);
1916                 if (err)
1917                         goto out_put_req;
1918         }
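
                /*
                 * Illustrative summary: op 0 (CMPXATTR, U64 mode, GT) is
                 * assumed to let the write proceed only when our
                 * inline_version compares greater than the one already
                 * recorded on the object, op 1 carries the page data, and
                 * op 2 records the new version.  If another client raced us
                 * and won, the request fails with -ECANCELED, which is
                 * treated as success below.
                 */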
1919
1920         req->r_mtime = inode->i_mtime;
1921         ceph_osdc_start_request(&fsc->client->osdc, req);
1922         err = ceph_osdc_wait_request(&fsc->client->osdc, req);
1923
1924         ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1925                                   req->r_end_latency, len, err);
1926
1927 out_uninline:
1928         if (!err) {
1929                 int dirty;
1930
1931                 /* Set to CEPH_INLINE_NONE and dirty the caps */
1932                 down_read(&fsc->mdsc->snap_rwsem);
1933                 spin_lock(&ci->i_ceph_lock);
1934                 ci->i_inline_version = CEPH_INLINE_NONE;
1935                 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR, &prealloc_cf);
1936                 spin_unlock(&ci->i_ceph_lock);
1937                 up_read(&fsc->mdsc->snap_rwsem);
1938                 if (dirty)
1939                         __mark_inode_dirty(inode, dirty);
1940         }
1941 out_put_req:
1942         ceph_osdc_put_request(req);
1943         if (err == -ECANCELED)
1944                 err = 0;
1945 out_unlock:
1946         if (folio) {
1947                 folio_unlock(folio);
1948                 folio_put(folio);
1949         }
1950 out:
1951         ceph_free_cap_flush(prealloc_cf);
1952         dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
1953              inode, ceph_vinop(inode), inline_version, err);
1954         return err;
1955 }
1956
1957 static const struct vm_operations_struct ceph_vmops = {
1958         .fault          = ceph_filemap_fault,
1959         .page_mkwrite   = ceph_page_mkwrite,
1960 };
1961
1962 int ceph_mmap(struct file *file, struct vm_area_struct *vma)
1963 {
1964         struct address_space *mapping = file->f_mapping;
1965
1966         if (!mapping->a_ops->read_folio)
1967                 return -ENOEXEC;
1968         vma->vm_ops = &ceph_vmops;
1969         return 0;
1970 }
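
/*
 * Wiring sketch (assumption: the standard file_operations hookup done
 * in fs/ceph/file.c): ->mmap only has to install ceph_vmops, after
 * which page faults and first writes to a shared mapping funnel into
 * ceph_filemap_fault() and ceph_page_mkwrite() above.
 *
 *	const struct file_operations ceph_file_fops = {
 *		...
 *		.mmap		= ceph_mmap,
 *		...
 *	};
 */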
1971
1972 enum {
1973         POOL_READ       = 1,
1974         POOL_WRITE      = 2,
1975 };
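
/*
 * Worked example: __ceph_pool_perm_get() below returns a bitmask of the
 * values above (or a negative errno), so a pool that allows both access
 * modes yields POOL_READ | POOL_WRITE == 3.  ceph_pool_perm_check()
 * then translates that into the CEPH_I_POOL_RD and CEPH_I_POOL_WR flags
 * it caches in i_ceph_flags.
 */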
1976
1977 static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
1978                                 s64 pool, struct ceph_string *pool_ns)
1979 {
1980         struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
1981         struct ceph_mds_client *mdsc = fsc->mdsc;
1982         struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
1983         struct rb_node **p, *parent;
1984         struct ceph_pool_perm *perm;
1985         struct page **pages;
1986         size_t pool_ns_len;
1987         int err = 0, err2 = 0, have = 0;
1988
1989         down_read(&mdsc->pool_perm_rwsem);
1990         p = &mdsc->pool_perm_tree.rb_node;
1991         while (*p) {
1992                 perm = rb_entry(*p, struct ceph_pool_perm, node);
1993                 if (pool < perm->pool)
1994                         p = &(*p)->rb_left;
1995                 else if (pool > perm->pool)
1996                         p = &(*p)->rb_right;
1997                 else {
1998                         int ret = ceph_compare_string(pool_ns,
1999                                                 perm->pool_ns,
2000                                                 perm->pool_ns_len);
2001                         if (ret < 0)
2002                                 p = &(*p)->rb_left;
2003                         else if (ret > 0)
2004                                 p = &(*p)->rb_right;
2005                         else {
2006                                 have = perm->perm;
2007                                 break;
2008                         }
2009                 }
2010         }
2011         up_read(&mdsc->pool_perm_rwsem);
2012         if (*p)
2013                 goto out;
2014
2015         if (pool_ns)
2016                 dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
2017                      pool, (int)pool_ns->len, pool_ns->str);
2018         else
2019                 dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
2020
2021         down_write(&mdsc->pool_perm_rwsem);
2022         p = &mdsc->pool_perm_tree.rb_node;
2023         parent = NULL;
2024         while (*p) {
2025                 parent = *p;
2026                 perm = rb_entry(parent, struct ceph_pool_perm, node);
2027                 if (pool < perm->pool)
2028                         p = &(*p)->rb_left;
2029                 else if (pool > perm->pool)
2030                         p = &(*p)->rb_right;
2031                 else {
2032                         int ret = ceph_compare_string(pool_ns,
2033                                                 perm->pool_ns,
2034                                                 perm->pool_ns_len);
2035                         if (ret < 0)
2036                                 p = &(*p)->rb_left;
2037                         else if (ret > 0)
2038                                 p = &(*p)->rb_right;
2039                         else {
2040                                 have = perm->perm;
2041                                 break;
2042                         }
2043                 }
2044         }
2045         if (*p) {
2046                 up_write(&mdsc->pool_perm_rwsem);
2047                 goto out;
2048         }
2049
2050         rd_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
2051                                          1, false, GFP_NOFS);
2052         if (!rd_req) {
2053                 err = -ENOMEM;
2054                 goto out_unlock;
2055         }
2056
2057         rd_req->r_flags = CEPH_OSD_FLAG_READ;
2058         osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
2059         rd_req->r_base_oloc.pool = pool;
2060         if (pool_ns)
2061                 rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
2062         ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
2063
2064         err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
2065         if (err)
2066                 goto out_unlock;
2067
2068         wr_req = ceph_osdc_alloc_request(&fsc->client->osdc, NULL,
2069                                          1, false, GFP_NOFS);
2070         if (!wr_req) {
2071                 err = -ENOMEM;
2072                 goto out_unlock;
2073         }
2074
2075         wr_req->r_flags = CEPH_OSD_FLAG_WRITE;
2076         osd_req_op_init(wr_req, 0, CEPH_OSD_OP_CREATE, CEPH_OSD_OP_FLAG_EXCL);
2077         ceph_oloc_copy(&wr_req->r_base_oloc, &rd_req->r_base_oloc);
2078         ceph_oid_copy(&wr_req->r_base_oid, &rd_req->r_base_oid);
2079
2080         err = ceph_osdc_alloc_messages(wr_req, GFP_NOFS);
2081         if (err)
2082                 goto out_unlock;
2083
2084         /* one page should be large enough for STAT data */
2085         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
2086         if (IS_ERR(pages)) {
2087                 err = PTR_ERR(pages);
2088                 goto out_unlock;
2089         }
2090
2091         osd_req_op_raw_data_in_pages(rd_req, 0, pages, PAGE_SIZE,
2092                                      0, false, true);
2093         ceph_osdc_start_request(&fsc->client->osdc, rd_req);
2094
2095         wr_req->r_mtime = ci->netfs.inode.i_mtime;
2096         ceph_osdc_start_request(&fsc->client->osdc, wr_req);
2097
2098         err = ceph_osdc_wait_request(&fsc->client->osdc, rd_req);
2099         err2 = ceph_osdc_wait_request(&fsc->client->osdc, wr_req);
2100
2101         if (err >= 0 || err == -ENOENT)
2102                 have |= POOL_READ;
2103         else if (err != -EPERM) {
2104                 if (err == -EBLOCKLISTED)
2105                         fsc->blocklisted = true;
2106                 goto out_unlock;
2107         }
2108
2109         if (err2 == 0 || err2 == -EEXIST)
2110                 have |= POOL_WRITE;
2111         else if (err2 != -EPERM) {
2112                 if (err2 == -EBLOCKLISTED)
2113                         fsc->blocklisted = true;
2114                 err = err2;
2115                 goto out_unlock;
2116         }
2117
2118         pool_ns_len = pool_ns ? pool_ns->len : 0;
2119         perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
2120         if (!perm) {
2121                 err = -ENOMEM;
2122                 goto out_unlock;
2123         }
2124
2125         perm->pool = pool;
2126         perm->perm = have;
2127         perm->pool_ns_len = pool_ns_len;
2128         if (pool_ns_len > 0)
2129                 memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
2130         perm->pool_ns[pool_ns_len] = 0;
2131
2132         rb_link_node(&perm->node, parent, p);
2133         rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
2134         err = 0;
2135 out_unlock:
2136         up_write(&mdsc->pool_perm_rwsem);
2137
2138         ceph_osdc_put_request(rd_req);
2139         ceph_osdc_put_request(wr_req);
2140 out:
2141         if (!err)
2142                 err = have;
2143         if (pool_ns)
2144                 dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
2145                      pool, (int)pool_ns->len, pool_ns->str, err);
2146         else
2147                 dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
2148         return err;
2149 }
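
/*
 * Design note (illustrative): the permission cache is deliberately
 * probed twice: once under the read lock for the fast path, and again
 * under the write lock in case another task inserted the same entry
 * while the lock was dropped.  Only a confirmed miss pays for the probe
 * I/O (one read and one exclusive create against the data pool).
 */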
2150
2151 int ceph_pool_perm_check(struct inode *inode, int need)
2152 {
2153         struct ceph_inode_info *ci = ceph_inode(inode);
2154         struct ceph_string *pool_ns;
2155         s64 pool;
2156         int ret, flags;
2157
2158         /* Only need to do this for regular files */
2159         if (!S_ISREG(inode->i_mode))
2160                 return 0;
2161
2162         if (ci->i_vino.snap != CEPH_NOSNAP) {
2163                 /*
2164                  * Pool permission check needs to write to the first object.
2165                  * But for snapshots, the head of the first object may already
2166                  * have been deleted. Skip the check to avoid orphan objects.
2167                  */
2168                 return 0;
2169         }
2170
2171         if (ceph_test_mount_opt(ceph_inode_to_client(inode),
2172                                 NOPOOLPERM))
2173                 return 0;
2174
2175         spin_lock(&ci->i_ceph_lock);
2176         flags = ci->i_ceph_flags;
2177         pool = ci->i_layout.pool_id;
2178         spin_unlock(&ci->i_ceph_lock);
2179 check:
2180         if (flags & CEPH_I_POOL_PERM) {
2181                 if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
2182                         dout("ceph_pool_perm_check pool %lld no read perm\n",
2183                              pool);
2184                         return -EPERM;
2185                 }
2186                 if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
2187                         dout("ceph_pool_perm_check pool %lld no write perm\n",
2188                              pool);
2189                         return -EPERM;
2190                 }
2191                 return 0;
2192         }
2193
2194         pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
2195         ret = __ceph_pool_perm_get(ci, pool, pool_ns);
2196         ceph_put_string(pool_ns);
2197         if (ret < 0)
2198                 return ret;
2199
2200         flags = CEPH_I_POOL_PERM;
2201         if (ret & POOL_READ)
2202                 flags |= CEPH_I_POOL_RD;
2203         if (ret & POOL_WRITE)
2204                 flags |= CEPH_I_POOL_WR;
2205
2206         spin_lock(&ci->i_ceph_lock);
2207         if (pool == ci->i_layout.pool_id &&
2208             pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
2209                 ci->i_ceph_flags |= flags;
2210         } else {
2211                 pool = ci->i_layout.pool_id;
2212                 flags = ci->i_ceph_flags;
2213         }
2214         spin_unlock(&ci->i_ceph_lock);
2215         goto check;
2216 }
2217
2218 void ceph_pool_perm_destroy(struct ceph_mds_client *mdsc)
2219 {
2220         struct ceph_pool_perm *perm;
2221         struct rb_node *n;
2222
2223         while (!RB_EMPTY_ROOT(&mdsc->pool_perm_tree)) {
2224                 n = rb_first(&mdsc->pool_perm_tree);
2225                 perm = rb_entry(n, struct ceph_pool_perm, node);
2226                 rb_erase(n, &mdsc->pool_perm_tree);
2227                 kfree(perm);
2228         }
2229 }