fs/fuse/dev.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

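/*
 * Reset a (possibly recycled) request to a pristine state holding a
 * single reference for the caller.
 */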
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
	if (req)
		fuse_request_init(req);
	return req;
}

void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}

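/*
 * Block all signals except SIGKILL, so that only a fatal signal can
 * interrupt the wait that follows; the old mask is saved in *oldset
 * for restore_sigs().
 */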
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current_fsuid();
	req->in.h.gid = current_fsgid();
	req->in.h.pid = current->pid;
}

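/*
 * Allocate a request and fill in the caller's credentials.  Waits
 * (interruptible only by a fatal signal) while the connection is
 * blocked; fails with -EINTR, -ENOTCONN or -ENOMEM.
 */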
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Get a request for a file operation; always succeeds.
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

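/* Total size in bytes of an array of request arguments */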
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

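/*
 * Allocate the next unique request ID.  Zero is reserved for
 * unsolicited notifications, so skip it on wraparound.  Called under
 * fc->lock.
 */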
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}

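/*
 * Add a request to the pending list and wake up any readers of the
 * device.  Called under fc->lock.
 */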
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	fc->forget_list_tail->next = forget;
	fc->forget_list_tail = forget;
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	spin_unlock(&fc->lock);
}

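/*
 * Move queued background requests to the pending list, as long as
 * fewer than max_background requests are active.  Called under
 * fc->lock.
 */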
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived, or it was aborted (and not yet sent), or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == fc->max_background) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}

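/* Queue an INTERRUPT for an already sent request.  Called under fc->lock. */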
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
__acquires(fc->lock)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (!req->force) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		/* Request is not yet in userspace, bail out */
		if (req->state == FUSE_REQ_PENDING) {
			list_del(&req->list);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	spin_unlock(&fc->lock);
	wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);

	if (!req->aborted)
		return;

 aborted:
	BUG_ON(req->state != FUSE_REQ_FINISHED);
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		req->in.h.unique = fuse_get_unique(fc);
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
					    struct fuse_req *req)
{
	req->background = 1;
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_nowait_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;

	req->isreply = 0;
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	if (fc->connected) {
		queue_request(fc, req);
		err = 0;
	}
	spin_unlock(&fc->lock);

	return err;
}

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	req->isreply = 1;
	fuse_request_send_nowait_locked(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there must not
 * be anything that could cause a page fault.  If the request was
 * already aborted, bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}

/*
 * Unlock the request.  If it was aborted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}

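/*
 * State for copying a request to or from userspace, either through an
 * iovec or through pipe buffers (splice).
 */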
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (!cs->write) {
			buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
		} else {
			kunmap(buf->page);
			buf->len = PAGE_SIZE - cs->len;
		}
		cs->currbuf = NULL;
		cs->mapaddr = NULL;
	} else if (cs->mapaddr) {
		kunmap(cs->pg);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}

/*
 * Get another page's worth of the userspace buffer, map it into
 * kernel address space, and lock the request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
			cs->len = buf->len;
			cs->buf = cs->mapaddr + buf->offset;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			struct page *page;

			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->mapaddr = kmap(page);
			cs->buf = cs->mapaddr;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		if (!cs->seglen) {
			BUG_ON(!cs->nr_segs);
			cs->seglen = cs->iov[0].iov_len;
			cs->addr = (unsigned long) cs->iov[0].iov_base;
			cs->iov++;
			cs->nr_segs--;
		}
		err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
		if (err < 0)
			return err;
		BUG_ON(err != 1);
		offset = cs->addr % PAGE_SIZE;
		cs->mapaddr = kmap(cs->pg);
		cs->buf = cs->mapaddr + offset;
		cs->len = min(PAGE_SIZE - offset, cs->seglen);
		cs->seglen -= cs->len;
		cs->addr += cs->len;
	}

	return lock_request(cs->fc, cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}

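/*
 * Check that a page handed over by the filesystem daemon is safe to
 * install in the page cache: unmapped, with no extra references and
 * no unexpected flags.
 */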
static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

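/*
 * Instead of copying a full page of data, try to steal the pipe
 * buffer's page and swap it into the request's page array.  Returns 1
 * (after remapping the buffer) if the page could not be moved, in
 * which case the caller falls back to copying.
 */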
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;
	struct address_space *mapping;
	pgoff_t index;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (WARN_ON(!PageUptodate(newpage)))
		return -EIO;

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	mapping = oldpage->mapping;
	index = oldpage->index;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->fc->lock);
	if (cs->req->aborted)
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->fc->lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
	cs->buf = cs->mapaddr + buf->offset;

	err = lock_request(cs->fc, cs->req);
	if (err)
		return err;

	return 1;
}

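/*
 * Splice a request page into the pipe by taking a reference on it,
 * avoiding a copy altogether.
 */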
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER0);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER0);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_conn *fc)
{
	return fc->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
		forget_pending(fc);
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

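/*
 * Unlink up to 'max' queued forgets from the singly linked forget list
 * and return them; the number dequeued is stored in *countp if it is
 * non-NULL.  Called under fc->lock.
 */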
static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fc->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fc->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fc->forget_list_head.next == NULL)
		fc->forget_list_tail = &fc->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fc),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fc, max_forgets, &count);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->lock)
{
	if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET), the request has been
 * aborted, or there was an error during the copying, then it is
 * finished by calling request_end().  Otherwise add it to the
 * processing list and set the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fc)) {
		if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fc->forget_batch <= -8)
			fc->forget_batch = 16;
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since its data may be too large */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (req->aborted) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 1, iov, nr_segs);

	return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}

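/*
 * Pages spliced out of the device may come straight from a request's
 * page cache pages, so never let the pipe reader steal them.
 */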
static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
				   struct pipe_buffer *buf)
{
	return 1;
}

static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
	.can_merge = 0,
	.map = generic_pipe_buf_map,
	.unmap = generic_pipe_buf_unmap,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = fuse_dev_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, fc, 1, NULL, 0);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		buf->ops = &fuse_dev_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->inode)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 && (num != 0 || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, 0);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	offset = outarg->offset & ~PAGE_CACHE_MASK;

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;
	file_size = i_size_read(inode);
	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

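	/*
	 * Step through the file a page at a time: bound the loop by the
	 * request's page array, reset the intra-page offset after the
	 * first page and advance the index, so the same page is not
	 * fetched repeatedly.
	 */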
	while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

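/* Dispatch an unsolicited notification from the filesystem daemon */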
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

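/*
 * Copy the reply arguments from userspace into the request.  A short
 * reply is allowed only if the last argument is variable sized
 * (out->argvar).
 */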
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, remove it from
 * the list and copy the rest of the buffer to the request.  The
 * request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * A zero oh.unique indicates an unsolicited notification message,
	 * in which case oh.error contains the notification code.
	 */
1663         if (!oh.unique) {
1664                 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1665                 return err ? err : nbytes;
1666         }
1667
1668         err = -EINVAL;
1669         if (oh.error <= -1000 || oh.error > 0)
1670                 goto err_finish;
1671
1672         spin_lock(&fc->lock);
1673         err = -ENOENT;
1674         if (!fc->connected)
1675                 goto err_unlock;
1676
1677         req = request_find(fc, oh.unique);
1678         if (!req)
1679                 goto err_unlock;
1680
1681         if (req->aborted) {
1682                 spin_unlock(&fc->lock);
1683                 fuse_copy_finish(cs);
1684                 spin_lock(&fc->lock);
1685                 request_end(fc, req);
1686                 return -ENOENT;
1687         }
1688         /* Is it an interrupt reply? */
1689         if (req->intr_unique == oh.unique) {
1690                 err = -EINVAL;
1691                 if (nbytes != sizeof(struct fuse_out_header))
1692                         goto err_unlock;
1693
1694                 if (oh.error == -ENOSYS)
1695                         fc->no_interrupt = 1;
1696                 else if (oh.error == -EAGAIN)
1697                         queue_interrupt(fc, req);
1698
1699                 spin_unlock(&fc->lock);
1700                 fuse_copy_finish(cs);
1701                 return nbytes;
1702         }
1703
1704         req->state = FUSE_REQ_WRITING;
1705         list_move(&req->list, &fc->io);
1706         req->out.h = oh;
1707         req->locked = 1;
1708         cs->req = req;
1709         if (!req->out.page_replace)
1710                 cs->move_pages = 0;
1711         spin_unlock(&fc->lock);
1712
1713         err = copy_out_args(cs, &req->out, nbytes);
1714         fuse_copy_finish(cs);
1715
1716         spin_lock(&fc->lock);
1717         req->locked = 0;
1718         if (!err) {
1719                 if (req->aborted)
1720                         err = -ENOENT;
1721         } else if (!req->aborted)
1722                 req->out.h.error = -EIO;
1723         request_end(fc, req);
1724
1725         return err ? err : nbytes;
1726
1727  err_unlock:
1728         spin_unlock(&fc->lock);
1729  err_finish:
1730         fuse_copy_finish(cs);
1731         return err;
1732 }
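
/*
 * For reference, a well-formed reply written by the daemon is a
 * fuse_out_header followed immediately by the reply arguments; error
 * replies carry the header alone.  A sketch for a FUSE_GETATTR reply,
 * with 'unique' copied from the matching fuse_in_header:
 *
 *      struct fuse_attr_out outarg = { ... };
 *      struct fuse_out_header oh = {
 *              .len    = sizeof(oh) + sizeof(outarg),
 *              .error  = 0,
 *              .unique = unique,
 *      };
 *      struct iovec iov[2] = {
 *              { .iov_base = &oh,     .iov_len = sizeof(oh) },
 *              { .iov_base = &outarg, .iov_len = sizeof(outarg) },
 *      };
 *      writev(fd, iov, 2);
 */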

static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
        if (!fc)
                return -EPERM;

        fuse_copy_init(&cs, fc, 0, iov, nr_segs);

        return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
}

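/*
 * Write a reply that arrives through a pipe.  The pipe buffers holding
 * the reply are detached (or referenced) under the pipe lock and handed
 * to fuse_dev_do_write(); with SPLICE_F_MOVE the data pages may be
 * stolen and spliced directly into the page cache instead of copied.
 */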
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
                                     struct file *out, loff_t *ppos,
                                     size_t len, unsigned int flags)
{
        unsigned nbuf;
        unsigned idx;
        struct pipe_buffer *bufs;
        struct fuse_copy_state cs;
        struct fuse_conn *fc;
        size_t rem;
        ssize_t ret;

        fc = fuse_get_conn(out);
        if (!fc)
                return -EPERM;

        bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
        if (!bufs)
                return -ENOMEM;

        pipe_lock(pipe);
        nbuf = 0;
        rem = 0;
        for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
                rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

        /* The pipe must hold at least 'len' bytes */
        ret = -EINVAL;
        if (rem < len) {
                pipe_unlock(pipe);
                goto out;
        }

        /*
         * Collect the buffers making up the reply into a private array,
         * consuming them whole where possible and taking an extra
         * reference on a partially used buffer.
         */
        rem = len;
        while (rem) {
                struct pipe_buffer *ibuf;
                struct pipe_buffer *obuf;

                BUG_ON(nbuf >= pipe->buffers);
                BUG_ON(!pipe->nrbufs);
                ibuf = &pipe->bufs[pipe->curbuf];
                obuf = &bufs[nbuf];

                if (rem >= ibuf->len) {
                        *obuf = *ibuf;
                        ibuf->ops = NULL;
                        pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
                        pipe->nrbufs--;
                } else {
                        ibuf->ops->get(pipe, ibuf);
                        *obuf = *ibuf;
                        obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
                        obuf->len = rem;
                        ibuf->offset += obuf->len;
                        ibuf->len -= obuf->len;
                }
                nbuf++;
                rem -= obuf->len;
        }
        pipe_unlock(pipe);

        fuse_copy_init(&cs, fc, 0, NULL, nbuf);
        cs.pipebufs = bufs;
        cs.pipe = pipe;

        if (flags & SPLICE_F_MOVE)
                cs.move_pages = 1;

        ret = fuse_dev_do_write(fc, &cs, len);

        for (idx = 0; idx < nbuf; idx++) {
                struct pipe_buffer *buf = &bufs[idx];
                buf->ops->release(pipe, buf);
        }
out:
        kfree(bufs);
        return ret;
}
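
/*
 * Userspace sketch of the corresponding zero-copy reply path (assuming
 * 'pipe_rd' is the read end of a pipe already filled with the complete
 * reply, header included, and 'fuse_fd' is open on /dev/fuse):
 *
 *      splice(pipe_rd, NULL, fuse_fd, NULL, reply_len, SPLICE_F_MOVE);
 */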

/*
 * The device is always writable; it becomes readable when at least one
 * request is waiting to be read by the daemon.
 */
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        unsigned mask = POLLOUT | POLLWRNORM;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return POLLERR;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fc->lock);
        if (!fc->connected)
                mask = POLLERR;
        else if (request_pending(fc))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fc->lock);

        return mask;
}
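
/*
 * Userspace sketch of multiplexing /dev/fuse with other descriptors
 * (assuming 'fuse_fd' is open on /dev/fuse and 'buf' is large enough to
 * hold any request; the kernel rejects short reads):
 *
 *      struct pollfd pfd = { .fd = fuse_fd, .events = POLLIN };
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *              read(fuse_fd, buf, bufsize);
 */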

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fc->lock);
        }
}

/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
                        list_entry(fc->io.next, struct fuse_req, list);
                void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

                req->aborted = 1;
                req->out.h.error = -ECONNABORTED;
                req->state = FUSE_REQ_FINISHED;
                list_del_init(&req->list);
                wake_up(&req->waitq);
                if (end) {
                        req->end = NULL;
                        __fuse_get_request(req);
                        spin_unlock(&fc->lock);
                        wait_event(req->waitq, !req->locked);
                        end(fc, req);
                        fuse_put_request(fc, req);
                        spin_lock(&fc->lock);
                }
        }
}

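/*
 * Flush the background queue and abort everything on the pending and
 * processing lists, then drop any queued forget requests.
 */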
static void end_queued_requests(struct fuse_conn *fc)
__releases(fc->lock)
__acquires(fc->lock)
{
        fc->max_background = UINT_MAX;
        flush_bg_queue(fc);
        end_requests(fc, &fc->pending);
        end_requests(fc, &fc->processing);
        while (forget_pending(fc))
                kfree(dequeue_forget(fc, 1, NULL));
}

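/*
 * Wake up everyone currently sleeping in a poll on a file of this
 * connection, so that they notice the connection is going away.
 */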
static void end_polls(struct fuse_conn *fc)
{
        struct rb_node *p;

        p = rb_first(&fc->polled_files);

        while (p) {
                struct fuse_file *ff;
                ff = rb_entry(p, struct fuse_file, polled_node);
                wake_up_interruptible_all(&ff->poll_wait);

                p = rb_next(p);
        }
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
        spin_lock(&fc->lock);
        if (fc->connected) {
                fc->connected = 0;
                fc->blocked = 0;
                end_io_requests(fc);
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->waitq);
                wake_up_all(&fc->blocked_waitq);
                kill_fasync(&fc->fasync, SIGIO, POLL_IN);
        }
        spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
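
/*
 * The abort can also be triggered from userspace through the fusectl
 * filesystem, e.g. (assuming fusectl is mounted in the usual place):
 *
 *      echo 1 > /sys/fs/fuse/connections/<dev-id>/abort
 */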

/*
 * The last reference to the device file is being dropped: mark the
 * connection disconnected and abort everything still queued.  Requests
 * under I/O need no special handling here, since nothing can be
 * reading from or writing to the device any more.
 */
int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (fc) {
                spin_lock(&fc->lock);
                fc->connected = 0;
                fc->blocked = 0;
                end_queued_requests(fc);
                end_polls(fc);
                wake_up_all(&fc->blocked_waitq);
                spin_unlock(&fc->lock);
                fuse_conn_put(fc);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -EPERM;

        /* No locking - fasync_helper does its own locking */
        return fasync_helper(fd, file, on, &fc->fasync);
}

const struct file_operations fuse_dev_operations = {
        .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_dev_read,
        .splice_read    = fuse_dev_splice_read,
        .write          = do_sync_write,
        .aio_write      = fuse_dev_write,
        .splice_write   = fuse_dev_splice_write,
        .poll           = fuse_dev_poll,
        .release        = fuse_dev_release,
        .fasync         = fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops  = &fuse_dev_operations,
};

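/*
 * Module initialization: create the slab cache for requests, then
 * register the /dev/fuse misc device.
 */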
int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

/* Tear down in the reverse order of fuse_dev_init() */
void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}