[AF_RXRPC]: Make the in-kernel AFS filesystem use AF_RXRPC.
[platform/kernel/linux-starfive.git] / fs / afs / callback.c
1 /*
2  * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
3  *
4  * This software may be freely redistributed under the terms of the
5  * GNU General Public License.
6  *
7  * You should have received a copy of the GNU General Public License
8  * along with this program; if not, write to the Free Software
9  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
10  *
11  * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
12  *          David Howells <dhowells@redhat.com>
13  *
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/circ_buf.h>
20 #include "internal.h"
21
/* vnode promise-update interval, in seconds */
unsigned afs_vnode_update_timeout = 10;

/* number of free slots in a server's callback-break ring buffer
 * (CIRC_SPACE requires the ring size to be a power of two) */
#define afs_breakring_space(server) \
	CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,	\
		   ARRAY_SIZE((server)->cb_break))

//static void afs_callback_updater(struct work_struct *);

/* workqueue on which callback break/giveup work items are run;
 * created in afs_callback_update_init() */
static struct workqueue_struct *afs_callback_update_worker;
31
32 /*
33  * allow the fileserver to request callback state (re-)initialisation
34  */
35 void afs_init_callback_state(struct afs_server *server)
36 {
37         struct afs_vnode *vnode;
38
39         _enter("{%p}", server);
40
41         spin_lock(&server->cb_lock);
42
43         /* kill all the promises on record from this server */
44         while (!RB_EMPTY_ROOT(&server->cb_promises)) {
45                 vnode = rb_entry(server->cb_promises.rb_node,
46                                  struct afs_vnode, cb_promise);
47                 printk("\nUNPROMISE on %p\n", vnode);
48                 rb_erase(&vnode->cb_promise, &server->cb_promises);
49                 vnode->cb_promised = false;
50         }
51
52         spin_unlock(&server->cb_lock);
53         _leave("");
54 }
55
/*
 * handle the data invalidation side of a callback being broken
 * - work item attached to a vnode (cb_broken_work), run on
 *   afs_callback_update_worker
 * - refetches the vnode status and, if the data changed, invalidates the
 *   pagecache for the inode
 */
void afs_broken_callback_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, cb_broken_work);

	_enter("");

	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return;

	/* we're only interested in dealing with a broken callback on *this*
	 * vnode and only if no-one else has dealt with it yet */
	if (!mutex_trylock(&vnode->cb_broken_lock))
		return; /* someone else is dealing with it */

	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		/* NOTE(review): presumably afs_vnode_fetch_status() clears
		 * AFS_VNODE_CB_BROKEN and sets AFS_VNODE_ZAP_DATA when the
		 * data version changed — not visible here, confirm */
		if (afs_vnode_fetch_status(vnode) < 0)
			goto out;

		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
			goto out;

		/* if the vnode's data version number changed then its contents
		 * are different */
		if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
			_debug("zap data");
			invalidate_remote_inode(&vnode->vfs_inode);
		}
	}

out:
	mutex_unlock(&vnode->cb_broken_lock);

	/* avoid the potential race whereby the mutex_trylock() in this
	 * function happens again between the clear_bit() and the
	 * mutex_unlock() */
	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		_debug("requeue");
		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
	}
	_leave("");
}
101
/*
 * actually break a callback
 * - marks the vnode's callback broken, withdraws its promise record from the
 *   server's promise tree and queues the data-invalidation work item
 * - lock order: vnode->lock is taken outside server->cb_lock; cb_promised is
 *   re-checked under cb_lock since it may change between the two tests
 */
static void afs_break_callback(struct afs_server *server,
			       struct afs_vnode *vnode)
{
	_enter("");

	set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

	if (vnode->cb_promised) {
		spin_lock(&vnode->lock);

		_debug("break callback");

		spin_lock(&server->cb_lock);
		/* the promise may already have been withdrawn by someone
		 * else (e.g. afs_do_give_up_callback()) */
		if (vnode->cb_promised) {
			rb_erase(&vnode->cb_promise, &server->cb_promises);
			vnode->cb_promised = false;
		}
		spin_unlock(&server->cb_lock);

		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
		spin_unlock(&vnode->lock);
	}
}
128
129 /*
130  * allow the fileserver to explicitly break one callback
131  * - happens when
132  *   - the backing file is changed
133  *   - a lock is released
134  */
135 static void afs_break_one_callback(struct afs_server *server,
136                                    struct afs_fid *fid)
137 {
138         struct afs_vnode *vnode;
139         struct rb_node *p;
140
141         _debug("find");
142         spin_lock(&server->fs_lock);
143         p = server->fs_vnodes.rb_node;
144         while (p) {
145                 vnode = rb_entry(p, struct afs_vnode, server_rb);
146                 if (fid->vid < vnode->fid.vid)
147                         p = p->rb_left;
148                 else if (fid->vid > vnode->fid.vid)
149                         p = p->rb_right;
150                 else if (fid->vnode < vnode->fid.vnode)
151                         p = p->rb_left;
152                 else if (fid->vnode > vnode->fid.vnode)
153                         p = p->rb_right;
154                 else if (fid->unique < vnode->fid.unique)
155                         p = p->rb_left;
156                 else if (fid->unique > vnode->fid.unique)
157                         p = p->rb_right;
158                 else
159                         goto found;
160         }
161
162         /* not found so we just ignore it (it may have moved to another
163          * server) */
164 not_available:
165         _debug("not avail");
166         spin_unlock(&server->fs_lock);
167         _leave("");
168         return;
169
170 found:
171         _debug("found");
172         ASSERTCMP(server, ==, vnode->server);
173
174         if (!igrab(AFS_VNODE_TO_I(vnode)))
175                 goto not_available;
176         spin_unlock(&server->fs_lock);
177
178         afs_break_callback(server, vnode);
179         iput(&vnode->vfs_inode);
180         _leave("");
181 }
182
183 /*
184  * allow the fileserver to break callback promises
185  */
186 void afs_break_callbacks(struct afs_server *server, size_t count,
187                          struct afs_callback callbacks[])
188 {
189         _enter("%p,%zu,", server, count);
190
191         ASSERT(server != NULL);
192         ASSERTCMP(count, <=, AFSCBMAX);
193
194         for (; count > 0; callbacks++, count--) {
195                 _debug("- Fid { vl=%08x n=%u u=%u }  CB { v=%u x=%u t=%u }",
196                        callbacks->fid.vid,
197                        callbacks->fid.vnode,
198                        callbacks->fid.unique,
199                        callbacks->version,
200                        callbacks->expiry,
201                        callbacks->type
202                        );
203                 afs_break_one_callback(server, &callbacks->fid);
204         }
205
206         _leave("");
207         return;
208 }
209
/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 * - copies the vnode's callback details into the cb_break ring and withdraws
 *   its promise record; actual transmission to the server is deferred so
 *   multiple give-ups can be batched into one RPC
 */
static void afs_do_give_up_callback(struct afs_server *server,
				    struct afs_vnode *vnode)
{
	struct afs_callback *cb;

	_enter("%p,%p", server, vnode);

	/* caller guarantees ring space (see afs_give_up_callback()) */
	cb = &server->cb_break[server->cb_break_head];
	cb->fid		= vnode->fid;
	cb->version	= vnode->cb_version;
	cb->expiry	= vnode->cb_expiry;
	cb->type	= vnode->cb_type;
	/* ensure the slot contents are visible before publishing the new
	 * head index to the ring consumer */
	smp_wmb();
	/* mask arithmetic requires ARRAY_SIZE(cb_break) to be a power of 2 */
	server->cb_break_head =
		(server->cb_break_head + 1) &
		(ARRAY_SIZE(server->cb_break) - 1);

	/* defer the breaking of callbacks to try and collect as many as
	 * possible to ship in one operation */
	switch (atomic_inc_return(&server->cb_break_n)) {
	case 1 ... AFSCBMAX - 1:
		/* not yet a full batch: wait up to 2s for more */
		queue_delayed_work(afs_callback_update_worker,
				   &server->cb_break_work, HZ * 2);
		break;
	case AFSCBMAX:
		/* a full RPC's worth accumulated: ship it now */
		afs_flush_callback_breaks(server);
		break;
	default:
		break;
	}

	ASSERT(server->cb_promises.rb_node != NULL);
	rb_erase(&vnode->cb_promise, &server->cb_promises);
	vnode->cb_promised = false;
	_leave("");
}
250
/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 * - may sleep (TASK_UNINTERRUPTIBLE) waiting for space in the server's
 *   callback-break ring
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%d", vnode->cb_promised);

	_debug("GIVE UP INODE %p", &vnode->vfs_inode);

	/* unlocked fast-path check; re-checked under cb_lock below */
	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
	if (vnode->cb_promised && afs_breakring_space(server) == 0) {
		/* classic sleep loop: drop the lock around schedule() and
		 * re-test the condition after reacquiring it */
		add_wait_queue(&server->cb_break_waitq, &myself);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!vnode->cb_promised ||
			    afs_breakring_space(server) != 0)
				break;
			spin_unlock(&server->cb_lock);
			schedule();
			spin_lock(&server->cb_lock);
		}
		remove_wait_queue(&server->cb_break_waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* of course, it's always possible for the server to break this vnode's
	 * callback first... */
	if (vnode->cb_promised)
		afs_do_give_up_callback(server, vnode);

	spin_unlock(&server->cb_lock);
	_leave("");
}
296
297 /*
298  * dispatch a deferred give up callbacks operation
299  */
300 void afs_dispatch_give_up_callbacks(struct work_struct *work)
301 {
302         struct afs_server *server =
303                 container_of(work, struct afs_server, cb_break_work.work);
304
305         _enter("");
306
307         /* tell the fileserver to discard the callback promises it has
308          * - in the event of ENOMEM or some other error, we just forget that we
309          *   had callbacks entirely, and the server will call us later to break
310          *   them
311          */
312         afs_fs_give_up_callbacks(server, &afs_async_call);
313 }
314
315 /*
316  * flush the outstanding callback breaks on a server
317  */
318 void afs_flush_callback_breaks(struct afs_server *server)
319 {
320         cancel_delayed_work(&server->cb_break_work);
321         queue_delayed_work(afs_callback_update_worker,
322                            &server->cb_break_work, 0);
323 }
324
#if 0
/*
 * update a bunch of callbacks
 *
 * NOTE(review): dead code — compiled out by #if 0.  It references
 * identifiers that are not declared anywhere visible (vl, vldb,
 * afs_vnode_update_worker, afs_vnode_update, afs_put_vnode) and would not
 * compile if re-enabled.  It appears to be a half-converted copy of the
 * volume-location updater; either finish the conversion or delete it.
 */
static void afs_callback_updater(struct work_struct *work)
{
	struct afs_server *server;
	struct afs_vnode *vnode, *xvnode;
	time_t now;
	long timeout;
	int ret;

	server = container_of(work, struct afs_server, updater);

	_enter("");

	now = get_seconds();

	/* find the first vnode to update */
	spin_lock(&server->cb_lock);
	for (;;) {
		if (RB_EMPTY_ROOT(&server->cb_promises)) {
			spin_unlock(&server->cb_lock);
			_leave(" [nothing]");
			return;
		}

		vnode = rb_entry(rb_first(&server->cb_promises),
				 struct afs_vnode, cb_promise);
		if (atomic_read(&vnode->usage) > 0)
			break;
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	timeout = vnode->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vnode_update_worker,
				   &afs_vnode_update, timeout * HZ);
		spin_unlock(&server->cb_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vnode->update);
	atomic_inc(&vnode->usage);
	spin_unlock(&server->cb_lock);

	/* we can now perform the update */
	_debug("update %s", vnode->vldb.name);
	vnode->state = AFS_VL_UPDATING;
	vnode->upd_rej_cnt = 0;
	vnode->upd_busy_cnt = 0;

	/* NOTE(review): 'vl' and 'vldb' are undeclared here */
	ret = afs_vnode_update_record(vl, &vldb);
	switch (ret) {
	case 0:
		afs_vnode_apply_update(vl, &vldb);
		vnode->state = AFS_VL_UPDATING;
		break;
	case -ENOMEDIUM:
		vnode->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vnode->state = AFS_VL_UNCERTAIN;
		break;
	}

	/* and then reschedule */
	_debug("reschedule");
	vnode->update_at = get_seconds() + afs_vnode_update_timeout;

	spin_lock(&server->cb_lock);

	if (!list_empty(&server->cb_promises)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvnode = list_entry(server->cb_promises.prev,
				    struct afs_vnode, update);
		if (vnode->update_at <= xvnode->update_at)
			vnode->update_at = xvnode->update_at + 1;
		xvnode = list_entry(server->cb_promises.next,
				    struct afs_vnode, update);
		timeout = xvnode->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vnode_update_timeout;
	}

	list_add_tail(&vnode->update, &server->cb_promises);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vnode_update_worker,
			   &afs_vnode_update, timeout * HZ);
	spin_unlock(&server->cb_lock);
	afs_put_vnode(vl);
}
#endif
426
427 /*
428  * initialise the callback update process
429  */
430 int __init afs_callback_update_init(void)
431 {
432         afs_callback_update_worker =
433                 create_singlethread_workqueue("kafs_callbackd");
434         return afs_callback_update_worker ? 0 : -ENOMEM;
435 }
436
/*
 * shut down the callback update process
 * - flushes outstanding work and destroys the callback workqueue
 */
void __exit afs_callback_update_kill(void)
{
	destroy_workqueue(afs_callback_update_worker);
}