1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmdomain.c
5  *
6  * defines domain join / leave apis
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27 #include <linux/module.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31 #include <linux/init.h>
32 #include <linux/spinlock.h>
33 #include <linux/delay.h>
34 #include <linux/err.h>
35 #include <linux/debugfs.h>
36
37 #include "cluster/heartbeat.h"
38 #include "cluster/nodemanager.h"
39 #include "cluster/tcp.h"
40
41 #include "dlmapi.h"
42 #include "dlmcommon.h"
43 #include "dlmdomain.h"
44 #include "dlmdebug.h"
45
46 #include "dlmver.h"
47
48 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
49 #include "cluster/masklog.h"
50
51 /*
52  * ocfs2 node maps are arrays of long ints, which makes it awkward to send
53  * them freely across the wire due to endianness issues. To work around this,
54  * we convert the long ints to byte arrays. The following three routines are
55  * helper functions to set/test/copy bits within those byte arrays.
56  */
57 static inline void byte_set_bit(u8 nr, u8 map[])
58 {
59         map[nr >> 3] |= (1UL << (nr & 7));
60 }
61
62 static inline int byte_test_bit(u8 nr, u8 map[])
63 {
64         return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
65 }
66
67 static inline void byte_copymap(u8 dmap[], unsigned long smap[],
68                         unsigned int sz)
69 {
70         unsigned int nn;
71
72         if (!sz)
73                 return;
74
75         memset(dmap, 0, ((sz + 7) >> 3));
76         for (nn = 0 ; nn < sz; nn++)
77                 if (test_bit(nn, smap))
78                         byte_set_bit(nn, dmap);
79 }
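
/*
 * Worked example of the byte-map layout above: node 10 lands in byte
 * 10 >> 3 = 1, bit 10 & 7 = 2, i.e. map[1] |= 0x04.  A long-int node map
 * with bits 0, 1 and 10 set therefore copies to a byte array beginning
 * dmap[0] = 0x03, dmap[1] = 0x04, which has the same layout on any host.
 * Typical use (as in dlm_request_join() below):
 *
 *	byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
 *	if (byte_test_bit(10, join_msg.node_map))
 *		... node 10 was live when the map was copied ...
 */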
80
81 static void dlm_free_pagevec(void **vec, int pages)
82 {
83         while (pages--)
84                 free_page((unsigned long)vec[pages]);
85         kfree(vec);
86 }
87
88 static void **dlm_alloc_pagevec(int pages)
89 {
90         void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
91         int i;
92
93         if (!vec)
94                 return NULL;
95
96         for (i = 0; i < pages; i++)
97                 if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
98                         goto out_free;
99
100         mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
101              pages, (unsigned long)DLM_HASH_PAGES,
102              (unsigned long)DLM_BUCKETS_PER_PAGE);
103         return vec;
104 out_free:
105         dlm_free_pagevec(vec, i);
106         return NULL;
107 }
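
/*
 * The lockres and master hashes are each spread over DLM_HASH_PAGES pages
 * holding DLM_BUCKETS_PER_PAGE struct hlist_head buckets apiece.  The real
 * bucket lookup is dlm_lockres_hash() in dlmcommon.h; a minimal sketch of
 * indexing a pagevec of this shape (for illustration only, not the exact
 * macro) would be:
 *
 *	static inline struct hlist_head *bucket_of(void **vec, unsigned int i)
 *	{
 *		return (struct hlist_head *)vec[i / DLM_BUCKETS_PER_PAGE] +
 *		       (i % DLM_BUCKETS_PER_PAGE);
 *	}
 */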
108
109 /*
110  *
111  * spinlock lock ordering: if multiple locks are needed, obey this ordering:
112  *    dlm_domain_lock
113  *    struct dlm_ctxt->spinlock
114  *    struct dlm_lock_resource->spinlock
115  *    struct dlm_ctxt->master_lock
116  *    struct dlm_ctxt->ast_lock
117  *    dlm_master_list_entry->spinlock
118  *    dlm_lock->spinlock
119  *
120  */
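
/*
 * Example: code that needs both the per-domain and per-resource locks must
 * acquire them in the order above and release them in reverse, e.g.:
 *
 *	spin_lock(&dlm->spinlock);
 *	spin_lock(&res->spinlock);
 *	... update dlm and res state ...
 *	spin_unlock(&res->spinlock);
 *	spin_unlock(&dlm->spinlock);
 */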
121
122 DEFINE_SPINLOCK(dlm_domain_lock);
123 LIST_HEAD(dlm_domains);
124 static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
125
126 /*
127  * The supported protocol version for DLM communication.  Running domains
128  * will have a negotiated version with the same major number and a minor
129  * number equal or smaller.  The dlm_ctxt->dlm_locking_proto field should
130  * be used to determine what a running domain is actually using.
131  *
132  * New in version 1.1:
133  *      - Message DLM_QUERY_REGION added to support global heartbeat
134  *      - Message DLM_QUERY_NODEINFO added to allow online node removes
135  * New in version 1.2:
136  *      - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain
137  */
138 static const struct dlm_protocol_version dlm_protocol = {
139         .pv_major = 1,
140         .pv_minor = 2,
141 };
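
/*
 * Negotiation example: a node built with protocol 1.2 joining a domain whose
 * other nodes speak 1.1 keeps the major number and drops its minor, leaving
 * dlm->dlm_locking_proto at { .pv_major = 1, .pv_minor = 1 } (dlm_request_join()
 * below copies the minor returned in a JOIN_OK packet).  If the versions are
 * incompatible, the join is refused with JOIN_PROTOCOL_MISMATCH.
 */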
142
143 #define DLM_DOMAIN_BACKOFF_MS 200
144
145 static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
146                                   void **ret_data);
147 static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
148                                      void **ret_data);
149 static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
150                                    void **ret_data);
151 static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
152                                     void *data, void **ret_data);
153 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
154                                    void **ret_data);
155 static int dlm_protocol_compare(struct dlm_protocol_version *existing,
156                                 struct dlm_protocol_version *request);
157
158 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);
159
160 void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
161 {
162         if (hlist_unhashed(&res->hash_node))
163                 return;
164
165         mlog(0, "%s: Unhash res %.*s\n", dlm->name, res->lockname.len,
166              res->lockname.name);
167         hlist_del_init(&res->hash_node);
168         dlm_lockres_put(res);
169 }
170
171 void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
172 {
173         struct hlist_head *bucket;
174         struct qstr *q;
175
176         assert_spin_locked(&dlm->spinlock);
177
178         q = &res->lockname;
179         bucket = dlm_lockres_hash(dlm, q->hash);
180
181         /* get a reference for our hashtable */
182         dlm_lockres_get(res);
183
184         hlist_add_head(&res->hash_node, bucket);
185
186         mlog(0, "%s: Hash res %.*s\n", dlm->name, res->lockname.len,
187              res->lockname.name);
188 }
189
190 struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
191                                                      const char *name,
192                                                      unsigned int len,
193                                                      unsigned int hash)
194 {
195         struct hlist_head *bucket;
196         struct hlist_node *list;
197
198         mlog(0, "%.*s\n", len, name);
199
200         assert_spin_locked(&dlm->spinlock);
201
202         bucket = dlm_lockres_hash(dlm, hash);
203
204         hlist_for_each(list, bucket) {
205                 struct dlm_lock_resource *res = hlist_entry(list,
206                         struct dlm_lock_resource, hash_node);
207                 if (res->lockname.name[0] != name[0])
208                         continue;
209                 if (unlikely(res->lockname.len != len))
210                         continue;
211                 if (memcmp(res->lockname.name + 1, name + 1, len - 1))
212                         continue;
213                 dlm_lockres_get(res);
214                 return res;
215         }
216         return NULL;
217 }
218
219 /* intended to be called by functions which do not care about lock
220  * resources which are being purged (most net _handler functions).
221  * this will return NULL for any lock resource which is found but
222  * currently in the process of dropping its mastery reference.
223  * use __dlm_lookup_lockres_full when you need the lock resource
224  * regardless (e.g. dlm_get_lock_resource) */
225 struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
226                                                 const char *name,
227                                                 unsigned int len,
228                                                 unsigned int hash)
229 {
230         struct dlm_lock_resource *res = NULL;
231
232         mlog(0, "%.*s\n", len, name);
233
234         assert_spin_locked(&dlm->spinlock);
235
236         res = __dlm_lookup_lockres_full(dlm, name, len, hash);
237         if (res) {
238                 spin_lock(&res->spinlock);
239                 if (res->state & DLM_LOCK_RES_DROPPING_REF) {
240                         spin_unlock(&res->spinlock);
241                         dlm_lockres_put(res);
242                         return NULL;
243                 }
244                 spin_unlock(&res->spinlock);
245         }
246
247         return res;
248 }
249
250 struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
251                                     const char *name,
252                                     unsigned int len)
253 {
254         struct dlm_lock_resource *res;
255         unsigned int hash = dlm_lockid_hash(name, len);
256
257         spin_lock(&dlm->spinlock);
258         res = __dlm_lookup_lockres(dlm, name, len, hash);
259         spin_unlock(&dlm->spinlock);
260         return res;
261 }
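
/*
 * Both lookup variants return the resource with an extra reference taken
 * (dlm_lockres_get() in __dlm_lookup_lockres_full()), so a typical caller
 * looks like:
 *
 *	res = dlm_lookup_lockres(dlm, name, len);
 *	if (res) {
 *		... use res ...
 *		dlm_lockres_put(res);
 *	}
 */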
262
263 static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
264 {
265         struct dlm_ctxt *tmp = NULL;
266         struct list_head *iter;
267
268         assert_spin_locked(&dlm_domain_lock);
269
270         /* tmp->name here is always NULL terminated,
271          * but domain may not be! */
272         list_for_each(iter, &dlm_domains) {
273                 tmp = list_entry (iter, struct dlm_ctxt, list);
274                 if (strlen(tmp->name) == len &&
275                     memcmp(tmp->name, domain, len)==0)
276                         break;
277                 tmp = NULL;
278         }
279
280         return tmp;
281 }
282
283 /* For null terminated domain strings ONLY */
284 static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
285 {
286         assert_spin_locked(&dlm_domain_lock);
287
288         return __dlm_lookup_domain_full(domain, strlen(domain));
289 }
290
291
292 /* returns true on one of two conditions:
293  * 1) the domain does not exist
294  * 2) the domain exists and its state is "joined" */
295 static int dlm_wait_on_domain_helper(const char *domain)
296 {
297         int ret = 0;
298         struct dlm_ctxt *tmp = NULL;
299
300         spin_lock(&dlm_domain_lock);
301
302         tmp = __dlm_lookup_domain(domain);
303         if (!tmp)
304                 ret = 1;
305         else if (tmp->dlm_state == DLM_CTXT_JOINED)
306                 ret = 1;
307
308         spin_unlock(&dlm_domain_lock);
309         return ret;
310 }
311
312 static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
313 {
314         dlm_destroy_debugfs_subroot(dlm);
315
316         if (dlm->lockres_hash)
317                 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
318
319         if (dlm->master_hash)
320                 dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
321
322         kfree(dlm->name);
323         kfree(dlm);
324 }
325
326 /* A little strange - this function will be called while holding
327  * dlm_domain_lock and is expected to be holding it on the way out. We
328  * will, however, drop and reacquire it multiple times. */
329 static void dlm_ctxt_release(struct kref *kref)
330 {
331         struct dlm_ctxt *dlm;
332
333         dlm = container_of(kref, struct dlm_ctxt, dlm_refs);
334
335         BUG_ON(dlm->num_joins);
336         BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);
337
338         /* we may still be in the list if we hit an error during join. */
339         list_del_init(&dlm->list);
340
341         spin_unlock(&dlm_domain_lock);
342
343         mlog(0, "freeing memory from domain %s\n", dlm->name);
344
345         wake_up(&dlm_domain_events);
346
347         dlm_free_ctxt_mem(dlm);
348
349         spin_lock(&dlm_domain_lock);
350 }
351
352 void dlm_put(struct dlm_ctxt *dlm)
353 {
354         spin_lock(&dlm_domain_lock);
355         kref_put(&dlm->dlm_refs, dlm_ctxt_release);
356         spin_unlock(&dlm_domain_lock);
357 }
358
359 static void __dlm_get(struct dlm_ctxt *dlm)
360 {
361         kref_get(&dlm->dlm_refs);
362 }
363
364 /* given a questionable reference to a dlm object, gets a reference if
365  * it can find it in the list, otherwise returns NULL in which case
366  * you shouldn't trust your pointer. */
367 struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
368 {
369         struct list_head *iter;
370         struct dlm_ctxt *target = NULL;
371
372         spin_lock(&dlm_domain_lock);
373
374         list_for_each(iter, &dlm_domains) {
375                 target = list_entry (iter, struct dlm_ctxt, list);
376
377                 if (target == dlm) {
378                         __dlm_get(target);
379                         break;
380                 }
381
382                 target = NULL;
383         }
384
385         spin_unlock(&dlm_domain_lock);
386
387         return target;
388 }
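
/*
 * Typical use in a message handler (see dlm_exit_domain_handler() below):
 * validate the context before touching it and drop the reference when done:
 *
 *	if (!dlm_grab(dlm))
 *		return 0;
 *	... handle the message ...
 *	dlm_put(dlm);
 */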
389
390 int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
391 {
392         int ret;
393
394         spin_lock(&dlm_domain_lock);
395         ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
396                 (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
397         spin_unlock(&dlm_domain_lock);
398
399         return ret;
400 }
401
402 static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
403 {
404         if (dlm->dlm_worker) {
405                 flush_workqueue(dlm->dlm_worker);
406                 destroy_workqueue(dlm->dlm_worker);
407                 dlm->dlm_worker = NULL;
408         }
409 }
410
411 static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
412 {
413         dlm_unregister_domain_handlers(dlm);
414         dlm_debug_shutdown(dlm);
415         dlm_complete_thread(dlm);
416         dlm_complete_recovery_thread(dlm);
417         dlm_destroy_dlm_worker(dlm);
418
419         /* We've left the domain. Now we can take ourselves out of the
420          * list and allow the kref stuff to help us free the
421          * memory. */
422         spin_lock(&dlm_domain_lock);
423         list_del_init(&dlm->list);
424         spin_unlock(&dlm_domain_lock);
425
426         /* Wake up anyone waiting for us to remove this domain */
427         wake_up(&dlm_domain_events);
428 }
429
430 static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
431 {
432         int i, num, n, ret = 0;
433         struct dlm_lock_resource *res;
434         struct hlist_node *iter;
435         struct hlist_head *bucket;
436         int dropped;
437
438         mlog(0, "Migrating locks from domain %s\n", dlm->name);
439
440         num = 0;
441         spin_lock(&dlm->spinlock);
442         for (i = 0; i < DLM_HASH_BUCKETS; i++) {
443 redo_bucket:
444                 n = 0;
445                 bucket = dlm_lockres_hash(dlm, i);
446                 iter = bucket->first;
447                 while (iter) {
448                         n++;
449                         res = hlist_entry(iter, struct dlm_lock_resource,
450                                           hash_node);
451                         dlm_lockres_get(res);
452                         /* migrate, if necessary.  this will drop the dlm
453                          * spinlock and retake it if it does migration. */
454                         dropped = dlm_empty_lockres(dlm, res);
455
456                         spin_lock(&res->spinlock);
457                         if (dropped)
458                                 __dlm_lockres_calc_usage(dlm, res);
459                         else
460                                 iter = res->hash_node.next;
461                         spin_unlock(&res->spinlock);
462
463                         dlm_lockres_put(res);
464
465                         if (dropped) {
466                                 cond_resched_lock(&dlm->spinlock);
467                                 goto redo_bucket;
468                         }
469                 }
470                 cond_resched_lock(&dlm->spinlock);
471                 num += n;
472         }
473         spin_unlock(&dlm->spinlock);
474         wake_up(&dlm->dlm_thread_wq);
475
476         /* let the dlm thread take care of purging, keep scanning until
477          * nothing remains in the hash */
478         if (num) {
479                 mlog(0, "%s: %d lock resources in hash last pass\n",
480                      dlm->name, num);
481                 ret = -EAGAIN;
482         }
483         mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
484         return ret;
485 }
486
487 static int dlm_no_joining_node(struct dlm_ctxt *dlm)
488 {
489         int ret;
490
491         spin_lock(&dlm->spinlock);
492         ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
493         spin_unlock(&dlm->spinlock);
494
495         return ret;
496 }
497
498 static int dlm_begin_exit_domain_handler(struct o2net_msg *msg, u32 len,
499                                          void *data, void **ret_data)
500 {
501         struct dlm_ctxt *dlm = data;
502         unsigned int node;
503         struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
504
505         if (!dlm_grab(dlm))
506                 return 0;
507
508         node = exit_msg->node_idx;
509         mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node);
510
511         spin_lock(&dlm->spinlock);
512         set_bit(node, dlm->exit_domain_map);
513         spin_unlock(&dlm->spinlock);
514
515         dlm_put(dlm);
516
517         return 0;
518 }
519
520 static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
521 {
522         /* Yikes, a double spinlock! I need domain_lock for the dlm
523          * state and the dlm spinlock for join state... Sorry! */
524 again:
525         spin_lock(&dlm_domain_lock);
526         spin_lock(&dlm->spinlock);
527
528         if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
529                 mlog(0, "Node %d is joining, we wait on it.\n",
530                           dlm->joining_node);
531                 spin_unlock(&dlm->spinlock);
532                 spin_unlock(&dlm_domain_lock);
533
534                 wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
535                 goto again;
536         }
537
538         dlm->dlm_state = DLM_CTXT_LEAVING;
539         spin_unlock(&dlm->spinlock);
540         spin_unlock(&dlm_domain_lock);
541 }
542
543 static void __dlm_print_nodes(struct dlm_ctxt *dlm)
544 {
545         int node = -1, num = 0;
546
547         assert_spin_locked(&dlm->spinlock);
548
549         printk("( ");
550         while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
551                                      node + 1)) < O2NM_MAX_NODES) {
552                 printk("%d ", node);
553                 ++num;
554         }
555         printk(") %u nodes\n", num);
556 }
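
/*
 * Example output for a domain map with nodes 0, 1 and 3 set:
 *
 *	( 0 1 3 ) 3 nodes
 *
 * Callers print their own prefix (e.g. "o2dlm: Node %u joins domain %s ")
 * before calling this with dlm->spinlock held.
 */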
557
558 static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
559                                    void **ret_data)
560 {
561         struct dlm_ctxt *dlm = data;
562         unsigned int node;
563         struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;
564
565         mlog(0, "%p %u %p", msg, len, data);
566
567         if (!dlm_grab(dlm))
568                 return 0;
569
570         node = exit_msg->node_idx;
571
572         spin_lock(&dlm->spinlock);
573         clear_bit(node, dlm->domain_map);
574         clear_bit(node, dlm->exit_domain_map);
575         printk(KERN_NOTICE "o2dlm: Node %u leaves domain %s ", node, dlm->name);
576         __dlm_print_nodes(dlm);
577
578         /* notify anything attached to the heartbeat events */
579         dlm_hb_event_notify_attached(dlm, node, 0);
580
581         spin_unlock(&dlm->spinlock);
582
583         dlm_put(dlm);
584
585         return 0;
586 }
587
588 static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type,
589                                     unsigned int node)
590 {
591         int status;
592         struct dlm_exit_domain leave_msg;
593
594         mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name,
595              msg_type, node);
596
597         memset(&leave_msg, 0, sizeof(leave_msg));
598         leave_msg.node_idx = dlm->node_num;
599
600         status = o2net_send_message(msg_type, dlm->key, &leave_msg,
601                                     sizeof(leave_msg), node, NULL);
602         if (status < 0)
603                 mlog(ML_ERROR, "Error %d sending domain exit message %u "
604                      "to node %u on domain %s\n", status, msg_type, node,
605                      dlm->name);
606
607         return status;
608 }
609
610 static void dlm_begin_exit_domain(struct dlm_ctxt *dlm)
611 {
612         int node = -1;
613
614         /* Support for begin exit domain was added in 1.2 */
615         if (dlm->dlm_locking_proto.pv_major == 1 &&
616             dlm->dlm_locking_proto.pv_minor < 2)
617                 return;
618
619         /*
620          * Unlike DLM_EXIT_DOMAIN_MSG, DLM_BEGIN_EXIT_DOMAIN_MSG is purely
621          * informational. Meaning if a node does not receive the message,
622          * so be it.
623          */
624         spin_lock(&dlm->spinlock);
625         while (1) {
626                 node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1);
627                 if (node >= O2NM_MAX_NODES)
628                         break;
629                 if (node == dlm->node_num)
630                         continue;
631
632                 spin_unlock(&dlm->spinlock);
633                 dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node);
634                 spin_lock(&dlm->spinlock);
635         }
636         spin_unlock(&dlm->spinlock);
637 }
638
639 static void dlm_leave_domain(struct dlm_ctxt *dlm)
640 {
641         int node, clear_node, status;
642
643         /* At this point we've migrated away all our locks and won't
644          * accept mastership of new ones. The dlm is responsible for
645          * almost nothing now. We make sure not to confuse any joining
646          * nodes and then commence shutdown procedure. */
647
648         spin_lock(&dlm->spinlock);
649         /* Clear ourselves from the domain map */
650         clear_bit(dlm->node_num, dlm->domain_map);
651         while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
652                                      0)) < O2NM_MAX_NODES) {
653                 /* Drop the dlm spinlock. This is safe wrt the domain_map.
654                  * -nodes cannot be added now, as the
655                  *   query_join handler knows to respond with OK_NO_MAP
656                  * -we catch the right network errors if a node is
657                  *   removed from the map while we're sending him the
658                  *   exit message. */
659                 spin_unlock(&dlm->spinlock);
660
661                 clear_node = 1;
662
663                 status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG,
664                                                   node);
665                 if (status < 0 &&
666                     status != -ENOPROTOOPT &&
667                     status != -ENOTCONN) {
668                         mlog(ML_NOTICE, "Error %d sending domain exit message "
669                              "to node %d\n", status, node);
670
671                         /* Not sure what to do here but let's sleep for
672                          * a bit in case this was a transient
673                          * error... */
674                         msleep(DLM_DOMAIN_BACKOFF_MS);
675                         clear_node = 0;
676                 }
677
678                 spin_lock(&dlm->spinlock);
679                 /* If we're not clearing the node bit then we intend
680                  * to loop back around to try again. */
681                 if (clear_node)
682                         clear_bit(node, dlm->domain_map);
683         }
684         spin_unlock(&dlm->spinlock);
685 }
686
687 int dlm_joined(struct dlm_ctxt *dlm)
688 {
689         int ret = 0;
690
691         spin_lock(&dlm_domain_lock);
692
693         if (dlm->dlm_state == DLM_CTXT_JOINED)
694                 ret = 1;
695
696         spin_unlock(&dlm_domain_lock);
697
698         return ret;
699 }
700
701 int dlm_shutting_down(struct dlm_ctxt *dlm)
702 {
703         int ret = 0;
704
705         spin_lock(&dlm_domain_lock);
706
707         if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
708                 ret = 1;
709
710         spin_unlock(&dlm_domain_lock);
711
712         return ret;
713 }
714
715 void dlm_unregister_domain(struct dlm_ctxt *dlm)
716 {
717         int leave = 0;
718         struct dlm_lock_resource *res;
719
720         spin_lock(&dlm_domain_lock);
721         BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
722         BUG_ON(!dlm->num_joins);
723
724         dlm->num_joins--;
725         if (!dlm->num_joins) {
726                 /* We mark it "in shutdown" now so new register
727                  * requests wait until we've completely left the
728                  * domain. Don't use DLM_CTXT_LEAVING yet as we still
729                  * want new domain joins to communicate with us at
730                  * least until we've completed migration of our
731                  * resources. */
732                 dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
733                 leave = 1;
734         }
735         spin_unlock(&dlm_domain_lock);
736
737         if (leave) {
738                 mlog(0, "shutting down domain %s\n", dlm->name);
739                 dlm_begin_exit_domain(dlm);
740
741                 /* We changed dlm state, notify the thread */
742                 dlm_kick_thread(dlm, NULL);
743
744                 while (dlm_migrate_all_locks(dlm)) {
745                         /* Give dlm_thread time to purge the lockres' */
746                         msleep(500);
747                         mlog(0, "%s: more migration to do\n", dlm->name);
748                 }
749
750                 /* This list should be empty. If not, print remaining lockres */
751                 if (!list_empty(&dlm->tracking_list)) {
752                         mlog(ML_ERROR, "Following lockres' are still on the "
753                              "tracking list:\n");
754                         list_for_each_entry(res, &dlm->tracking_list, tracking)
755                                 dlm_print_one_lock_resource(res);
756                 }
757
758                 dlm_mark_domain_leaving(dlm);
759                 dlm_leave_domain(dlm);
760                 printk(KERN_NOTICE "o2dlm: Leaving domain %s\n", dlm->name);
761                 dlm_force_free_mles(dlm);
762                 dlm_complete_dlm_shutdown(dlm);
763         }
764         dlm_put(dlm);
765 }
766 EXPORT_SYMBOL_GPL(dlm_unregister_domain);
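
/*
 * Matching caller sketch (the registration side, dlm_register_domain(), lives
 * later in this file; the ERR_PTR error convention shown here is assumed, not
 * taken from this excerpt).  Each successful register bumps dlm->num_joins,
 * and only the final unregister migrates locks and leaves the domain:
 *
 *	dlm = dlm_register_domain(domain_name, key, &fs_proto);
 *	if (IS_ERR(dlm))
 *		return PTR_ERR(dlm);
 *	... use the domain ...
 *	dlm_unregister_domain(dlm);
 */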
767
768 static int dlm_query_join_proto_check(char *proto_type, int node,
769                                       struct dlm_protocol_version *ours,
770                                       struct dlm_protocol_version *request)
771 {
772         int rc;
773         struct dlm_protocol_version proto = *request;
774
775         if (!dlm_protocol_compare(ours, &proto)) {
776                 mlog(0,
777                      "node %u wanted to join with %s locking protocol "
778                      "%u.%u, we respond with %u.%u\n",
779                      node, proto_type,
780                      request->pv_major,
781                      request->pv_minor,
782                      proto.pv_major, proto.pv_minor);
783                 request->pv_minor = proto.pv_minor;
784                 rc = 0;
785         } else {
786                 mlog(ML_NOTICE,
787                      "Node %u wanted to join with %s locking "
788                      "protocol %u.%u, but we have %u.%u, disallowing\n",
789                      node, proto_type,
790                      request->pv_major,
791                      request->pv_minor,
792                      ours->pv_major,
793                      ours->pv_minor);
794                 rc = 1;
795         }
796
797         return rc;
798 }
799
800 /*
801  * struct dlm_query_join_packet is made up of four one-byte fields.  They
802  * are effectively in big-endian order already.  However, little-endian
803  * machines swap them before putting the packet on the wire (because
804  * query_join's response is a status, and that status is treated as a u32
805  * on the wire).  Thus, big-endian and little-endian machines will treat
806  * this structure differently.
807  *
808  * The solution is to have little-endian machines swap the structure when
809  * converting from the structure to the u32 representation.  This will
810  * result in the structure having the correct format on the wire no matter
811  * the host endian format.
812  */
813 static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
814                                           u32 *wire)
815 {
816         union dlm_query_join_response response;
817
818         response.packet = *packet;
819         *wire = be32_to_cpu(response.intval);
820 }
821
822 static void dlm_query_join_wire_to_packet(u32 wire,
823                                           struct dlm_query_join_packet *packet)
824 {
825         union dlm_query_join_response response;
826
827         response.intval = cpu_to_be32(wire);
828         *packet = response.packet;
829 }
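
/*
 * The two helpers are inverses on any one host, so a packet survives a local
 * round trip regardless of endianness:
 *
 *	u32 wire;
 *	struct dlm_query_join_packet out, in = { .code = JOIN_OK };
 *
 *	dlm_query_join_packet_to_wire(&in, &wire);
 *	dlm_query_join_wire_to_packet(wire, &out);
 *	... out.code == JOIN_OK again ...
 *
 * The u32 is what dlm_query_join_handler() returns; o2net carries it back to
 * the sender as the message status.
 */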
830
831 static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
832                                   void **ret_data)
833 {
834         struct dlm_query_join_request *query;
835         struct dlm_query_join_packet packet = {
836                 .code = JOIN_DISALLOW,
837         };
838         struct dlm_ctxt *dlm = NULL;
839         u32 response;
840         u8 nodenum;
841
842         query = (struct dlm_query_join_request *) msg->buf;
843
844         mlog(0, "node %u wants to join domain %s\n", query->node_idx,
845                   query->domain);
846
847         /*
848          * If heartbeat doesn't consider the node live, tell it
849          * to back off and try again.  This gives heartbeat a chance
850          * to catch up.
851          */
852         if (!o2hb_check_node_heartbeating(query->node_idx)) {
853                 mlog(0, "node %u is not in our live map yet\n",
854                      query->node_idx);
855
856                 packet.code = JOIN_DISALLOW;
857                 goto respond;
858         }
859
860         packet.code = JOIN_OK_NO_MAP;
861
862         spin_lock(&dlm_domain_lock);
863         dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
864         if (!dlm)
865                 goto unlock_respond;
866
867         /*
868          * There is a small window where the joining node may not yet see
869          * the node(s) that just left but are still part of the cluster.
870          * DISALLOW the join request if the joining node has a different node map.
871          */
872         nodenum=0;
873         while (nodenum < O2NM_MAX_NODES) {
874                 if (test_bit(nodenum, dlm->domain_map)) {
875                         if (!byte_test_bit(nodenum, query->node_map)) {
876                                 mlog(0, "disallow join as node %u does not "
877                                      "have node %u in its nodemap\n",
878                                      query->node_idx, nodenum);
879                                 packet.code = JOIN_DISALLOW;
880                                 goto unlock_respond;
881                         }
882                 }
883                 nodenum++;
884         }
885
886         /* Once the dlm ctxt is marked as leaving, we don't want
887          * to be put in someone's domain map.
888          * Also, explicitly disallow joining at certain troublesome
889          * times (i.e. during recovery). */
890         if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
891                 int bit = query->node_idx;
892                 spin_lock(&dlm->spinlock);
893
894                 if (dlm->dlm_state == DLM_CTXT_NEW &&
895                     dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
896                         /* If this is a brand new context and we
897                          * haven't started our join process yet, then
898                          * the other node won the race. */
899                         packet.code = JOIN_OK_NO_MAP;
900                 } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
901                         /* Disallow parallel joins. */
902                         packet.code = JOIN_DISALLOW;
903                 } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
904                         mlog(0, "node %u trying to join, but recovery "
905                              "is ongoing.\n", bit);
906                         packet.code = JOIN_DISALLOW;
907                 } else if (test_bit(bit, dlm->recovery_map)) {
908                         mlog(0, "node %u trying to join, but it "
909                              "still needs recovery.\n", bit);
910                         packet.code = JOIN_DISALLOW;
911                 } else if (test_bit(bit, dlm->domain_map)) {
912                         mlog(0, "node %u trying to join, but it "
913                              "is still in the domain! needs recovery?\n",
914                              bit);
915                         packet.code = JOIN_DISALLOW;
916                 } else {
917                         /* Alright, we're fully a part of this domain,
918                          * so we keep some state as to who's joining
919                          * and indicate to the joining node what needs
920                          * to be fixed up. */
921
922                         /* Make sure we speak compatible locking protocols.  */
923                         if (dlm_query_join_proto_check("DLM", bit,
924                                                        &dlm->dlm_locking_proto,
925                                                        &query->dlm_proto)) {
926                                 packet.code = JOIN_PROTOCOL_MISMATCH;
927                         } else if (dlm_query_join_proto_check("fs", bit,
928                                                               &dlm->fs_locking_proto,
929                                                               &query->fs_proto)) {
930                                 packet.code = JOIN_PROTOCOL_MISMATCH;
931                         } else {
932                                 packet.dlm_minor = query->dlm_proto.pv_minor;
933                                 packet.fs_minor = query->fs_proto.pv_minor;
934                                 packet.code = JOIN_OK;
935                                 __dlm_set_joining_node(dlm, query->node_idx);
936                         }
937                 }
938
939                 spin_unlock(&dlm->spinlock);
940         }
941 unlock_respond:
942         spin_unlock(&dlm_domain_lock);
943
944 respond:
945         mlog(0, "We respond with %u\n", packet.code);
946
947         dlm_query_join_packet_to_wire(&packet, &response);
948         return response;
949 }
950
951 static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
952                                      void **ret_data)
953 {
954         struct dlm_assert_joined *assert;
955         struct dlm_ctxt *dlm = NULL;
956
957         assert = (struct dlm_assert_joined *) msg->buf;
958
959         mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
960                   assert->domain);
961
962         spin_lock(&dlm_domain_lock);
963         dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
964         /* XXX should we consider no dlm ctxt an error? */
965         if (dlm) {
966                 spin_lock(&dlm->spinlock);
967
968                 /* Alright, this node has officially joined our
969                  * domain. Set him in the map and clean up our
970                  * leftover join state. */
971                 BUG_ON(dlm->joining_node != assert->node_idx);
972                 set_bit(assert->node_idx, dlm->domain_map);
973                 clear_bit(assert->node_idx, dlm->exit_domain_map);
974                 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
975
976                 printk(KERN_NOTICE "o2dlm: Node %u joins domain %s ",
977                        assert->node_idx, dlm->name);
978                 __dlm_print_nodes(dlm);
979
980                 /* notify anything attached to the heartbeat events */
981                 dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);
982
983                 spin_unlock(&dlm->spinlock);
984         }
985         spin_unlock(&dlm_domain_lock);
986
987         return 0;
988 }
989
990 static int dlm_match_regions(struct dlm_ctxt *dlm,
991                              struct dlm_query_region *qr,
992                              char *local, int locallen)
993 {
994         char *remote = qr->qr_regions;
995         char *l, *r;
996         int localnr, i, j, foundit;
997         int status = 0;
998
999         if (!o2hb_global_heartbeat_active()) {
1000                 if (qr->qr_numregions) {
1001                         mlog(ML_ERROR, "Domain %s: Joining node %d has global "
1002                              "heartbeat enabled but local node %d does not\n",
1003                              qr->qr_domain, qr->qr_node, dlm->node_num);
1004                         status = -EINVAL;
1005                 }
1006                 goto bail;
1007         }
1008
1009         if (o2hb_global_heartbeat_active() && !qr->qr_numregions) {
1010                 mlog(ML_ERROR, "Domain %s: Local node %d has global "
1011                      "heartbeat enabled but joining node %d does not\n",
1012                      qr->qr_domain, dlm->node_num, qr->qr_node);
1013                 status = -EINVAL;
1014                 goto bail;
1015         }
1016
1017         r = remote;
1018         for (i = 0; i < qr->qr_numregions; ++i) {
1019                 mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, r);
1020                 r += O2HB_MAX_REGION_NAME_LEN;
1021         }
1022
1023         localnr = min(O2NM_MAX_REGIONS, locallen/O2HB_MAX_REGION_NAME_LEN);
1024         localnr = o2hb_get_all_regions(local, (u8)localnr);
1025
1026         /* compare local regions with remote */
1027         l = local;
1028         for (i = 0; i < localnr; ++i) {
1029                 foundit = 0;
1030                 r = remote;
1031                 for (j = 0; j < qr->qr_numregions; ++j) {
1032                         if (!memcmp(l, r, O2HB_MAX_REGION_NAME_LEN)) {
1033                                 foundit = 1;
1034                                 break;
1035                         }
1036                         r += O2HB_MAX_REGION_NAME_LEN;
1037                 }
1038                 if (!foundit) {
1039                         status = -EINVAL;
1040                         mlog(ML_ERROR, "Domain %s: Region '%.*s' registered "
1041                              "in local node %d but not in joining node %d\n",
1042                              qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, l,
1043                              dlm->node_num, qr->qr_node);
1044                         goto bail;
1045                 }
1046                 l += O2HB_MAX_REGION_NAME_LEN;
1047         }
1048
1049         /* compare remote with local regions */
1050         r = remote;
1051         for (i = 0; i < qr->qr_numregions; ++i) {
1052                 foundit = 0;
1053                 l = local;
1054                 for (j = 0; j < localnr; ++j) {
1055                         if (!memcmp(r, l, O2HB_MAX_REGION_NAME_LEN)) {
1056                                 foundit = 1;
1057                                 break;
1058                         }
1059                         l += O2HB_MAX_REGION_NAME_LEN;
1060                 }
1061                 if (!foundit) {
1062                         status = -EINVAL;
1063                         mlog(ML_ERROR, "Domain %s: Region '%.*s' registered "
1064                              "in joining node %d but not in local node %d\n",
1065                              qr->qr_domain, O2HB_MAX_REGION_NAME_LEN, r,
1066                              qr->qr_node, dlm->node_num);
1067                         goto bail;
1068                 }
1069                 r += O2HB_MAX_REGION_NAME_LEN;
1070         }
1071
1072 bail:
1073         return status;
1074 }
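
/*
 * The two loops above enforce set equality of heartbeat regions.  For
 * example, local regions {"A", "B"} and remote {"B", "A"} pass (order does
 * not matter), while local {"A", "B"} versus remote {"A"} fails in the first
 * loop ("B" registered locally but not on the joining node), and local {"A"}
 * versus remote {"A", "B"} fails in the second.
 */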
1075
1076 static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
1077 {
1078         struct dlm_query_region *qr = NULL;
1079         int status, ret = 0, i;
1080         char *p;
1081
1082         if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
1083                 goto bail;
1084
1085         qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL);
1086         if (!qr) {
1087                 ret = -ENOMEM;
1088                 mlog_errno(ret);
1089                 goto bail;
1090         }
1091
1092         qr->qr_node = dlm->node_num;
1093         qr->qr_namelen = strlen(dlm->name);
1094         memcpy(qr->qr_domain, dlm->name, qr->qr_namelen);
1095         /* if local hb, the numregions will be zero */
1096         if (o2hb_global_heartbeat_active())
1097                 qr->qr_numregions = o2hb_get_all_regions(qr->qr_regions,
1098                                                          O2NM_MAX_REGIONS);
1099
1100         p = qr->qr_regions;
1101         for (i = 0; i < qr->qr_numregions; ++i, p += O2HB_MAX_REGION_NAME_LEN)
1102                 mlog(0, "Region %.*s\n", O2HB_MAX_REGION_NAME_LEN, p);
1103
1104         i = -1;
1105         while ((i = find_next_bit(node_map, O2NM_MAX_NODES,
1106                                   i + 1)) < O2NM_MAX_NODES) {
1107                 if (i == dlm->node_num)
1108                         continue;
1109
1110                 mlog(0, "Sending regions to node %d\n", i);
1111
1112                 ret = o2net_send_message(DLM_QUERY_REGION, DLM_MOD_KEY, qr,
1113                                          sizeof(struct dlm_query_region),
1114                                          i, &status);
1115                 if (ret >= 0)
1116                         ret = status;
1117                 if (ret) {
1118                         mlog(ML_ERROR, "Region mismatch %d, node %d\n",
1119                              ret, i);
1120                         break;
1121                 }
1122         }
1123
1124 bail:
1125         kfree(qr);
1126         return ret;
1127 }
1128
1129 static int dlm_query_region_handler(struct o2net_msg *msg, u32 len,
1130                                     void *data, void **ret_data)
1131 {
1132         struct dlm_query_region *qr;
1133         struct dlm_ctxt *dlm = NULL;
1134         char *local = NULL;
1135         int status = 0;
1136         int locked = 0;
1137
1138         qr = (struct dlm_query_region *) msg->buf;
1139
1140         mlog(0, "Node %u queries hb regions on domain %s\n", qr->qr_node,
1141              qr->qr_domain);
1142
1143         /* buffer used in dlm_match_regions() */
1144         local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL);
1145         if (!local) {
1146                 status = -ENOMEM;
1147                 goto bail;
1148         }
1149
1150         status = -EINVAL;
1151
1152         spin_lock(&dlm_domain_lock);
1153         dlm = __dlm_lookup_domain_full(qr->qr_domain, qr->qr_namelen);
1154         if (!dlm) {
1155                 mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
1156                      "before join domain\n", qr->qr_node, qr->qr_domain);
1157                 goto bail;
1158         }
1159
1160         spin_lock(&dlm->spinlock);
1161         locked = 1;
1162         if (dlm->joining_node != qr->qr_node) {
1163                 mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
1164                      "but joining node is %d\n", qr->qr_node, qr->qr_domain,
1165                      dlm->joining_node);
1166                 goto bail;
1167         }
1168
1169         /* Support for global heartbeat was added in 1.1 */
1170         if (dlm->dlm_locking_proto.pv_major == 1 &&
1171             dlm->dlm_locking_proto.pv_minor == 0) {
1172                 mlog(ML_ERROR, "Node %d queried hb regions on domain %s "
1173                      "but active dlm protocol is %d.%d\n", qr->qr_node,
1174                      qr->qr_domain, dlm->dlm_locking_proto.pv_major,
1175                      dlm->dlm_locking_proto.pv_minor);
1176                 goto bail;
1177         }
1178
1179         status = dlm_match_regions(dlm, qr, local, sizeof(qr->qr_regions));
1180
1181 bail:
1182         if (locked)
1183                 spin_unlock(&dlm->spinlock);
1184         spin_unlock(&dlm_domain_lock);
1185
1186         kfree(local);
1187
1188         return status;
1189 }
1190
1191 static int dlm_match_nodes(struct dlm_ctxt *dlm, struct dlm_query_nodeinfo *qn)
1192 {
1193         struct o2nm_node *local;
1194         struct dlm_node_info *remote;
1195         int i, j;
1196         int status = 0;
1197
1198         for (j = 0; j < qn->qn_numnodes; ++j)
1199                 mlog(0, "Node %3d, %pI4:%u\n", qn->qn_nodes[j].ni_nodenum,
1200                      &(qn->qn_nodes[j].ni_ipv4_address),
1201                      ntohs(qn->qn_nodes[j].ni_ipv4_port));
1202
1203         for (i = 0; i < O2NM_MAX_NODES && !status; ++i) {
1204                 local = o2nm_get_node_by_num(i);
1205                 remote = NULL;
1206                 for (j = 0; j < qn->qn_numnodes; ++j) {
1207                         if (qn->qn_nodes[j].ni_nodenum == i) {
1208                                 remote = &(qn->qn_nodes[j]);
1209                                 break;
1210                         }
1211                 }
1212
1213                 if (!local && !remote)
1214                         continue;
1215
1216                 if ((local && !remote) || (!local && remote))
1217                         status = -EINVAL;
1218
1219                 if (!status &&
1220                     ((remote->ni_nodenum != local->nd_num) ||
1221                      (remote->ni_ipv4_port != local->nd_ipv4_port) ||
1222                      (remote->ni_ipv4_address != local->nd_ipv4_address)))
1223                         status = -EINVAL;
1224
1225                 if (status) {
1226                         if (remote && !local)
1227                                 mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) "
1228                                      "registered in joining node %d but not in "
1229                                      "local node %d\n", qn->qn_domain,
1230                                      remote->ni_nodenum,
1231                                      &(remote->ni_ipv4_address),
1232                                      ntohs(remote->ni_ipv4_port),
1233                                      qn->qn_nodenum, dlm->node_num);
1234                         if (local && !remote)
1235                                 mlog(ML_ERROR, "Domain %s: Node %d (%pI4:%u) "
1236                                      "registered in local node %d but not in "
1237                                      "joining node %d\n", qn->qn_domain,
1238                                      local->nd_num, &(local->nd_ipv4_address),
1239                                      ntohs(local->nd_ipv4_port),
1240                                      dlm->node_num, qn->qn_nodenum);
1241                         BUG_ON((!local && !remote));
1242                 }
1243
1244                 if (local)
1245                         o2nm_node_put(local);
1246         }
1247
1248         return status;
1249 }
1250
1251 static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
1252 {
1253         struct dlm_query_nodeinfo *qn = NULL;
1254         struct o2nm_node *node;
1255         int ret = 0, status, count, i;
1256
1257         if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
1258                 goto bail;
1259
1260         qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);
1261         if (!qn) {
1262                 ret = -ENOMEM;
1263                 mlog_errno(ret);
1264                 goto bail;
1265         }
1266
1267         for (i = 0, count = 0; i < O2NM_MAX_NODES; ++i) {
1268                 node = o2nm_get_node_by_num(i);
1269                 if (!node)
1270                         continue;
1271                 qn->qn_nodes[count].ni_nodenum = node->nd_num;
1272                 qn->qn_nodes[count].ni_ipv4_port = node->nd_ipv4_port;
1273                 qn->qn_nodes[count].ni_ipv4_address = node->nd_ipv4_address;
1274                 mlog(0, "Node %3d, %pI4:%u\n", node->nd_num,
1275                      &(node->nd_ipv4_address), ntohs(node->nd_ipv4_port));
1276                 ++count;
1277                 o2nm_node_put(node);
1278         }
1279
1280         qn->qn_nodenum = dlm->node_num;
1281         qn->qn_numnodes = count;
1282         qn->qn_namelen = strlen(dlm->name);
1283         memcpy(qn->qn_domain, dlm->name, qn->qn_namelen);
1284
1285         i = -1;
1286         while ((i = find_next_bit(node_map, O2NM_MAX_NODES,
1287                                   i + 1)) < O2NM_MAX_NODES) {
1288                 if (i == dlm->node_num)
1289                         continue;
1290
1291                 mlog(0, "Sending nodeinfo to node %d\n", i);
1292
1293                 ret = o2net_send_message(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
1294                                          qn, sizeof(struct dlm_query_nodeinfo),
1295                                          i, &status);
1296                 if (ret >= 0)
1297                         ret = status;
1298                 if (ret) {
1299                         mlog(ML_ERROR, "node mismatch %d, node %d\n", ret, i);
1300                         break;
1301                 }
1302         }
1303
1304 bail:
1305         kfree(qn);
1306         return ret;
1307 }
1308
1309 static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
1310                                       void *data, void **ret_data)
1311 {
1312         struct dlm_query_nodeinfo *qn;
1313         struct dlm_ctxt *dlm = NULL;
1314         int locked = 0, status = -EINVAL;
1315
1316         qn = (struct dlm_query_nodeinfo *) msg->buf;
1317
1318         mlog(0, "Node %u queries nodes on domain %s\n", qn->qn_nodenum,
1319              qn->qn_domain);
1320
1321         spin_lock(&dlm_domain_lock);
1322         dlm = __dlm_lookup_domain_full(qn->qn_domain, qn->qn_namelen);
1323         if (!dlm) {
1324                 mlog(ML_ERROR, "Node %d queried nodes on domain %s before "
1325                      "join domain\n", qn->qn_nodenum, qn->qn_domain);
1326                 goto bail;
1327         }
1328
1329         spin_lock(&dlm->spinlock);
1330         locked = 1;
1331         if (dlm->joining_node != qn->qn_nodenum) {
1332                 mlog(ML_ERROR, "Node %d queried nodes on domain %s but "
1333                      "joining node is %d\n", qn->qn_nodenum, qn->qn_domain,
1334                      dlm->joining_node);
1335                 goto bail;
1336         }
1337
1338         /* Support for node query was added in 1.1 */
1339         if (dlm->dlm_locking_proto.pv_major == 1 &&
1340             dlm->dlm_locking_proto.pv_minor == 0) {
1341                 mlog(ML_ERROR, "Node %d queried nodes on domain %s "
1342                      "but active dlm protocol is %d.%d\n", qn->qn_nodenum,
1343                      qn->qn_domain, dlm->dlm_locking_proto.pv_major,
1344                      dlm->dlm_locking_proto.pv_minor);
1345                 goto bail;
1346         }
1347
1348         status = dlm_match_nodes(dlm, qn);
1349
1350 bail:
1351         if (locked)
1352                 spin_unlock(&dlm->spinlock);
1353         spin_unlock(&dlm_domain_lock);
1354
1355         return status;
1356 }
1357
1358 static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
1359                                    void **ret_data)
1360 {
1361         struct dlm_cancel_join *cancel;
1362         struct dlm_ctxt *dlm = NULL;
1363
1364         cancel = (struct dlm_cancel_join *) msg->buf;
1365
1366         mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
1367                   cancel->domain);
1368
1369         spin_lock(&dlm_domain_lock);
1370         dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);
1371
1372         if (dlm) {
1373                 spin_lock(&dlm->spinlock);
1374
1375                 /* Yikes, this guy wants to cancel his join. No
1376                  * problem, we simply clean up our join state. */
1377                 BUG_ON(dlm->joining_node != cancel->node_idx);
1378                 __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1379
1380                 spin_unlock(&dlm->spinlock);
1381         }
1382         spin_unlock(&dlm_domain_lock);
1383
1384         return 0;
1385 }
1386
1387 static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
1388                                     unsigned int node)
1389 {
1390         int status;
1391         struct dlm_cancel_join cancel_msg;
1392
1393         memset(&cancel_msg, 0, sizeof(cancel_msg));
1394         cancel_msg.node_idx = dlm->node_num;
1395         cancel_msg.name_len = strlen(dlm->name);
1396         memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);
1397
1398         status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
1399                                     &cancel_msg, sizeof(cancel_msg), node,
1400                                     NULL);
1401         if (status < 0) {
1402                 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
1403                      "node %u\n", status, DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
1404                      node);
1405                 goto bail;
1406         }
1407
1408 bail:
1409         return status;
1410 }
1411
1412 /* map_size should be in bytes. */
1413 static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
1414                                  unsigned long *node_map,
1415                                  unsigned int map_size)
1416 {
1417         int status, tmpstat;
1418         unsigned int node;
1419
1420         if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
1421                          sizeof(unsigned long))) {
1422                 mlog(ML_ERROR,
1423                      "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
1424                      map_size, (unsigned)BITS_TO_LONGS(O2NM_MAX_NODES));
1425                 return -EINVAL;
1426         }
1427
1428         status = 0;
1429         node = -1;
1430         while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
1431                                      node + 1)) < O2NM_MAX_NODES) {
1432                 if (node == dlm->node_num)
1433                         continue;
1434
1435                 tmpstat = dlm_send_one_join_cancel(dlm, node);
1436                 if (tmpstat) {
1437                         mlog(ML_ERROR, "Error return %d cancelling join on "
1438                              "node %d\n", tmpstat, node);
1439                         if (!status)
1440                                 status = tmpstat;
1441                 }
1442         }
1443
1444         if (status)
1445                 mlog_errno(status);
1446         return status;
1447 }
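
/*
 * The map_size check expects the byte size of a full O2NM_MAX_NODES bitmap,
 * so a caller passes something like:
 *
 *	unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *	...
 *	status = dlm_send_join_cancels(dlm, map, sizeof(map));
 */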
1448
1449 static int dlm_request_join(struct dlm_ctxt *dlm,
1450                             int node,
1451                             enum dlm_query_join_response_code *response)
1452 {
1453         int status;
1454         struct dlm_query_join_request join_msg;
1455         struct dlm_query_join_packet packet;
1456         u32 join_resp;
1457
1458         mlog(0, "querying node %d\n", node);
1459
1460         memset(&join_msg, 0, sizeof(join_msg));
1461         join_msg.node_idx = dlm->node_num;
1462         join_msg.name_len = strlen(dlm->name);
1463         memcpy(join_msg.domain, dlm->name, join_msg.name_len);
1464         join_msg.dlm_proto = dlm->dlm_locking_proto;
1465         join_msg.fs_proto = dlm->fs_locking_proto;
1466
1467         /* copy live node map to join message */
1468         byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);
1469
1470         status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
1471                                     sizeof(join_msg), node, &join_resp);
1472         if (status < 0 && status != -ENOPROTOOPT) {
1473                 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
1474                      "node %u\n", status, DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
1475                      node);
1476                 goto bail;
1477         }
1478         dlm_query_join_wire_to_packet(join_resp, &packet);
1479
1480         /* -ENOPROTOOPT from the net code means the other side isn't
1481          * listening for our message type -- that's fine, it means
1482          * its dlm isn't up, so we can consider it a 'yes' but not
1483          * joined into the domain.  */
1484         if (status == -ENOPROTOOPT) {
1485                 status = 0;
1486                 *response = JOIN_OK_NO_MAP;
1487         } else if (packet.code == JOIN_DISALLOW ||
1488                    packet.code == JOIN_OK_NO_MAP) {
1489                 *response = packet.code;
1490         } else if (packet.code == JOIN_PROTOCOL_MISMATCH) {
1491                 mlog(ML_NOTICE,
1492                      "This node requested DLM locking protocol %u.%u and "
1493                      "filesystem locking protocol %u.%u.  At least one of "
1494                      "the protocol versions on node %d is not compatible, "
1495                      "disconnecting\n",
1496                      dlm->dlm_locking_proto.pv_major,
1497                      dlm->dlm_locking_proto.pv_minor,
1498                      dlm->fs_locking_proto.pv_major,
1499                      dlm->fs_locking_proto.pv_minor,
1500                      node);
1501                 status = -EPROTO;
1502                 *response = packet.code;
1503         } else if (packet.code == JOIN_OK) {
1504                 *response = packet.code;
1505                 /* Use the same locking protocol as the remote node */
1506                 dlm->dlm_locking_proto.pv_minor = packet.dlm_minor;
1507                 dlm->fs_locking_proto.pv_minor = packet.fs_minor;
1508                 mlog(0,
1509                      "Node %d responds JOIN_OK with DLM locking protocol "
1510                      "%u.%u and fs locking protocol %u.%u\n",
1511                      node,
1512                      dlm->dlm_locking_proto.pv_major,
1513                      dlm->dlm_locking_proto.pv_minor,
1514                      dlm->fs_locking_proto.pv_major,
1515                      dlm->fs_locking_proto.pv_minor);
1516         } else {
1517                 status = -EINVAL;
1518                 mlog(ML_ERROR, "invalid response %d from node %u\n",
1519                      packet.code, node);
1520         }
1521
1522         mlog(0, "status %d, node %d response is %d\n", status, node,
1523              *response);
1524
1525 bail:
1526         return status;
1527 }
1528
1529 static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
1530                                     unsigned int node)
1531 {
1532         int status;
1533         struct dlm_assert_joined assert_msg;
1534
1535         mlog(0, "Sending join assert to node %u\n", node);
1536
1537         memset(&assert_msg, 0, sizeof(assert_msg));
1538         assert_msg.node_idx = dlm->node_num;
1539         assert_msg.name_len = strlen(dlm->name);
1540         memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);
1541
1542         status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
1543                                     &assert_msg, sizeof(assert_msg), node,
1544                                     NULL);
1545         if (status < 0)
1546                 mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to "
1547                      "node %u\n", status, DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
1548                      node);
1549
1550         return status;
1551 }
1552
1553 static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
1554                                   unsigned long *node_map)
1555 {
1556         int status, node, live;
1557
1558         status = 0;
1559         node = -1;
1560         while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
1561                                      node + 1)) < O2NM_MAX_NODES) {
1562                 if (node == dlm->node_num)
1563                         continue;
1564
1565                 do {
1566                         /* It is very important that this message be
1567                          * received so we spin until either the node
1568                          * has died or it gets the message. */
1569                         status = dlm_send_one_join_assert(dlm, node);
1570
1571                         spin_lock(&dlm->spinlock);
1572                         live = test_bit(node, dlm->live_nodes_map);
1573                         spin_unlock(&dlm->spinlock);
1574
1575                         if (status) {
1576                                 mlog(ML_ERROR, "Error return %d asserting "
1577                                      "join on node %d\n", status, node);
1578
1579                                 /* give us some time between errors... */
1580                                 if (live)
1581                                         msleep(DLM_DOMAIN_BACKOFF_MS);
1582                         }
1583                 } while (status && live);
1584         }
1585 }
1586
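/*
 * Per-attempt join state: live_map snapshots the live node map at the
 * start of dlm_try_to_join_domain() so dlm_should_restart_join() can
 * detect membership changes, while yes_resp_map collects the nodes
 * that answered JOIN_OK; it later seeds dlm->domain_map and is the
 * target of the join asserts (or cancels, on failure).
 */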
1587 struct domain_join_ctxt {
1588         unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
1589         unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
1590 };
1591
1592 static int dlm_should_restart_join(struct dlm_ctxt *dlm,
1593                                    struct domain_join_ctxt *ctxt,
1594                                    enum dlm_query_join_response_code response)
1595 {
1596         int ret;
1597
1598         if (response == JOIN_DISALLOW) {
1599                 mlog(0, "Latest response of disallow -- should restart\n");
1600                 return 1;
1601         }
1602
1603         spin_lock(&dlm->spinlock);
1604         /* For now, we restart the process if the node maps have
1605          * changed at all */
1606         ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
1607                      sizeof(dlm->live_nodes_map));
1608         spin_unlock(&dlm->spinlock);
1609
1610         if (ret)
1611                 mlog(0, "Node maps changed -- should restart\n");
1612
1613         return ret;
1614 }
1615
1616 static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
1617 {
1618         int status = 0, tmpstat, node;
1619         struct domain_join_ctxt *ctxt;
1620         enum dlm_query_join_response_code response = JOIN_DISALLOW;
1621
1622         mlog(0, "%p\n", dlm);
1623
1624         ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1625         if (!ctxt) {
1626                 status = -ENOMEM;
1627                 mlog_errno(status);
1628                 goto bail;
1629         }
1630
1631         /* group sem locking should work for us here -- we're already
1632          * registered for heartbeat events so filling this should be
1633          * atomic wrt getting those handlers called. */
1634         o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));
1635
1636         spin_lock(&dlm->spinlock);
1637         memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));
1638
1639         __dlm_set_joining_node(dlm, dlm->node_num);
1640
1641         spin_unlock(&dlm->spinlock);
1642
1643         node = -1;
1644         while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
1645                                      node + 1)) < O2NM_MAX_NODES) {
1646                 if (node == dlm->node_num)
1647                         continue;
1648
1649                 status = dlm_request_join(dlm, node, &response);
1650                 if (status < 0) {
1651                         mlog_errno(status);
1652                         goto bail;
1653                 }
1654
1655                 /* Ok, either we got a response or the node doesn't have a
1656                  * dlm up. */
1657                 if (response == JOIN_OK)
1658                         set_bit(node, ctxt->yes_resp_map);
1659
1660                 if (dlm_should_restart_join(dlm, ctxt, response)) {
1661                         status = -EAGAIN;
1662                         goto bail;
1663                 }
1664         }
1665
1666         mlog(0, "Yay, done querying nodes!\n");
1667
1668         /* Yay, everyone agrees we can join the domain.  Our domain
1669          * consists of all the nodes that were put in the
1670          * yes_resp_map.  Copy that into our domain map and send a join
1671          * assert message to clean up everyone else's state. */
1672         spin_lock(&dlm->spinlock);
1673         memcpy(dlm->domain_map, ctxt->yes_resp_map,
1674                sizeof(ctxt->yes_resp_map));
1675         set_bit(dlm->node_num, dlm->domain_map);
1676         spin_unlock(&dlm->spinlock);
1677
1678         /* Support for global heartbeat and node info was added in 1.1 */
1679         if (dlm->dlm_locking_proto.pv_major > 1 ||
1680             dlm->dlm_locking_proto.pv_minor > 0) {
1681                 status = dlm_send_nodeinfo(dlm, ctxt->yes_resp_map);
1682                 if (status) {
1683                         mlog_errno(status);
1684                         goto bail;
1685                 }
1686                 status = dlm_send_regions(dlm, ctxt->yes_resp_map);
1687                 if (status) {
1688                         mlog_errno(status);
1689                         goto bail;
1690                 }
1691         }
1692
1693         dlm_send_join_asserts(dlm, ctxt->yes_resp_map);
1694
1695         /* Joined state *must* be set before the joining node
1696          * information, otherwise the query_join handler may read no
1697          * current joiner but a state of NEW and tell joining nodes
1698          * we're not in the domain. */
1699         spin_lock(&dlm_domain_lock);
1700         dlm->dlm_state = DLM_CTXT_JOINED;
1701         dlm->num_joins++;
1702         spin_unlock(&dlm_domain_lock);
1703
1704 bail:
1705         spin_lock(&dlm->spinlock);
1706         __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
1707         if (!status) {
1708                 printk(KERN_NOTICE "o2dlm: Joining domain %s ", dlm->name);
1709                 __dlm_print_nodes(dlm);
1710         }
1711         spin_unlock(&dlm->spinlock);
1712
1713         if (ctxt) {
1714                 /* Do we need to send a cancel message to any nodes? */
1715                 if (status < 0) {
1716                         tmpstat = dlm_send_join_cancels(dlm,
1717                                                         ctxt->yes_resp_map,
1718                                                         sizeof(ctxt->yes_resp_map));
1719                         if (tmpstat < 0)
1720                                 mlog_errno(tmpstat);
1721                 }
1722                 kfree(ctxt);
1723         }
1724
1725         mlog(0, "returning %d\n", status);
1726         return status;
1727 }
1728
1729 static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
1730 {
1731         o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_up);
1732         o2hb_unregister_callback(dlm->name, &dlm->dlm_hb_down);
1733         o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
1734 }
1735
1736 static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
1737 {
1738         int status;
1739
1740         mlog(0, "registering handlers.\n");
1741
1742         o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
1743                             dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
1744         status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_down);
1745         if (status)
1746                 goto bail;
1747
1748         o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
1749                             dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
1750         status = o2hb_register_callback(dlm->name, &dlm->dlm_hb_up);
1751         if (status)
1752                 goto bail;
1753
1754         status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
1755                                         sizeof(struct dlm_master_request),
1756                                         dlm_master_request_handler,
1757                                         dlm, NULL, &dlm->dlm_domain_handlers);
1758         if (status)
1759                 goto bail;
1760
1761         status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
1762                                         sizeof(struct dlm_assert_master),
1763                                         dlm_assert_master_handler,
1764                                         dlm, dlm_assert_master_post_handler,
1765                                         &dlm->dlm_domain_handlers);
1766         if (status)
1767                 goto bail;
1768
1769         status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
1770                                         sizeof(struct dlm_create_lock),
1771                                         dlm_create_lock_handler,
1772                                         dlm, NULL, &dlm->dlm_domain_handlers);
1773         if (status)
1774                 goto bail;
1775
1776         status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
1777                                         DLM_CONVERT_LOCK_MAX_LEN,
1778                                         dlm_convert_lock_handler,
1779                                         dlm, NULL, &dlm->dlm_domain_handlers);
1780         if (status)
1781                 goto bail;
1782
1783         status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
1784                                         DLM_UNLOCK_LOCK_MAX_LEN,
1785                                         dlm_unlock_lock_handler,
1786                                         dlm, NULL, &dlm->dlm_domain_handlers);
1787         if (status)
1788                 goto bail;
1789
1790         status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
1791                                         DLM_PROXY_AST_MAX_LEN,
1792                                         dlm_proxy_ast_handler,
1793                                         dlm, NULL, &dlm->dlm_domain_handlers);
1794         if (status)
1795                 goto bail;
1796
1797         status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
1798                                         sizeof(struct dlm_exit_domain),
1799                                         dlm_exit_domain_handler,
1800                                         dlm, NULL, &dlm->dlm_domain_handlers);
1801         if (status)
1802                 goto bail;
1803
1804         status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
1805                                         sizeof(struct dlm_deref_lockres),
1806                                         dlm_deref_lockres_handler,
1807                                         dlm, NULL, &dlm->dlm_domain_handlers);
1808         if (status)
1809                 goto bail;
1810
1811         status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
1812                                         sizeof(struct dlm_migrate_request),
1813                                         dlm_migrate_request_handler,
1814                                         dlm, NULL, &dlm->dlm_domain_handlers);
1815         if (status)
1816                 goto bail;
1817
1818         status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
1819                                         DLM_MIG_LOCKRES_MAX_LEN,
1820                                         dlm_mig_lockres_handler,
1821                                         dlm, NULL, &dlm->dlm_domain_handlers);
1822         if (status)
1823                 goto bail;
1824
1825         status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
1826                                         sizeof(struct dlm_master_requery),
1827                                         dlm_master_requery_handler,
1828                                         dlm, NULL, &dlm->dlm_domain_handlers);
1829         if (status)
1830                 goto bail;
1831
1832         status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
1833                                         sizeof(struct dlm_lock_request),
1834                                         dlm_request_all_locks_handler,
1835                                         dlm, NULL, &dlm->dlm_domain_handlers);
1836         if (status)
1837                 goto bail;
1838
1839         status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
1840                                         sizeof(struct dlm_reco_data_done),
1841                                         dlm_reco_data_done_handler,
1842                                         dlm, NULL, &dlm->dlm_domain_handlers);
1843         if (status)
1844                 goto bail;
1845
1846         status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
1847                                         sizeof(struct dlm_begin_reco),
1848                                         dlm_begin_reco_handler,
1849                                         dlm, NULL, &dlm->dlm_domain_handlers);
1850         if (status)
1851                 goto bail;
1852
1853         status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
1854                                         sizeof(struct dlm_finalize_reco),
1855                                         dlm_finalize_reco_handler,
1856                                         dlm, NULL, &dlm->dlm_domain_handlers);
1857         if (status)
1858                 goto bail;
1859
1860         status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key,
1861                                         sizeof(struct dlm_exit_domain),
1862                                         dlm_begin_exit_domain_handler,
1863                                         dlm, NULL, &dlm->dlm_domain_handlers);
1864         if (status)
1865                 goto bail;
1866
1867 bail:
1868         if (status)
1869                 dlm_unregister_domain_handlers(dlm);
1870
1871         return status;
1872 }
1873
1874 static int dlm_join_domain(struct dlm_ctxt *dlm)
1875 {
1876         int status;
1877         unsigned int backoff;
1878         unsigned int total_backoff = 0;
1879
1880         BUG_ON(!dlm);
1881
1882         mlog(0, "Join domain %s\n", dlm->name);
1883
1884         status = dlm_register_domain_handlers(dlm);
1885         if (status) {
1886                 mlog_errno(status);
1887                 goto bail;
1888         }
1889
1890         status = dlm_debug_init(dlm);
1891         if (status < 0) {
1892                 mlog_errno(status);
1893                 goto bail;
1894         }
1895
1896         status = dlm_launch_thread(dlm);
1897         if (status < 0) {
1898                 mlog_errno(status);
1899                 goto bail;
1900         }
1901
1902         status = dlm_launch_recovery_thread(dlm);
1903         if (status < 0) {
1904                 mlog_errno(status);
1905                 goto bail;
1906         }
1907
1908         dlm->dlm_worker = create_singlethread_workqueue("dlm_wq");
1909         if (!dlm->dlm_worker) {
1910                 status = -ENOMEM;
1911                 mlog_errno(status);
1912                 goto bail;
1913         }
1914
1915         do {
1916                 status = dlm_try_to_join_domain(dlm);
1917
1918                 /* If we're racing another node to the join, then we
1919                  * need to back off temporarily and let them
1920                  * complete. */
1921 #define DLM_JOIN_TIMEOUT_MSECS  90000
1922                 if (status == -EAGAIN) {
1923                         if (signal_pending(current)) {
1924                                 status = -ERESTARTSYS;
1925                                 goto bail;
1926                         }
1927
1928                         if (total_backoff >
1929                             msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) {
1930                                 status = -ERESTARTSYS;
1931                                 mlog(ML_NOTICE, "Timed out joining dlm domain "
1932                                      "%s after %u msecs\n", dlm->name,
1933                                      jiffies_to_msecs(total_backoff));
1934                                 goto bail;
1935                         }
1936
1937                         /*
1938                          * <chip> After you!
1939                          * <dale> No, after you!
1940                          * <chip> I insist!
1941                          * <dale> But you first!
1942                          * ...
1943                          */
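                        /* Back off for 0-3 multiples of DLM_DOMAIN_BACKOFF_MS,
                         * using the low bits of jiffies as cheap jitter so the
                         * racing nodes are unlikely to retry in lockstep. */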
1944                         backoff = (unsigned int)(jiffies & 0x3);
1945                         backoff *= DLM_DOMAIN_BACKOFF_MS;
1946                         total_backoff += backoff;
1947                         mlog(0, "backoff %d\n", backoff);
1948                         msleep(backoff);
1949                 }
1950         } while (status == -EAGAIN);
1951
1952         if (status < 0) {
1953                 mlog_errno(status);
1954                 goto bail;
1955         }
1956
1957         status = 0;
1958 bail:
1959         wake_up(&dlm_domain_events);
1960
1961         if (status) {
1962                 dlm_unregister_domain_handlers(dlm);
1963                 dlm_debug_shutdown(dlm);
1964                 dlm_complete_thread(dlm);
1965                 dlm_complete_recovery_thread(dlm);
1966                 dlm_destroy_dlm_worker(dlm);
1967         }
1968
1969         return status;
1970 }
1971
1972 static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
1973                                 u32 key)
1974 {
1975         int i;
1976         int ret;
1977         struct dlm_ctxt *dlm = NULL;
1978
1979         dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
1980         if (!dlm) {
1981                 mlog_errno(-ENOMEM);
1982                 goto leave;
1983         }
1984
1985         dlm->name = kstrdup(domain, GFP_KERNEL);
1986         if (dlm->name == NULL) {
1987                 mlog_errno(-ENOMEM);
1988                 kfree(dlm);
1989                 dlm = NULL;
1990                 goto leave;
1991         }
1992
1993         dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
1994         if (!dlm->lockres_hash) {
1995                 mlog_errno(-ENOMEM);
1996                 kfree(dlm->name);
1997                 kfree(dlm);
1998                 dlm = NULL;
1999                 goto leave;
2000         }
2001
2002         for (i = 0; i < DLM_HASH_BUCKETS; i++)
2003                 INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));
2004
2005         dlm->master_hash = (struct hlist_head **)
2006                                 dlm_alloc_pagevec(DLM_HASH_PAGES);
2007         if (!dlm->master_hash) {
2008                 mlog_errno(-ENOMEM);
2009                 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
2010                 kfree(dlm->name);
2011                 kfree(dlm);
2012                 dlm = NULL;
2013                 goto leave;
2014         }
2015
2016         for (i = 0; i < DLM_HASH_BUCKETS; i++)
2017                 INIT_HLIST_HEAD(dlm_master_hash(dlm, i));
2018
2019         dlm->key = key;
2020         dlm->node_num = o2nm_this_node();
2021
2022         ret = dlm_create_debugfs_subroot(dlm);
2023         if (ret < 0) {
2024                 dlm_free_pagevec((void **)dlm->master_hash, DLM_HASH_PAGES);
2025                 dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);
2026                 kfree(dlm->name);
2027                 kfree(dlm);
2028                 dlm = NULL;
2029                 goto leave;
2030         }
2031
2032         spin_lock_init(&dlm->spinlock);
2033         spin_lock_init(&dlm->master_lock);
2034         spin_lock_init(&dlm->ast_lock);
2035         spin_lock_init(&dlm->track_lock);
2036         INIT_LIST_HEAD(&dlm->list);
2037         INIT_LIST_HEAD(&dlm->dirty_list);
2038         INIT_LIST_HEAD(&dlm->reco.resources);
2039         INIT_LIST_HEAD(&dlm->reco.received);
2040         INIT_LIST_HEAD(&dlm->reco.node_data);
2041         INIT_LIST_HEAD(&dlm->purge_list);
2042         INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
2043         INIT_LIST_HEAD(&dlm->tracking_list);
2044         dlm->reco.state = 0;
2045
2046         INIT_LIST_HEAD(&dlm->pending_asts);
2047         INIT_LIST_HEAD(&dlm->pending_basts);
2048
2049         mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
2050                   dlm->recovery_map, &(dlm->recovery_map[0]));
2051
2052         memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map));
2053         memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map));
2054         memset(dlm->domain_map, 0, sizeof(dlm->domain_map));
2055
2056         dlm->dlm_thread_task = NULL;
2057         dlm->dlm_reco_thread_task = NULL;
2058         dlm->dlm_worker = NULL;
2059         init_waitqueue_head(&dlm->dlm_thread_wq);
2060         init_waitqueue_head(&dlm->dlm_reco_thread_wq);
2061         init_waitqueue_head(&dlm->reco.event);
2062         init_waitqueue_head(&dlm->ast_wq);
2063         init_waitqueue_head(&dlm->migration_wq);
2064         INIT_LIST_HEAD(&dlm->mle_hb_events);
2065
2066         dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
2067         init_waitqueue_head(&dlm->dlm_join_events);
2068
2069         dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
2070         dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
2071
2072         atomic_set(&dlm->res_tot_count, 0);
2073         atomic_set(&dlm->res_cur_count, 0);
2074         for (i = 0; i < DLM_MLE_NUM_TYPES; ++i) {
2075                 atomic_set(&dlm->mle_tot_count[i], 0);
2076                 atomic_set(&dlm->mle_cur_count[i], 0);
2077         }
2078
2079         spin_lock_init(&dlm->work_lock);
2080         INIT_LIST_HEAD(&dlm->work_list);
2081         INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);
2082
2083         kref_init(&dlm->dlm_refs);
2084         dlm->dlm_state = DLM_CTXT_NEW;
2085
2086         INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);
2087
2088         mlog(0, "context init: refcount %u\n",
2089                   atomic_read(&dlm->dlm_refs.refcount));
2090
2091 leave:
2092         return dlm;
2093 }
2094
2095 /*
2096  * Compare a requested locking protocol version against the current one.
2097  *
2098  * If the major numbers are different, they are incompatible.
2099  * If the current minor is greater than the request, they are incompatible.
2100  * If the current minor is less than or equal to the request, they are
2101  * compatible, and the requester should run at the current minor version.
2102  */
2103 static int dlm_protocol_compare(struct dlm_protocol_version *existing,
2104                                 struct dlm_protocol_version *request)
2105 {
2106         if (existing->pv_major != request->pv_major)
2107                 return 1;
2108
2109         if (existing->pv_minor > request->pv_minor)
2110                 return 1;
2111
2112         if (existing->pv_minor < request->pv_minor)
2113                 request->pv_minor = existing->pv_minor;
2114
2115         return 0;
2116 }
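
/*
 * For example: existing 1.2 vs. requested 1.5 is compatible and the
 * request is downgraded to minor 2, while existing 1.2 vs. requested
 * 1.1 (or any difference in the major number) is rejected.
 */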
2117
2118 /*
2119  * dlm_register_domain: one-time setup per "domain".
2120  *
2121  * The filesystem passes in the requested locking version via proto.
2122  * If registration was successful, proto will contain the negotiated
2123  * locking protocol.
2124  */
2125 struct dlm_ctxt *dlm_register_domain(const char *domain,
2126                                u32 key,
2127                                struct dlm_protocol_version *fs_proto)
2128 {
2129         int ret;
2130         struct dlm_ctxt *dlm = NULL;
2131         struct dlm_ctxt *new_ctxt = NULL;
2132
2133         if (strlen(domain) >= O2NM_MAX_NAME_LEN) {
2134                 ret = -ENAMETOOLONG;
2135                 mlog(ML_ERROR, "domain name too long\n");
2136                 goto leave;
2137         }
2138
2139         mlog(0, "register called for domain \"%s\"\n", domain);
2140
2141 retry:
2142         dlm = NULL;
2143         if (signal_pending(current)) {
2144                 ret = -ERESTARTSYS;
2145                 mlog_errno(ret);
2146                 goto leave;
2147         }
2148
2149         spin_lock(&dlm_domain_lock);
2150
2151         dlm = __dlm_lookup_domain(domain);
2152         if (dlm) {
2153                 if (dlm->dlm_state != DLM_CTXT_JOINED) {
2154                         spin_unlock(&dlm_domain_lock);
2155
2156                         mlog(0, "This ctxt is not joined yet!\n");
2157                         wait_event_interruptible(dlm_domain_events,
2158                                                  dlm_wait_on_domain_helper(
2159                                                          domain));
2160                         goto retry;
2161                 }
2162
2163                 if (dlm_protocol_compare(&dlm->fs_locking_proto, fs_proto)) {
2164                         spin_unlock(&dlm_domain_lock);
2165                         mlog(ML_ERROR,
2166                              "Requested locking protocol version is not "
2167                              "compatible with already registered domain "
2168                              "\"%s\"\n", domain);
2169                         ret = -EPROTO;
2170                         goto leave;
2171                 }
2172
2173                 __dlm_get(dlm);
2174                 dlm->num_joins++;
2175
2176                 spin_unlock(&dlm_domain_lock);
2177
2178                 ret = 0;
2179                 goto leave;
2180         }
2181
2182         /* doesn't exist */
2183         if (!new_ctxt) {
2184                 spin_unlock(&dlm_domain_lock);
2185
2186                 new_ctxt = dlm_alloc_ctxt(domain, key);
2187                 if (new_ctxt)
2188                         goto retry;
2189
2190                 ret = -ENOMEM;
2191                 mlog_errno(ret);
2192                 goto leave;
2193         }
2194
2195         /* a little variable switch-a-roo here... */
2196         dlm = new_ctxt;
2197         new_ctxt = NULL;
2198
2199         /* add the new domain */
2200         list_add_tail(&dlm->list, &dlm_domains);
2201         spin_unlock(&dlm_domain_lock);
2202
2203         /*
2204          * Pass the locking protocol version into the join.  If the join
2205          * succeeds, it will have the negotiated protocol set.
2206          */
2207         dlm->dlm_locking_proto = dlm_protocol;
2208         dlm->fs_locking_proto = *fs_proto;
2209
2210         ret = dlm_join_domain(dlm);
2211         if (ret) {
2212                 mlog_errno(ret);
2213                 dlm_put(dlm);
2214                 goto leave;
2215         }
2216
2217         /* Tell the caller what locking protocol we negotiated */
2218         *fs_proto = dlm->fs_locking_proto;
2219
2220         ret = 0;
2221 leave:
2222         if (new_ctxt)
2223                 dlm_free_ctxt_mem(new_ctxt);
2224
2225         if (ret < 0)
2226                 dlm = ERR_PTR(ret);
2227
2228         return dlm;
2229 }
2230 EXPORT_SYMBOL_GPL(dlm_register_domain);
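
/*
 * Rough usage sketch for a caller (hypothetical names; the domain string
 * and key below are placeholders, not values from this file):
 *
 *	struct dlm_protocol_version fs_proto = { .pv_major = 1, .pv_minor = 0 };
 *	struct dlm_ctxt *dlm;
 *
 *	dlm = dlm_register_domain("0123456789ABCDEF0123456789ABCDEF",
 *				  some_key, &fs_proto);
 *	if (IS_ERR(dlm))
 *		return PTR_ERR(dlm);
 *
 * On success fs_proto holds the negotiated fs locking protocol and the
 * registration must eventually be balanced by dlm_unregister_domain().
 */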
2231
2232 static LIST_HEAD(dlm_join_handlers);
2233
2234 static void dlm_unregister_net_handlers(void)
2235 {
2236         o2net_unregister_handler_list(&dlm_join_handlers);
2237 }
2238
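/*
 * These join-time handlers are keyed on DLM_MOD_KEY and registered once
 * at module init, unlike the per-domain handlers registered above with
 * dlm->key, which live only for the lifetime of a single domain.
 */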
2239 static int dlm_register_net_handlers(void)
2240 {
2241         int status = 0;
2242
2243         status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
2244                                         sizeof(struct dlm_query_join_request),
2245                                         dlm_query_join_handler,
2246                                         NULL, NULL, &dlm_join_handlers);
2247         if (status)
2248                 goto bail;
2249
2250         status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
2251                                         sizeof(struct dlm_assert_joined),
2252                                         dlm_assert_joined_handler,
2253                                         NULL, NULL, &dlm_join_handlers);
2254         if (status)
2255                 goto bail;
2256
2257         status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
2258                                         sizeof(struct dlm_cancel_join),
2259                                         dlm_cancel_join_handler,
2260                                         NULL, NULL, &dlm_join_handlers);
2261         if (status)
2262                 goto bail;
2263
2264         status = o2net_register_handler(DLM_QUERY_REGION, DLM_MOD_KEY,
2265                                         sizeof(struct dlm_query_region),
2266                                         dlm_query_region_handler,
2267                                         NULL, NULL, &dlm_join_handlers);
2268
2269         if (status)
2270                 goto bail;
2271
2272         status = o2net_register_handler(DLM_QUERY_NODEINFO, DLM_MOD_KEY,
2273                                         sizeof(struct dlm_query_nodeinfo),
2274                                         dlm_query_nodeinfo_handler,
2275                                         NULL, NULL, &dlm_join_handlers);
2276 bail:
2277         if (status < 0)
2278                 dlm_unregister_net_handlers();
2279
2280         return status;
2281 }
2282
2283 /* Domain eviction callback handling.
2284  *
2285  * The file system requires notification of node death *before* the
2286  * dlm completes its recovery work, otherwise the file system may be
2287  * able to acquire locks on resources that still require recovery.
2288  * Since the dlm can evict a node from its domain *before* heartbeat
2289  * fires, a similar mechanism is required. */
2290
2291 /* Eviction is not expected to happen often, so a per-domain lock is
2292  * not necessary. Eviction callbacks are allowed to sleep for short
2293  * periods of time. */
2294 static DECLARE_RWSEM(dlm_callback_sem);
2295
2296 void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
2297                                         int node_num)
2298 {
2299         struct list_head *iter;
2300         struct dlm_eviction_cb *cb;
2301
2302         down_read(&dlm_callback_sem);
2303         list_for_each(iter, &dlm->dlm_eviction_callbacks) {
2304                 cb = list_entry(iter, struct dlm_eviction_cb, ec_item);
2305
2306                 cb->ec_func(node_num, cb->ec_data);
2307         }
2308         up_read(&dlm_callback_sem);
2309 }
2310
2311 void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
2312                            dlm_eviction_func *f,
2313                            void *data)
2314 {
2315         INIT_LIST_HEAD(&cb->ec_item);
2316         cb->ec_func = f;
2317         cb->ec_data = data;
2318 }
2319 EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);
2320
2321 void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
2322                               struct dlm_eviction_cb *cb)
2323 {
2324         down_write(&dlm_callback_sem);
2325         list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
2326         up_write(&dlm_callback_sem);
2327 }
2328 EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);
2329
2330 void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
2331 {
2332         down_write(&dlm_callback_sem);
2333         list_del_init(&cb->ec_item);
2334         up_write(&dlm_callback_sem);
2335 }
2336 EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);
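
/*
 * Rough usage sketch (hypothetical caller; my_evict_func and my_data are
 * placeholder names).  The callback runs under dlm_callback_sem held for
 * read and may sleep briefly:
 *
 *	static void my_evict_func(int node_num, void *data)
 *	{
 *		... tear down per-node state before recovery completes ...
 *	}
 *
 *	struct dlm_eviction_cb cb;
 *
 *	dlm_setup_eviction_cb(&cb, my_evict_func, my_data);
 *	dlm_register_eviction_cb(dlm, &cb);
 *	...
 *	dlm_unregister_eviction_cb(&cb);
 */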
2337
2338 static int __init dlm_init(void)
2339 {
2340         int status;
2341
2342         dlm_print_version();
2343
2344         status = dlm_init_mle_cache();
2345         if (status) {
2346                 mlog(ML_ERROR, "Could not create o2dlm_mle slabcache\n");
2347                 goto error;
2348         }
2349
2350         status = dlm_init_master_caches();
2351         if (status) {
2352                 mlog(ML_ERROR, "Could not create o2dlm_lockres and "
2353                      "o2dlm_lockname slabcaches\n");
2354                 goto error;
2355         }
2356
2357         status = dlm_init_lock_cache();
2358         if (status) {
2359                 mlog(ML_ERROR, "Could not create o2dlm_lock slabcache\n");
2360                 goto error;
2361         }
2362
2363         status = dlm_register_net_handlers();
2364         if (status) {
2365                 mlog(ML_ERROR, "Unable to register network handlers\n");
2366                 goto error;
2367         }
2368
2369         status = dlm_create_debugfs_root();
2370         if (status)
2371                 goto error;
2372
2373         return 0;
2374 error:
2375         dlm_unregister_net_handlers();
2376         dlm_destroy_lock_cache();
2377         dlm_destroy_master_caches();
2378         dlm_destroy_mle_cache();
2379         return -1;
2380 }
2381
2382 static void __exit dlm_exit(void)
2383 {
2384         dlm_destroy_debugfs_root();
2385         dlm_unregister_net_handlers();
2386         dlm_destroy_lock_cache();
2387         dlm_destroy_master_caches();
2388         dlm_destroy_mle_cache();
2389 }
2390
2391 MODULE_AUTHOR("Oracle");
2392 MODULE_LICENSE("GPL");
2393
2394 module_init(dlm_init);
2395 module_exit(dlm_exit);