1 /******************************************************************************
2 *******************************************************************************
4 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
5 ** Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
7 ** This copyrighted material is made available to anyone wishing to use,
8 ** modify, copy, or redistribute it subject to the terms and conditions
9 ** of the GNU General Public License v.2.
11 *******************************************************************************
12 ******************************************************************************/
14 #include "dlm_internal.h"
15 #include "lockspace.h"
/*
 * put_free_de - return a directory entry to the lockspace free list.
 *
 * Entries placed on ls_recover_list are recycled by get_free_de() during
 * recovery or released by dlm_clear_free_entries().
 */
27 static void put_free_de(struct dlm_ls *ls, struct dlm_direntry *de)
29 spin_lock(&ls->ls_recover_list_lock);
30 list_add(&de->list, &ls->ls_recover_list);
31 spin_unlock(&ls->ls_recover_list_lock);
/*
 * get_free_de - obtain a directory entry with room for a name of @len bytes.
 *
 * First tries to recycle an entry of exactly matching length from
 * ls_recover_list, resetting its master_nodeid and zeroing the name bytes.
 * If none is found, a fresh zeroed entry is allocated with the name stored
 * inline after the struct.  GFP_NOFS avoids recursing into the filesystem
 * during reclaim.
 */
34 static struct dlm_direntry *get_free_de(struct dlm_ls *ls, int len)
37 struct dlm_direntry *de;
39 spin_lock(&ls->ls_recover_list_lock);
40 list_for_each_entry(de, &ls->ls_recover_list, list) {
41 if (de->length == len) {
/* recycle this entry: clear stale master and name bytes */
43 de->master_nodeid = 0;
44 memset(de->name, 0, len);
49 spin_unlock(&ls->ls_recover_list_lock);
/* no reusable entry of this length: allocate struct + trailing name */
52 de = kzalloc(sizeof(struct dlm_direntry) + len, GFP_NOFS);
/*
 * dlm_clear_free_entries - release every entry remaining on the
 * lockspace's ls_recover_list free list.
 */
56 void dlm_clear_free_entries(struct dlm_ls *ls)
58 struct dlm_direntry *de;
60 spin_lock(&ls->ls_recover_list_lock);
61 while (!list_empty(&ls->ls_recover_list)) {
62 de = list_entry(ls->ls_recover_list.next, struct dlm_direntry,
67 spin_unlock(&ls->ls_recover_list_lock);
/*
 * We use the upper 16 bits of the hash value to select the directory node.
 * Low bits are used for distribution of rsb's among hash buckets on each node.
 *
 * Normal case: the upper hash bits taken modulo ls_total_weight index the
 * weighted ls_node_array directly, yielding a nodeid.  Fallback (the array
 * could not be allocated): the bits taken modulo ls_num_nodes are used as
 * an offset into the member list to pick the nodeid.
 */
79 int dlm_hash2nodeid(struct dlm_ls *ls, uint32_t hash)
81 struct list_head *tmp;
82 struct dlm_member *memb = NULL;
/* single-node lockspace: we are always the directory node */
86 if (ls->ls_num_nodes == 1) {
87 nodeid = dlm_our_nodeid();
/* weighted array maps the hash slice straight to a nodeid */
91 if (ls->ls_node_array) {
92 node = (hash >> 16) % ls->ls_total_weight;
93 nodeid = ls->ls_node_array[node];
97 /* make_member_array() failed to kmalloc ls_node_array... */
/* fallback: walk the member list to the node'th entry */
99 node = (hash >> 16) % ls->ls_num_nodes;
101 list_for_each(tmp, &ls->ls_nodes) {
104 memb = list_entry(tmp, struct dlm_member, list);
/* memb must have been found by the walk above */
108 DLM_ASSERT(memb , printk("num_nodes=%u n=%u node=%u\n",
109 ls->ls_num_nodes, n, node););
110 nodeid = memb->nodeid;
/*
 * dlm_dir_nodeid - directory node for an rsb, derived from its name hash.
 */
115 int dlm_dir_nodeid(struct dlm_rsb *r)
117 return dlm_hash2nodeid(r->res_ls, r->res_hash);
/*
 * dir_hash - hash a resource name into a directory-table bucket index.
 * The mask assumes ls_dirtbl_size is a power of two (so & acts as modulus).
 */
120 static inline uint32_t dir_hash(struct dlm_ls *ls, char *name, int len)
124 val = jhash(name, len, 0);
125 val &= (ls->ls_dirtbl_size - 1);
/*
 * add_entry_to_hash - append a directory entry to its hash bucket list.
 * NOTE(review): no locking is visible here; presumably the caller holds
 * the bucket lock -- confirm against callers.
 */
130 static void add_entry_to_hash(struct dlm_ls *ls, struct dlm_direntry *de)
134 bucket = dir_hash(ls, de->name, de->length);
135 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
/*
 * search_bucket - linear scan of one directory hash bucket for an exact
 * name match (same length and identical bytes).
 */
138 static struct dlm_direntry *search_bucket(struct dlm_ls *ls, char *name,
139 int namelen, uint32_t bucket)
141 struct dlm_direntry *de;
143 list_for_each_entry(de, &ls->ls_dirtbl[bucket].list, list) {
144 if (de->length == namelen && !memcmp(name, de->name, namelen))
/*
 * dlm_dir_remove_entry - remove the directory entry for @name on behalf
 * of @nodeid.  Logs an error if no entry exists or if the requester is
 * not the recorded master of the entry.
 */
152 void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen)
154 struct dlm_direntry *de;
157 bucket = dir_hash(ls, name, namelen);
159 spin_lock(&ls->ls_dirtbl[bucket].lock);
161 de = search_bucket(ls, name, namelen, bucket);
/* no entry for this name: nothing to remove, report it */
164 log_error(ls, "remove fr %u none", nodeid);
/* only the recorded master may remove its own entry */
168 if (de->master_nodeid != nodeid) {
169 log_error(ls, "remove fr %u ID %u", nodeid, de->master_nodeid);
176 spin_unlock(&ls->ls_dirtbl[bucket].lock);
/*
 * dlm_dir_clear - empty every bucket of the directory hash table.
 * The recover free list is asserted empty before clearing begins.
 */
179 void dlm_dir_clear(struct dlm_ls *ls)
181 struct list_head *head;
182 struct dlm_direntry *de;
185 DLM_ASSERT(list_empty(&ls->ls_recover_list), );
187 for (i = 0; i < ls->ls_dirtbl_size; i++) {
188 spin_lock(&ls->ls_dirtbl[i].lock);
189 head = &ls->ls_dirtbl[i].list;
190 while (!list_empty(head)) {
191 de = list_entry(head->next, struct dlm_direntry, list);
195 spin_unlock(&ls->ls_dirtbl[i].lock);
/*
 * dlm_recover_directory - rebuild the resource directory after recovery.
 *
 * For each lockspace member, repeatedly request blocks of resource names
 * (dlm_rcom_names) and add one directory entry per received name with that
 * member recorded as master.  Names arrive in the recover buffer as
 * big-endian u16 length prefixes followed by the raw name bytes.
 * Returns 0 on success or a negative errno (-ENOMEM on allocation failure,
 * or an error from a stopped recovery / rcom exchange).
 */
199 int dlm_recover_directory(struct dlm_ls *ls)
201 struct dlm_member *memb;
202 struct dlm_direntry *de;
203 char *b, *last_name = NULL;
204 int error = -ENOMEM, last_len, count = 0;
207 log_debug(ls, "dlm_recover_directory");
/* nothing to rebuild when no distributed directory is used */
209 if (dlm_no_directory(ls))
214 last_name = kmalloc(DLM_RESNAME_MAXLEN, GFP_NOFS);
218 list_for_each_entry(memb, &ls->ls_nodes, list) {
219 memset(last_name, 0, DLM_RESNAME_MAXLEN);
/* bail out if recovery was aborted while we were working */
224 error = dlm_recovery_stopped(ls);
/* ask this member for the next block of names after last_name */
228 error = dlm_rcom_names(ls, memb->nodeid,
229 last_name, last_len);
236 * pick namelen/name pairs out of received buffer
239 b = ls->ls_recover_buf->rc_buf;
240 left = ls->ls_recover_buf->rc_header.h_length;
241 left -= sizeof(struct dlm_rcom);
/* need at least a u16 length prefix remaining */
247 if (left < sizeof(__be16))
250 memcpy(&v, b, sizeof(__be16));
251 namelen = be16_to_cpu(v);
253 left -= sizeof(__be16);
/* namelen of 0xFFFF marks end of names for
   this node; namelen of 0 marks end of the
   block */
259 if (namelen == 0xFFFF)
267 if (namelen > DLM_RESNAME_MAXLEN)
271 de = get_free_de(ls, namelen);
/* record this member as master of the named resource */
275 de->master_nodeid = memb->nodeid;
276 de->length = namelen;
278 memcpy(de->name, b, namelen);
/* remember the last name so the next rcom request resumes after it */
279 memcpy(last_name, b, namelen);
283 add_entry_to_hash(ls, de);
293 log_debug(ls, "dlm_recover_directory %d entries", count);
297 dlm_clear_free_entries(ls);
/*
 * get_entry - look up the master nodeid for a resource name, creating a
 * new entry mastered by @nodeid if none exists.
 *
 * On return *r_nodeid holds the recorded master.  The allocation is done
 * outside the bucket lock, so the bucket is searched a second time
 * afterwards to handle the race where another thread inserted the same
 * name in the meantime.
 */
301 static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
302 int namelen, int *r_nodeid)
304 struct dlm_direntry *de, *tmp;
307 bucket = dir_hash(ls, name, namelen);
309 spin_lock(&ls->ls_dirtbl[bucket].lock);
310 de = search_bucket(ls, name, namelen, bucket);
/* existing entry: report its recorded master */
312 *r_nodeid = de->master_nodeid;
313 spin_unlock(&ls->ls_dirtbl[bucket].lock);
314 if (*r_nodeid == nodeid)
319 spin_unlock(&ls->ls_dirtbl[bucket].lock);
321 if (namelen > DLM_RESNAME_MAXLEN)
/* allocate outside the bucket spinlock (kzalloc may sleep) */
324 de = kzalloc(sizeof(struct dlm_direntry) + namelen, GFP_NOFS);
328 de->master_nodeid = nodeid;
329 de->length = namelen;
330 memcpy(de->name, name, namelen);
/* re-check: someone may have added this name while we allocated */
332 spin_lock(&ls->ls_dirtbl[bucket].lock);
333 tmp = search_bucket(ls, name, namelen, bucket);
338 list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
340 *r_nodeid = de->master_nodeid;
341 spin_unlock(&ls->ls_dirtbl[bucket].lock);
/*
 * dlm_dir_lookup - public wrapper around get_entry(): find or create the
 * directory entry for @name and return its master through r_nodeid.
 */
345 int dlm_dir_lookup(struct dlm_ls *ls, int nodeid, char *name, int namelen,
348 return get_entry(ls, nodeid, name, namelen, r_nodeid);
/*
 * find_rsb_root - locate an rsb by name for use during recovery.
 *
 * Searches the keep rb-tree of the hashed rsb table bucket first, then
 * the toss tree; if the rsb is found in neither, falls back to a linear
 * scan of ls_root_list (logging that the fallback was needed).
 */
351 static struct dlm_rsb *find_rsb_root(struct dlm_ls *ls, char *name, int len)
354 uint32_t hash, bucket;
/* same jhash as dir_hash(), but masked by the rsb table size */
357 hash = jhash(name, len, 0);
358 bucket = hash & (ls->ls_rsbtbl_size - 1);
360 spin_lock(&ls->ls_rsbtbl[bucket].lock);
361 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].keep, name, len, 0, &r);
/* not on the keep tree: try the toss tree of the same bucket */
363 rv = dlm_search_rsb_tree(&ls->ls_rsbtbl[bucket].toss,
365 spin_unlock(&ls->ls_rsbtbl[bucket].lock);
/* fallback: exhaustive search of the recovery root list */
370 down_read(&ls->ls_root_sem);
371 list_for_each_entry(r, &ls->ls_root_list, res_root_list) {
372 if (len == r->res_length && !memcmp(name, r->res_name, len)) {
373 up_read(&ls->ls_root_sem);
374 log_error(ls, "find_rsb_root revert to root_list %s",
379 up_read(&ls->ls_root_sem);
/*
 * dlm_copy_master_names - fill outbuf with resource names for @nodeid.
 *
 * Find the rsb where we left off (or start again), then send rsb names
 * for rsb's we're master of and whose directory node matches the
 * requesting node.  inbuf is the rsb name last sent, inlen is that
 * name's length.  Each name is written as a big-endian u16 length
 * followed by the raw bytes; 0x0000 ends a block, 0xFFFF ends the list.
 */
387 void dlm_copy_master_names(struct dlm_ls *ls, char *inbuf, int inlen,
388 char *outbuf, int outlen, int nodeid)
390 struct list_head *list;
392 int offset = 0, dir_nodeid;
395 down_read(&ls->ls_root_sem);
/* resume after the name sent last time, if it can still be found */
398 r = find_rsb_root(ls, inbuf, inlen);
400 inbuf[inlen - 1] = '\0';
401 log_error(ls, "copy_master_names from %d start %d %s",
402 nodeid, inlen, inbuf);
405 list = r->res_root_list.next;
/* no previous name: start from the head of the root list */
407 list = ls->ls_root_list.next;
410 for (offset = 0; list != &ls->ls_root_list; list = list->next) {
411 r = list_entry(list, struct dlm_rsb, res_root_list);
/* only send names whose directory node is the requester */
415 dir_nodeid = dlm_dir_nodeid(r);
416 if (dir_nodeid != nodeid)
/*
 * The block ends when we can't fit the following in the
 * remaining buffer space:
 *   namelen (uint16_t) +
 *   name (r->res_length) +
 *   end-of-block record 0x0000 (uint16_t)
 */
427 if (offset + sizeof(uint16_t)*2 + r->res_length > outlen) {
428 /* Write end-of-block record */
429 be_namelen = cpu_to_be16(0);
430 memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
431 offset += sizeof(__be16);
/* emit big-endian length prefix followed by the raw name bytes */
435 be_namelen = cpu_to_be16(r->res_length);
436 memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
437 offset += sizeof(__be16);
438 memcpy(outbuf + offset, r->res_name, r->res_length);
439 offset += r->res_length;
/*
 * If we've reached the end of the list (and there's room) write a
 * terminating 0xFFFF record: all names have been sent.
 */
447 if ((list == &ls->ls_root_list) &&
448 (offset + sizeof(uint16_t) <= outlen)) {
449 be_namelen = cpu_to_be16(0xFFFF);
450 memcpy(outbuf + offset, &be_namelen, sizeof(__be16));
451 offset += sizeof(__be16);
455 up_read(&ls->ls_root_sem);