/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/ccache/ccbase.c - Registration functions for ccache */
/*
 * Copyright 1990,2004,2008 by the Massachusetts Institute of Technology.
 * All Rights Reserved.
 *
 * Export of this software from the United States of America may
 * require a specific license from the United States Government.
 * It is the responsibility of any person or organization contemplating
 * export to obtain such a license before exporting.
 *
 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
 * distribute this software and its documentation for any purpose and
 * without fee is hereby granted, provided that the above copyright
 * notice appear in all copies and that both that copyright notice and
 * this permission notice appear in supporting documentation, and that
 * the name of M.I.T. not be used in advertising or publicity pertaining
 * to distribution of the software without specific, written prior
 * permission.  Furthermore if you modify this software you must label
 * your software as modified software and not distribute it in such a
 * fashion that it might be confused with the original M.I.T. software.
 * M.I.T. makes no representations about the suitability of
 * this software for any purpose.  It is provided "as is" without express
 * or implied warranty.
 */
#include "k5-int.h"
#include "k5-thread.h"
33 struct krb5_cc_typelist {
34 const krb5_cc_ops *ops;
35 struct krb5_cc_typelist *next;
38 struct krb5_cc_typecursor {
39 struct krb5_cc_typelist *tptr;
41 /* typedef krb5_cc_typecursor in k5-int.h */
43 extern const krb5_cc_ops krb5_mcc_ops;
48 extern const krb5_cc_ops krb5_lcc_ops;
49 static struct krb5_cc_typelist cc_lcc_entry = { &krb5_lcc_ops, NEXT };
51 #define NEXT &cc_lcc_entry
55 extern const krb5_cc_ops krb5_cc_stdcc_ops;
56 static struct krb5_cc_typelist cc_stdcc_entry = { &krb5_cc_stdcc_ops, NEXT };
58 #define NEXT &cc_stdcc_entry
61 static struct krb5_cc_typelist cc_mcc_entry = { &krb5_mcc_ops, NEXT };
63 #define NEXT &cc_mcc_entry
65 #ifndef NO_FILE_CCACHE
66 static struct krb5_cc_typelist cc_fcc_entry = { &krb5_cc_file_ops, NEXT };
68 #define NEXT &cc_fcc_entry
71 #ifdef USE_KEYRING_CCACHE
72 extern const krb5_cc_ops krb5_krcc_ops;
73 static struct krb5_cc_typelist cc_krcc_entry = { &krb5_krcc_ops, NEXT };
75 #define NEXT &cc_krcc_entry
76 #endif /* USE_KEYRING_CCACHE */
79 extern const krb5_cc_ops krb5_dcc_ops;
80 static struct krb5_cc_typelist cc_dcc_entry = { &krb5_dcc_ops, NEXT };
82 #define NEXT &cc_dcc_entry
84 extern const krb5_cc_ops krb5_kcm_ops;
85 static struct krb5_cc_typelist cc_kcm_entry = { &krb5_kcm_ops, NEXT };
87 #define NEXT &cc_kcm_entry
88 #endif /* not _WIN32 */
91 #define INITIAL_TYPEHEAD (NEXT)
92 static struct krb5_cc_typelist *cc_typehead = INITIAL_TYPEHEAD;
93 static k5_mutex_t cc_typelist_lock = K5_MUTEX_PARTIAL_INITIALIZER;
95 /* mutex for krb5_cccol_[un]lock */
96 static k5_cc_mutex cccol_lock = K5_CC_MUTEX_PARTIAL_INITIALIZER;
98 static krb5_error_code
99 krb5int_cc_getops(krb5_context, const char *, const krb5_cc_ops **);
102 krb5int_cc_initialize(void)
106 err = k5_cc_mutex_finish_init(&cccol_lock);
109 err = k5_cc_mutex_finish_init(&krb5int_mcc_mutex);
112 err = k5_mutex_finish_init(&cc_typelist_lock);
115 #ifndef NO_FILE_CCACHE
116 err = k5_cc_mutex_finish_init(&krb5int_cc_file_mutex);
120 #ifdef USE_KEYRING_CCACHE
121 err = k5_cc_mutex_finish_init(&krb5int_krcc_mutex);
129 krb5int_cc_finalize(void)
131 struct krb5_cc_typelist *t, *t_next;
132 k5_cccol_force_unlock();
133 k5_cc_mutex_destroy(&cccol_lock);
134 k5_mutex_destroy(&cc_typelist_lock);
135 #ifndef NO_FILE_CCACHE
136 k5_cc_mutex_destroy(&krb5int_cc_file_mutex);
138 k5_cc_mutex_destroy(&krb5int_mcc_mutex);
139 #ifdef USE_KEYRING_CCACHE
140 k5_cc_mutex_destroy(&krb5int_krcc_mutex);
142 for (t = cc_typehead; t != INITIAL_TYPEHEAD; t = t_next) {
150 * Register a new credentials cache type
151 * If override is set, replace any existing ccache with that type tag
154 krb5_error_code KRB5_CALLCONV
155 krb5_cc_register(krb5_context context, const krb5_cc_ops *ops,
156 krb5_boolean override)
158 struct krb5_cc_typelist *t;
160 k5_mutex_lock(&cc_typelist_lock);
161 for (t = cc_typehead;t && strcmp(t->ops->prefix,ops->prefix);t = t->next)
166 k5_mutex_unlock(&cc_typelist_lock);
169 k5_mutex_unlock(&cc_typelist_lock);
170 return KRB5_CC_TYPE_EXISTS;
173 if (!(t = (struct krb5_cc_typelist *) malloc(sizeof(*t)))) {
174 k5_mutex_unlock(&cc_typelist_lock);
177 t->next = cc_typehead;
180 k5_mutex_unlock(&cc_typelist_lock);
185 * Resolve a credential cache name into a cred. cache object.
187 * The name is currently constrained to be of the form "type:residual";
189 * The "type" portion corresponds to one of the predefined credential
190 * cache types, while the "residual" portion is specific to the
191 * particular cache type.
195 krb5_error_code KRB5_CALLCONV
196 krb5_cc_resolve (krb5_context context, const char *name, krb5_ccache *cache)
202 const krb5_cc_ops *ops;
205 return KRB5_CC_BADNAME;
207 cp = strchr (name, ':');
210 return (*krb5_cc_dfl_ops->resolve)(context, cache, name);
212 return KRB5_CC_BADNAME;
217 if ( pfxlen == 1 && isalpha((unsigned char) name[0]) ) {
218 /* We found a drive letter not a prefix - use FILE */
219 pfx = strdup("FILE");
225 resid = name + pfxlen + 1;
226 pfx = k5memdup0(name, pfxlen, &err);
231 *cache = (krb5_ccache) 0;
233 err = krb5int_cc_getops(context, pfx, &ops);
239 return ops->resolve(context, cache, resid);
242 krb5_error_code KRB5_CALLCONV
243 krb5_cc_dup(krb5_context context, krb5_ccache in, krb5_ccache *out)
245 return in->ops->resolve(context, out, in->ops->get_name(context, in));
251 * Internal function to return the ops vector for a given ccache
254 static krb5_error_code
255 krb5int_cc_getops(krb5_context context,
257 const krb5_cc_ops **ops)
259 struct krb5_cc_typelist *tlist;
261 k5_mutex_lock(&cc_typelist_lock);
262 for (tlist = cc_typehead; tlist; tlist = tlist->next) {
263 if (strcmp (tlist->ops->prefix, pfx) == 0) {
265 k5_mutex_unlock(&cc_typelist_lock);
269 k5_mutex_unlock(&cc_typelist_lock);
270 if (krb5_cc_dfl_ops && !strcmp (pfx, krb5_cc_dfl_ops->prefix)) {
271 *ops = krb5_cc_dfl_ops;
274 return KRB5_CC_UNKNOWN_TYPE;
280 * Generate a new unique ccache, given a ccache type and a hint
281 * string. Ignores the hint string for now.
283 krb5_error_code KRB5_CALLCONV
285 krb5_context context,
290 const krb5_cc_ops *ops;
295 TRACE_CC_NEW_UNIQUE(context, type);
296 err = krb5int_cc_getops(context, type, &ops);
300 return ops->gen_new(context, id);
306 * Note: to avoid copying the typelist at cursor creation time, among
307 * other things, we assume that the only additions ever occur to the
311 krb5int_cc_typecursor_new(krb5_context context, krb5_cc_typecursor *t)
313 krb5_cc_typecursor n = NULL;
316 n = malloc(sizeof(*n));
320 k5_mutex_lock(&cc_typelist_lock);
321 n->tptr = cc_typehead;
322 k5_mutex_unlock(&cc_typelist_lock);
328 krb5int_cc_typecursor_next(krb5_context context,
329 krb5_cc_typecursor t,
330 const krb5_cc_ops **ops)
336 k5_mutex_lock(&cc_typelist_lock);
338 t->tptr = t->tptr->next;
339 k5_mutex_unlock(&cc_typelist_lock);
344 krb5int_cc_typecursor_free(krb5_context context, krb5_cc_typecursor *t)
351 krb5_error_code KRB5_CALLCONV
352 krb5_cc_move(krb5_context context, krb5_ccache src, krb5_ccache dst)
354 krb5_error_code ret = 0;
355 krb5_principal princ = NULL;
357 TRACE_CC_MOVE(context, src, dst);
358 ret = krb5_cccol_lock(context);
363 ret = krb5_cc_lock(context, src);
365 krb5_cccol_unlock(context);
369 ret = krb5_cc_get_principal(context, src, &princ);
371 ret = krb5_cc_initialize(context, dst, princ);
374 krb5_cc_unlock(context, src);
375 krb5_cccol_unlock(context);
379 ret = krb5_cc_lock(context, dst);
381 ret = krb5_cc_copy_creds(context, src, dst);
382 krb5_cc_unlock(context, dst);
385 krb5_cc_unlock(context, src);
387 ret = krb5_cc_destroy(context, src);
389 krb5_cccol_unlock(context);
391 krb5_free_principal(context, princ);
398 krb5_boolean KRB5_CALLCONV
399 krb5_cc_support_switch(krb5_context context, const char *type)
401 const krb5_cc_ops *ops;
404 err = krb5int_cc_getops(context, type, &ops);
405 return (err ? FALSE : (ops->switch_to != NULL));
409 k5_cc_mutex_init(k5_cc_mutex *m)
411 krb5_error_code ret = 0;
413 ret = k5_mutex_init(&m->lock);
422 k5_cc_mutex_finish_init(k5_cc_mutex *m)
424 krb5_error_code ret = 0;
426 ret = k5_mutex_finish_init(&m->lock);
435 k5_cc_mutex_assert_locked(krb5_context context, k5_cc_mutex *m)
438 assert(m->refcount > 0);
439 assert(m->owner == context);
441 k5_assert_locked(&m->lock);
445 k5_cc_mutex_assert_unlocked(krb5_context context, k5_cc_mutex *m)
448 assert(m->refcount == 0);
449 assert(m->owner == NULL);
451 k5_assert_unlocked(&m->lock);
455 k5_cc_mutex_lock(krb5_context context, k5_cc_mutex *m)
457 /* not locked or already locked by another context */
458 if (m->owner != context) {
459 /* acquire lock, blocking until available */
460 k5_mutex_lock(&m->lock);
464 /* already locked by this context, just increase refcount */
471 k5_cc_mutex_unlock(krb5_context context, k5_cc_mutex *m)
473 /* verify owner and sanity check refcount */
474 if ((m->owner != context) || (m->refcount < 1)) {
477 /* decrement & unlock when count reaches zero */
479 if (m->refcount == 0) {
481 k5_mutex_unlock(&m->lock);
485 /* necessary to make reentrant locks play nice with krb5int_cc_finalize */
487 k5_cc_mutex_force_unlock(k5_cc_mutex *m)
491 if (m->refcount > 0) {
492 k5_mutex_unlock(&m->lock);
497 * holds on to all pertype global locks as well as typelist lock
500 krb5_error_code KRB5_CALLCONV
501 krb5_cccol_lock(krb5_context context)
503 krb5_error_code ret = 0;
505 k5_cc_mutex_lock(context, &cccol_lock);
506 k5_mutex_lock(&cc_typelist_lock);
507 k5_cc_mutex_lock(context, &krb5int_cc_file_mutex);
508 k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
509 #ifdef USE_KEYRING_CCACHE
510 k5_cc_mutex_lock(context, &krb5int_krcc_mutex);
513 ret = krb5_stdccv3_context_lock(context);
516 k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
517 k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
518 k5_mutex_unlock(&cc_typelist_lock);
519 k5_cc_mutex_unlock(context, &cccol_lock);
522 k5_mutex_unlock(&cc_typelist_lock);
526 krb5_error_code KRB5_CALLCONV
527 krb5_cccol_unlock(krb5_context context)
529 krb5_error_code ret = 0;
532 k5_cc_mutex_assert_locked(context, &cccol_lock);
534 k5_mutex_lock(&cc_typelist_lock);
536 /* unlock each type in the opposite order */
538 krb5_stdccv3_context_unlock(context);
540 #ifdef USE_KEYRING_CCACHE
541 k5_cc_mutex_assert_locked(context, &krb5int_krcc_mutex);
542 k5_cc_mutex_unlock(context, &krb5int_krcc_mutex);
544 k5_cc_mutex_assert_locked(context, &krb5int_mcc_mutex);
545 k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
546 k5_cc_mutex_assert_locked(context, &krb5int_cc_file_mutex);
547 k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
548 k5_mutex_assert_locked(&cc_typelist_lock);
550 k5_mutex_unlock(&cc_typelist_lock);
551 k5_cc_mutex_unlock(context, &cccol_lock);
556 /* necessary to make reentrant locks play nice with krb5int_cc_finalize */
558 k5_cccol_force_unlock()
561 if ((&cccol_lock)->refcount == 0) {
565 k5_mutex_lock(&cc_typelist_lock);
567 /* unlock each type in the opposite order */
568 #ifdef USE_KEYRING_CCACHE
569 k5_cc_mutex_force_unlock(&krb5int_krcc_mutex);
572 krb5_stdccv3_context_unlock(NULL);
574 k5_cc_mutex_force_unlock(&krb5int_mcc_mutex);
575 k5_cc_mutex_force_unlock(&krb5int_cc_file_mutex);
577 k5_mutex_unlock(&cc_typelist_lock);
578 k5_cc_mutex_force_unlock(&cccol_lock);