1 /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /* lib/krb5/ccache/ccbase.c - Registration functions for ccache */
4 * Copyright 1990,2004,2008 by the Massachusetts Institute of Technology.
7 * Export of this software from the United States of America may
8 * require a specific license from the United States Government.
9 * It is the responsibility of any person or organization contemplating
10 * export to obtain such a license before exporting.
12 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
13 * distribute this software and its documentation for any purpose and
14 * without fee is hereby granted, provided that the above copyright
15 * notice appear in all copies and that both that copyright notice and
16 * this permission notice appear in supporting documentation, and that
17 * the name of M.I.T. not be used in advertising or publicity pertaining
18 * to distribution of the software without specific, written prior
19 * permission. Furthermore if you modify this software you must label
20 * your software as modified software and not distribute it in such a
21 * fashion that it might be confused with the original M.I.T. software.
22 * M.I.T. makes no representations about the suitability of
23 * this software for any purpose. It is provided "as is" without express
24 * or implied warranty.
28 #include "k5-thread.h"
/*
 * One node in the singly-linked registry of known ccache types; each node
 * maps a type prefix (via ops->prefix) to its operations vector.
 * NOTE(review): the closing braces of both structs are missing from this
 * extraction of the file.
 */
33 struct krb5_cc_typelist {
34 const krb5_cc_ops *ops;
35 struct krb5_cc_typelist *next;
/* Iteration state for walking the type registry (see
 * krb5int_cc_typecursor_new/_next/_free below). */
38 struct krb5_cc_typecursor {
39 struct krb5_cc_typelist *tptr;
41 /* typedef krb5_cc_typecursor in k5-int.h */
/*
 * Statically build the initial type registry as a linked list of
 * file-scope nodes.  The NEXT macro is redefined after each entry so that
 * every node points at the previously declared one; INITIAL_TYPEHEAD ends
 * up naming the head of the chain.  Which entries exist depends on the
 * platform/config macros (_WIN32, NO_FILE_CCACHE, USE_KEYRING_CCACHE).
 * NOTE(review): the interleaved "#undef NEXT" lines (and some #if/#endif
 * lines) appear to be missing from this extraction — the "#define NEXT"
 * following each entry presupposes a preceding #undef.
 */
43 extern const krb5_cc_ops krb5_mcc_ops;
48 extern const krb5_cc_ops krb5_lcc_ops;
49 static struct krb5_cc_typelist cc_lcc_entry = { &krb5_lcc_ops, NEXT };
51 #define NEXT &cc_lcc_entry
55 extern const krb5_cc_ops krb5_cc_stdcc_ops;
56 static struct krb5_cc_typelist cc_stdcc_entry = { &krb5_cc_stdcc_ops, NEXT };
58 #define NEXT &cc_stdcc_entry
61 static struct krb5_cc_typelist cc_mcc_entry = { &krb5_mcc_ops, NEXT };
63 #define NEXT &cc_mcc_entry
65 #ifndef NO_FILE_CCACHE
66 static struct krb5_cc_typelist cc_fcc_entry = { &krb5_cc_file_ops, NEXT };
68 #define NEXT &cc_fcc_entry
71 #ifdef USE_KEYRING_CCACHE
72 extern const krb5_cc_ops krb5_krcc_ops;
73 static struct krb5_cc_typelist cc_krcc_entry = { &krb5_krcc_ops, NEXT };
75 #define NEXT &cc_krcc_entry
76 #endif /* USE_KEYRING_CCACHE */
79 extern const krb5_cc_ops krb5_dcc_ops;
80 static struct krb5_cc_typelist cc_dcc_entry = { &krb5_dcc_ops, NEXT };
82 #define NEXT &cc_dcc_entry
84 extern const krb5_cc_ops krb5_kcm_ops;
85 static struct krb5_cc_typelist cc_kcm_entry = { &krb5_kcm_ops, NEXT };
87 #define NEXT &cc_kcm_entry
88 #endif /* not _WIN32 */
/* Head of the (mutable) type registry; starts at the static chain above.
 * Protected by cc_typelist_lock. */
91 #define INITIAL_TYPEHEAD (NEXT)
92 static struct krb5_cc_typelist *cc_typehead = INITIAL_TYPEHEAD;
93 static k5_mutex_t cc_typelist_lock = K5_MUTEX_PARTIAL_INITIALIZER;
95 /* mutex for krb5_cccol_[un]lock */
96 static k5_cc_mutex cccol_lock = K5_CC_MUTEX_PARTIAL_INITIALIZER;
/* Forward declaration: map a type prefix string to its ops vector. */
98 static krb5_error_code
99 krb5int_cc_getops(krb5_context, const char *, const krb5_cc_ops **);
/*
 * Finish initializing every global lock used by the ccache subsystem
 * (collection lock, typelist lock, and the per-type mutexes that are
 * compiled in).  NOTE(review): the return type, the error checks between
 * the init calls, and the closing brace are missing from this extraction
 * — presumably each err is checked before continuing; confirm against the
 * full file.
 */
102 krb5int_cc_initialize(void)
106 err = k5_cc_mutex_finish_init(&cccol_lock);
109 err = k5_cc_mutex_finish_init(&krb5int_mcc_mutex);
112 err = k5_mutex_finish_init(&cc_typelist_lock);
115 #ifndef NO_FILE_CCACHE
116 err = k5_cc_mutex_finish_init(&krb5int_cc_file_mutex);
120 #ifdef USE_KEYRING_CCACHE
121 err = k5_cc_mutex_finish_init(&krb5int_krcc_mutex);
/*
 * Tear down the ccache subsystem: force-release the collection lock,
 * destroy all global mutexes, and free any typelist nodes that were added
 * at runtime (nodes up to, but not including, the static INITIAL_TYPEHEAD
 * chain — only dynamically registered entries were malloc'd).
 * NOTE(review): the loop body (freeing t) and closing brace are missing
 * from this extraction.
 */
129 krb5int_cc_finalize(void)
131 struct krb5_cc_typelist *t, *t_next;
132 k5_cccol_force_unlock();
133 k5_cc_mutex_destroy(&cccol_lock);
134 k5_mutex_destroy(&cc_typelist_lock);
135 #ifndef NO_FILE_CCACHE
136 k5_cc_mutex_destroy(&krb5int_cc_file_mutex);
138 k5_cc_mutex_destroy(&krb5int_mcc_mutex);
139 #ifdef USE_KEYRING_CCACHE
140 k5_cc_mutex_destroy(&krb5int_krcc_mutex);
/* Free only the dynamically registered nodes; the static chain starting
 * at INITIAL_TYPEHEAD was never heap-allocated. */
142 for (t = cc_typehead; t != INITIAL_TYPEHEAD; t = t_next) {
150 * Register a new credentials cache type
151 * If override is set, replace any existing ccache with that type tag
/* Returns 0 on success, KRB5_CC_TYPE_EXISTS if the prefix is already
 * registered and override is false, or ENOMEM on allocation failure.
 * NOTE(review): several lines (the override-replacement branch, the
 * ENOMEM return, t->ops assignment, cc_typehead update, final return and
 * braces) are missing from this extraction. */
154 krb5_error_code KRB5_CALLCONV
155 krb5_cc_register(krb5_context context, const krb5_cc_ops *ops,
156 krb5_boolean override)
158 struct krb5_cc_typelist *t;
/* Scan the registry for an existing entry with the same prefix; the
 * list is protected by cc_typelist_lock for the whole operation. */
160 k5_mutex_lock(&cc_typelist_lock);
161 for (t = cc_typehead;t && strcmp(t->ops->prefix,ops->prefix);t = t->next)
166 k5_mutex_unlock(&cc_typelist_lock);
/* Duplicate prefix without override: refuse the registration. */
169 k5_mutex_unlock(&cc_typelist_lock);
170 return KRB5_CC_TYPE_EXISTS;
/* New type: allocate a node and push it onto the head of the list. */
173 if (!(t = (struct krb5_cc_typelist *) malloc(sizeof(*t)))) {
174 k5_mutex_unlock(&cc_typelist_lock);
177 t->next = cc_typehead;
180 k5_mutex_unlock(&cc_typelist_lock);
185 * Resolve a credential cache name into a cred. cache object.
187 * The name is currently constrained to be of the form "type:residual";
189 * The "type" portion corresponds to one of the predefined credential
190 * cache types, while the "residual" portion is specific to the
191 * particular cache type.
195 krb5_error_code KRB5_CALLCONV
196 krb5_cc_resolve (krb5_context context, const char *name, krb5_ccache *cache)
/* NOTE(review): local declarations (pfx, cp, resid, pfxlen, err), the
 * NULL-name check, the pfxlen computation, error handling after
 * k5memdup0/getops, the free(pfx), and closing braces are missing from
 * this extraction. */
202 const krb5_cc_ops *ops;
205 return KRB5_CC_BADNAME;
/* Split "type:residual" at the first colon; with no colon, fall back
 * to the default ccache type's resolver on the whole name. */
207 cp = strchr (name, ':');
210 return (*krb5_cc_dfl_ops->resolve)(context, cache, name);
212 return KRB5_CC_BADNAME;
/* Windows-style "C:\path" names: a single alphabetic character before
 * the colon is a drive letter, not a type prefix — treat as FILE. */
217 if ( pfxlen == 1 && isalpha((unsigned char) name[0]) ) {
218 /* We found a drive letter not a prefix - use FILE */
219 pfx = strdup("FILE");
/* Normal case: copy the prefix and point resid past the colon. */
225 resid = name + pfxlen + 1;
226 pfx = k5memdup0(name, pfxlen, &err);
231 *cache = (krb5_ccache) 0;
/* Look up the ops vector for the prefix and delegate to its resolver. */
233 err = krb5int_cc_getops(context, pfx, &ops);
239 return ops->resolve(context, cache, resid);
/* Duplicate a ccache handle by re-resolving its own name through its own
 * ops vector.  NOTE(review): the function's braces are missing from this
 * extraction. */
242 krb5_error_code KRB5_CALLCONV
243 krb5_cc_dup(krb5_context context, krb5_ccache in, krb5_ccache *out)
245 return in->ops->resolve(context, out, in->ops->get_name(context, in));
251 * Internal function to return the ops vector for a given ccache
/* prefix string.  Searches the registered type list first, then falls
 * back to the compiled-in default ops (krb5_cc_dfl_ops).  Returns
 * KRB5_CC_UNKNOWN_TYPE when no match is found.  NOTE(review): the pfx
 * parameter line, the *ops assignment / return 0 inside the match branch,
 * and closing braces are missing from this extraction. */
254 static krb5_error_code
255 krb5int_cc_getops(krb5_context context,
257 const krb5_cc_ops **ops)
259 struct krb5_cc_typelist *tlist;
/* Walk the registry under the typelist lock looking for the prefix. */
261 k5_mutex_lock(&cc_typelist_lock);
262 for (tlist = cc_typehead; tlist; tlist = tlist->next) {
263 if (strcmp (tlist->ops->prefix, pfx) == 0) {
265 k5_mutex_unlock(&cc_typelist_lock);
269 k5_mutex_unlock(&cc_typelist_lock);
/* Not registered: accept the default type's prefix as a last resort. */
270 if (krb5_cc_dfl_ops && !strcmp (pfx, krb5_cc_dfl_ops->prefix)) {
271 *ops = krb5_cc_dfl_ops;
274 return KRB5_CC_UNKNOWN_TYPE;
280 * Generate a new unique ccache, given a ccache type and a hint
281 * string. Ignores the hint string for now.
/* Looks up the ops vector for the requested type and delegates to its
 * gen_new method.  NOTE(review): the function name line, remaining
 * parameter lines, error handling after getops, and braces are missing
 * from this extraction. */
283 krb5_error_code KRB5_CALLCONV
285 krb5_context context,
290 const krb5_cc_ops *ops;
295 TRACE_CC_NEW_UNIQUE(context, type);
296 err = krb5int_cc_getops(context, type, &ops);
300 return ops->gen_new(context, id);
306 * Note: to avoid copying the typelist at cursor creation time, among
307 * other things, we assume that the only additions ever occur to the
/* head of the list, so a cursor can safely snapshot the current head and
 * walk forward without holding the lock for its whole lifetime.
 * NOTE(review): the return type, malloc failure check, *t assignment,
 * return, and braces are missing from this extraction. */
311 krb5int_cc_typecursor_new(krb5_context context, krb5_cc_typecursor *t)
313 krb5_cc_typecursor n = NULL;
316 n = malloc(sizeof(*n));
/* Snapshot the head pointer under the lock; traversal happens later. */
320 k5_mutex_lock(&cc_typelist_lock);
321 n->tptr = cc_typehead;
322 k5_mutex_unlock(&cc_typelist_lock);
/* Return the next registered ops vector via *ops and advance the cursor;
 * NOTE(review): the return type, the end-of-list check, the *ops
 * assignment, return, and braces are missing from this extraction. */
328 krb5int_cc_typecursor_next(krb5_context context,
329 krb5_cc_typecursor t,
330 const krb5_cc_ops **ops)
/* Advance under the typelist lock since new nodes may be pushed at the
 * head concurrently. */
336 k5_mutex_lock(&cc_typelist_lock);
338 t->tptr = t->tptr->next;
339 k5_mutex_unlock(&cc_typelist_lock);
/* Release a type cursor and null out the caller's handle.  NOTE(review):
 * only the signature line of this function is present in this extraction. */
344 krb5int_cc_typecursor_free(krb5_context context, krb5_cc_typecursor *t)
/* Non-atomic fallback for ccache types without a replace method:
 * reinitialize the cache for princ, then store each credential from the
 * NULL-terminated creds array, stopping at the first error.
 * NOTE(review): the return type, local declarations (ret, i), the final
 * return and braces are missing from this extraction.
 * Fix: iterate with i++ rather than advancing the creds parameter pointer
 * (creds++); the old form indexed creds[i] with i pinned at 0 while
 * mutating the parameter — equivalent in effect but inconsistent with the
 * creds[i] indexing and needlessly surprising. */
352 k5_nonatomic_replace(krb5_context context, krb5_ccache ccache,
353 krb5_principal princ, krb5_creds **creds)
358 ret = krb5_cc_initialize(context, ccache, princ);
359 for (i = 0; !ret && creds[i] != NULL; i++)
360 ret = krb5_cc_store_cred(context, ccache, creds[i]);
/* Read all credentials from ccache into a newly allocated, NULL-terminated
 * array returned via *creds_out.  On error the partial list is freed with
 * krb5_free_tgt_creds and the sequence is closed.  NOTE(review): the loop
 * structure, list[i] assignment, success/cleanup paths and braces are
 * missing from this extraction. */
364 static krb5_error_code
365 read_creds(krb5_context context, krb5_ccache ccache, krb5_creds ***creds_out)
368 krb5_cc_cursor cur = NULL;
369 krb5_creds **list = NULL, *cred = NULL, **newptr;
374 ret = krb5_cc_start_seq_get(context, ccache, &cur);
378 /* Allocate one extra entry so that list remains valid for freeing after
379 * we add the next entry and before we reallocate it. */
380 list = k5calloc(2, sizeof(*list), &ret);
/* Fetch credentials one at a time until KRB5_CC_END, growing the array
 * by one slot per iteration (always keeping a NULL terminator). */
386 cred = k5alloc(sizeof(*cred), &ret);
389 ret = krb5_cc_next_cred(context, ccache, &cur, cred);
390 if (ret == KRB5_CC_END)
398 newptr = realloc(list, (i + 2) * sizeof(*list));
399 if (newptr == NULL) {
/* Cleanup path: close the iteration and free whatever was collected. */
413 (void)krb5_cc_end_seq_get(context, ccache, &cur);
414 krb5_free_tgt_creds(context, list);
/* Move the contents of src into dst (principal plus all credentials),
 * preferring the type's atomic replace method when available, then destroy
 * src on success.  Frees the temporary principal and credential list on
 * all paths.  NOTE(review): error-check lines after each call, the
 * cleanup label, final return and braces are missing from this
 * extraction. */
419 krb5_error_code KRB5_CALLCONV
420 krb5_cc_move(krb5_context context, krb5_ccache src, krb5_ccache dst)
423 krb5_principal princ = NULL;
424 krb5_creds **creds = NULL;
426 TRACE_CC_MOVE(context, src, dst);
428 ret = krb5_cc_get_principal(context, src, &princ);
432 ret = read_creds(context, src, &creds);
/* Use the type's atomic replace if implemented; otherwise fall back to
 * the initialize-then-store sequence. */
436 if (dst->ops->replace == NULL)
437 ret = k5_nonatomic_replace(context, dst, princ, creds);
439 ret = dst->ops->replace(context, dst, princ, creds);
443 ret = krb5_cc_destroy(context, src);
446 krb5_free_principal(context, princ);
447 krb5_free_tgt_creds(context, creds);
/* Report whether the named ccache type supports switching the primary
 * cache (i.e. its ops vector implements switch_to).  Unknown types yield
 * FALSE.  NOTE(review): the function's braces are missing from this
 * extraction. */
451 krb5_boolean KRB5_CALLCONV
452 krb5_cc_support_switch(krb5_context context, const char *type)
454 const krb5_cc_ops *ops;
457 err = krb5int_cc_getops(context, type, &ops);
458 return (err ? FALSE : (ops->switch_to != NULL));
/* Initialize a reentrant-per-context ccache mutex (wraps k5_mutex_init);
 * NOTE(review): the return type, owner/refcount initialization, return and
 * braces are missing from this extraction. */
462 k5_cc_mutex_init(k5_cc_mutex *m)
464 krb5_error_code ret = 0;
466 ret = k5_mutex_init(&m->lock);
/* Finish initializing a partially-initialized k5_cc_mutex (wraps
 * k5_mutex_finish_init); NOTE(review): the return type, owner/refcount
 * setup, return and braces are missing from this extraction. */
475 k5_cc_mutex_finish_init(k5_cc_mutex *m)
477 krb5_error_code ret = 0;
479 ret = k5_mutex_finish_init(&m->lock);
/* Debug assertion that m is held by this context: positive refcount,
 * matching owner, and the underlying mutex locked.  NOTE(review): the
 * return type, any #ifdef DEBUG guards, and braces are missing from this
 * extraction. */
488 k5_cc_mutex_assert_locked(krb5_context context, k5_cc_mutex *m)
491 assert(m->refcount > 0);
492 assert(m->owner == context);
494 k5_assert_locked(&m->lock);
/* Debug assertion that m is not held by anyone: zero refcount, no owner,
 * and the underlying mutex unlocked.  NOTE(review): the return type, any
 * #ifdef DEBUG guards, and braces are missing from this extraction. */
498 k5_cc_mutex_assert_unlocked(krb5_context context, k5_cc_mutex *m)
501 assert(m->refcount == 0);
502 assert(m->owner == NULL);
504 k5_assert_unlocked(&m->lock);
/* Acquire a k5_cc_mutex reentrantly for this context: take the real lock
 * only on first acquisition, otherwise just bump the refcount.
 * NOTE(review): the return type, the owner/refcount assignments after
 * acquiring, the refcount increment for the reentrant case, and braces
 * are missing from this extraction. */
508 k5_cc_mutex_lock(krb5_context context, k5_cc_mutex *m)
510 /* not locked or already locked by another context */
511 if (m->owner != context) {
512 /* acquire lock, blocking until available */
513 k5_mutex_lock(&m->lock);
517 /* already locked by this context, just increase refcount */
/* Release one hold on a reentrant k5_cc_mutex; the real lock is released
 * only when the refcount reaches zero.  NOTE(review): the return type,
 * the error/abort handling in the sanity-check branch, the refcount
 * decrement, the owner reset, and braces are missing from this
 * extraction. */
524 k5_cc_mutex_unlock(krb5_context context, k5_cc_mutex *m)
526 /* verify owner and sanity check refcount */
527 if ((m->owner != context) || (m->refcount < 1)) {
530 /* decrement & unlock when count reaches zero */
532 if (m->refcount == 0) {
534 k5_mutex_unlock(&m->lock);
538 /* necessary to make reentrant locks play nice with krb5int_cc_finalize */
/* Unconditionally release m regardless of refcount, for library shutdown;
 * NOTE(review): the return type, the refcount/owner reset, and braces are
 * missing from this extraction. */
540 k5_cc_mutex_force_unlock(k5_cc_mutex *m)
544 if (m->refcount > 0) {
545 k5_mutex_unlock(&m->lock);
550 * holds on to all pertype global locks as well as typelist lock
/* Acquire the collection lock, the typelist lock, and every compiled-in
 * per-type mutex in a fixed order; on a stdcc (CCAPI) locking failure,
 * everything already taken is released in reverse order.  NOTE(review):
 * #if/#endif guards around the platform-specific sections, the final
 * typelist unlock's success path, the return, and braces are missing from
 * this extraction. */
554 k5_cccol_lock(krb5_context context)
556 krb5_error_code ret = 0;
/* Lock order: cccol -> typelist -> file -> mcc -> (krcc) -> stdcc. */
558 k5_cc_mutex_lock(context, &cccol_lock);
559 k5_mutex_lock(&cc_typelist_lock);
560 k5_cc_mutex_lock(context, &krb5int_cc_file_mutex);
561 k5_cc_mutex_lock(context, &krb5int_mcc_mutex);
562 #ifdef USE_KEYRING_CCACHE
563 k5_cc_mutex_lock(context, &krb5int_krcc_mutex);
566 ret = krb5_stdccv3_context_lock(context);
/* stdcc lock failed: back out every lock taken above, reverse order. */
568 k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
569 k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
570 k5_mutex_unlock(&cc_typelist_lock);
571 k5_cc_mutex_unlock(context, &cccol_lock);
575 k5_mutex_unlock(&cc_typelist_lock);
/* Release everything taken by k5_cccol_lock, asserting each lock is
 * actually held before releasing it, in the opposite order of
 * acquisition.  NOTE(review): the return type, #if/#endif guards around
 * platform-specific sections, the return, and braces are missing from
 * this extraction. */
580 k5_cccol_unlock(krb5_context context)
582 krb5_error_code ret = 0;
/* Sanity check: caller must actually hold the collection lock. */
585 k5_cc_mutex_assert_locked(context, &cccol_lock);
587 k5_mutex_lock(&cc_typelist_lock);
589 /* unlock each type in the opposite order */
591 krb5_stdccv3_context_unlock(context);
593 #ifdef USE_KEYRING_CCACHE
594 k5_cc_mutex_assert_locked(context, &krb5int_krcc_mutex);
595 k5_cc_mutex_unlock(context, &krb5int_krcc_mutex);
597 k5_cc_mutex_assert_locked(context, &krb5int_mcc_mutex);
598 k5_cc_mutex_unlock(context, &krb5int_mcc_mutex);
599 k5_cc_mutex_assert_locked(context, &krb5int_cc_file_mutex);
600 k5_cc_mutex_unlock(context, &krb5int_cc_file_mutex);
601 k5_mutex_assert_locked(&cc_typelist_lock);
603 k5_mutex_unlock(&cc_typelist_lock);
604 k5_cc_mutex_unlock(context, &cccol_lock);
609 /* necessary to make reentrant locks play nice with krb5int_cc_finalize */
611 k5_cccol_force_unlock()
614 if ((&cccol_lock)->refcount == 0) {
618 k5_mutex_lock(&cc_typelist_lock);
620 /* unlock each type in the opposite order */
621 #ifdef USE_KEYRING_CCACHE
622 k5_cc_mutex_force_unlock(&krb5int_krcc_mutex);
625 krb5_stdccv3_context_unlock(NULL);
627 k5_cc_mutex_force_unlock(&krb5int_mcc_mutex);
628 k5_cc_mutex_force_unlock(&krb5int_cc_file_mutex);
630 k5_mutex_unlock(&cc_typelist_lock);
631 k5_cc_mutex_force_unlock(&cccol_lock);