1 /* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2 /* lib/krb5/ccache/cc_kcm.c - KCM cache type (client side) */
4 * Copyright (C) 2014 by the Massachusetts Institute of Technology.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * * Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
22 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
23 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
24 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
25 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
28 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
30 * OF THE POSSIBILITY OF SUCH DAMAGE.
34 * This cache type contacts a daemon for each cache operation, using Heimdal's
35 * KCM protocol. On OS X, the preferred transport is Mach RPC; on other
36 * Unix-like platforms or if the daemon is not available via RPC, Unix domain
37 * sockets are used instead.
45 #include <sys/socket.h>
48 #include <mach/mach.h>
49 #include <servers/bootstrap.h>
53 #define MAX_REPLY_SIZE (10 * 1024 * 1024)
55 const krb5_cc_ops krb5_kcm_ops;
58 unsigned char *uuidbytes; /* all of the uuids concatenated together */
70 /* This structure bundles together a KCM request and reply, to minimize how
71 * much we have to declare and clean up in each method. */
77 #define EMPTY_KCMREQ { EMPTY_K5BUF }
79 struct kcm_cache_data {
80 char *residual; /* immutable; may be accessed without lock */
81 k5_cc_mutex lock; /* protects io and changetime */
83 krb5_timestamp changetime;
87 char *residual; /* primary or singleton subsidiary */
88 struct uuid_list *uuids; /* NULL for singleton subsidiary */
93 /* Map EINVAL or KRB5_CC_FORMAT to KRB5_KCM_MALFORMED_REPLY; pass through all
95 static inline krb5_error_code
96 map_invalid(krb5_error_code code)
98 return (code == EINVAL || code == KRB5_CC_FORMAT) ?
99 KRB5_KCM_MALFORMED_REPLY : code;
102 /* Begin a request for the given opcode. If cache is non-null, supply the
103 * cache name as a request parameter. */
105 kcmreq_init(struct kcmreq *req, kcm_opcode opcode, krb5_ccache cache)
107 unsigned char bytes[4];
110 memset(req, 0, sizeof(*req));
112 bytes[0] = KCM_PROTOCOL_VERSION_MAJOR;
113 bytes[1] = KCM_PROTOCOL_VERSION_MINOR;
114 store_16_be(opcode, bytes + 2);
116 k5_buf_init_dynamic(&req->reqbuf);
117 k5_buf_add_len(&req->reqbuf, bytes, 4);
119 name = ((struct kcm_cache_data *)cache->data)->residual;
120 k5_buf_add_len(&req->reqbuf, name, strlen(name) + 1);
124 /* Add a 32-bit value to the request in big-endian byte order. */
126 kcmreq_put32(struct kcmreq *req, uint32_t val)
128 unsigned char bytes[4];
130 store_32_be(val, bytes);
131 k5_buf_add_len(&req->reqbuf, bytes, 4);
/* The maximum length of an in-band request or reply as defined by the RPC
 * interface. */
#define MAX_INBAND_SIZE 2048
140 /* Connect or reconnect to the KCM daemon via Mach RPC, if possible. */
141 static krb5_error_code
142 kcmio_mach_connect(krb5_context context, struct kcmio *io)
149 ret = profile_get_string(context->profile, KRB5_CONF_LIBDEFAULTS,
150 KRB5_CONF_KCM_MACH_SERVICE, NULL,
151 DEFAULT_KCM_MACH_SERVICE, &service);
154 if (strcmp(service, "-") == 0) {
155 profile_release_string(service);
156 return KRB5_KCM_NO_SERVER;
159 st = bootstrap_look_up(bootstrap_port, service, &mport);
160 profile_release_string(service);
162 return KRB5_KCM_NO_SERVER;
163 if (io->mport != MACH_PORT_NULL)
164 mach_port_deallocate(mach_task_self(), io->mport);
169 /* Invoke the Mach RPC to get a reply from the KCM daemon. */
170 static krb5_error_code
171 kcmio_mach_call(krb5_context context, struct kcmio *io, void *data,
172 size_t len, void **reply_out, size_t *len_out)
175 size_t inband_req_len = 0, outband_req_len = 0, reply_len;
176 char *inband_req = NULL, *outband_req = NULL, *outband_reply, *copy;
177 char inband_reply[MAX_INBAND_SIZE];
178 mach_msg_type_number_t inband_reply_len, outband_reply_len;
186 /* Use the in-band or out-of-band request buffer depending on len. */
187 if (len <= MAX_INBAND_SIZE) {
189 inband_req_len = len;
192 outband_req_len = len;
195 st = k5_kcmrpc_call(io->mport, inband_req, inband_req_len, outband_req,
196 outband_req_len, &code, inband_reply,
197 &inband_reply_len, &outband_reply, &outband_reply_len);
198 if (st == MACH_SEND_INVALID_DEST) {
199 /* Get a new port and try again. */
200 st = kcmio_mach_connect(context, io);
202 return KRB5_KCM_RPC_ERROR;
203 st = k5_kcmrpc_call(io->mport, inband_req, inband_req_len, outband_req,
204 outband_req_len, &code, inband_reply,
205 &inband_reply_len, &outband_reply,
209 return KRB5_KCM_RPC_ERROR;
216 /* The reply could be in the in-band or out-of-band reply buffer. */
217 reply = outband_reply_len ? outband_reply : inband_reply;
218 reply_len = outband_reply_len ? outband_reply_len : inband_reply_len;
219 copy = k5memdup(reply, reply_len, &ret);
224 *len_out = reply_len;
227 if (outband_reply_len) {
228 vm_deallocate(mach_task_self(), (vm_address_t)outband_reply,
234 /* Release any Mach RPC state within io. */
236 kcmio_mach_close(struct kcmio *io)
238 if (io->mport != MACH_PORT_NULL)
239 mach_port_deallocate(mach_task_self(), io->mport);
242 #else /* __APPLE__ */
244 #define kcmio_mach_connect(context, io) EINVAL
245 #define kcmio_mach_call(context, io, data, len, reply_out, len_out) EINVAL
246 #define kcmio_mach_close(io)
250 /* Connect to the KCM daemon via a Unix domain socket. */
251 static krb5_error_code
252 kcmio_unix_socket_connect(krb5_context context, struct kcmio *io)
256 struct sockaddr_un addr;
259 ret = profile_get_string(context->profile, KRB5_CONF_LIBDEFAULTS,
260 KRB5_CONF_KCM_SOCKET, NULL,
261 DEFAULT_KCM_SOCKET_PATH, &path);
264 if (strcmp(path, "-") == 0) {
265 ret = KRB5_KCM_NO_SERVER;
269 fd = socket(AF_UNIX, SOCK_STREAM, 0);
275 memset(&addr, 0, sizeof(addr));
276 addr.sun_family = AF_UNIX;
277 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
278 if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0) {
279 ret = (errno == ENOENT) ? KRB5_KCM_NO_SERVER : errno;
289 profile_release_string(path);
293 /* Write a KCM request: 4-byte big-endian length, then the marshalled
295 static krb5_error_code
296 kcmio_unix_socket_write(krb5_context context, struct kcmio *io, void *request,
301 store_32_be(len, lenbytes);
302 if (krb5_net_write(context, io->fd, lenbytes, 4) < 0)
304 if (krb5_net_write(context, io->fd, request, len) < 0)
309 /* Read a KCM reply: 4-byte big-endian length, 4-byte big-endian status code,
310 * then the marshalled reply. */
311 static krb5_error_code
312 kcmio_unix_socket_read(krb5_context context, struct kcmio *io,
313 void **reply_out, size_t *len_out)
315 krb5_error_code code;
316 char lenbytes[4], codebytes[4], *reply;
323 st = krb5_net_read(context, io->fd, lenbytes, 4);
325 return (st == -1) ? errno : KRB5_CC_IO;
326 len = load_32_be(lenbytes);
327 if (len > MAX_REPLY_SIZE)
328 return KRB5_KCM_REPLY_TOO_BIG;
330 st = krb5_net_read(context, io->fd, codebytes, 4);
332 return (st == -1) ? errno : KRB5_CC_IO;
333 code = load_32_be(codebytes);
340 st = krb5_net_read(context, io->fd, reply, len);
341 if (st == -1 || (size_t)st != len) {
343 return (st < 0) ? errno : KRB5_CC_IO;
351 static krb5_error_code
352 kcmio_connect(krb5_context context, struct kcmio **io_out)
358 io = calloc(1, sizeof(*io));
363 /* Try Mach RPC (OS X only), then fall back to Unix domain sockets */
364 ret = kcmio_mach_connect(context, io);
366 ret = kcmio_unix_socket_connect(context, io);
376 /* Check req->reqbuf for an error condition and return it. Otherwise, send the
377 * request to the KCM daemon and get a response. */
378 static krb5_error_code
379 kcmio_call(krb5_context context, struct kcmio *io, struct kcmreq *req)
382 size_t reply_len = 0;
384 if (k5_buf_status(&req->reqbuf) != 0)
388 ret = kcmio_unix_socket_write(context, io, req->reqbuf.data,
392 ret = kcmio_unix_socket_read(context, io, &req->reply_mem, &reply_len);
396 /* We must be using Mach RPC. */
397 ret = kcmio_mach_call(context, io, req->reqbuf.data, req->reqbuf.len,
398 &req->reply_mem, &reply_len);
403 /* Read the status code from the marshalled reply. */
404 k5_input_init(&req->reply, req->reply_mem, reply_len);
405 ret = k5_input_get_uint32_be(&req->reply);
406 return req->reply.status ? KRB5_KCM_MALFORMED_REPLY : ret;
410 kcmio_close(struct kcmio *io)
413 kcmio_mach_close(io);
420 /* Fetch a zero-terminated name string from req->reply. The returned pointer
421 * is an alias and must not be freed by the caller. */
422 static krb5_error_code
423 kcmreq_get_name(struct kcmreq *req, const char **name_out)
425 const unsigned char *end;
426 struct k5input *in = &req->reply;
429 end = memchr(in->ptr, '\0', in->len);
431 return KRB5_KCM_MALFORMED_REPLY;
432 *name_out = (const char *)in->ptr;
433 (void)k5_input_get_bytes(in, end + 1 - in->ptr);
437 /* Fetch a UUID list from req->reply. UUID lists are not delimited, so we
438 * consume the rest of the input. */
439 static krb5_error_code
440 kcmreq_get_uuid_list(struct kcmreq *req, struct uuid_list **uuids_out)
442 struct uuid_list *uuids;
446 if (req->reply.len % KCM_UUID_LEN != 0)
447 return KRB5_KCM_MALFORMED_REPLY;
449 uuids = malloc(sizeof(*uuids));
452 uuids->count = req->reply.len / KCM_UUID_LEN;
455 if (req->reply.len > 0) {
456 uuids->uuidbytes = malloc(req->reply.len);
457 if (uuids->uuidbytes == NULL) {
461 memcpy(uuids->uuidbytes, req->reply.ptr, req->reply.len);
462 (void)k5_input_get_bytes(&req->reply, req->reply.len);
464 uuids->uuidbytes = NULL;
472 free_uuid_list(struct uuid_list *uuids)
475 free(uuids->uuidbytes);
480 kcmreq_free(struct kcmreq *req)
482 k5_buf_free(&req->reqbuf);
483 free(req->reply_mem);
486 /* Create a krb5_ccache structure. If io is NULL, make a new connection for
487 * the cache. Otherwise, always take ownership of io. */
488 static krb5_error_code
489 make_cache(krb5_context context, const char *residual, struct kcmio *io,
490 krb5_ccache *cache_out)
493 krb5_ccache cache = NULL;
494 struct kcm_cache_data *data = NULL;
495 char *residual_copy = NULL;
500 ret = kcmio_connect(context, &io);
505 cache = malloc(sizeof(*cache));
508 data = calloc(1, sizeof(*data));
511 residual_copy = strdup(residual);
512 if (residual_copy == NULL)
514 if (k5_cc_mutex_init(&data->lock) != 0)
517 data->residual = residual_copy;
519 data->changetime = 0;
520 cache->ops = &krb5_kcm_ops;
522 cache->magic = KV5M_CCACHE;
534 /* Lock cache's I/O structure and use it to call the KCM daemon. If modify is
535 * true, update the last change time. */
536 static krb5_error_code
537 cache_call(krb5_context context, krb5_ccache cache, struct kcmreq *req,
541 struct kcm_cache_data *data = cache->data;
543 k5_cc_mutex_lock(context, &data->lock);
544 ret = kcmio_call(context, data->io, req);
546 data->changetime = time(NULL);
547 k5_cc_mutex_unlock(context, &data->lock);
551 /* Try to propagate the KDC time offset from the cache to the krb5 context. */
553 get_kdc_offset(krb5_context context, krb5_ccache cache)
555 struct kcmreq req = EMPTY_KCMREQ;
558 kcmreq_init(&req, KCM_OP_GET_KDC_OFFSET, cache);
559 if (cache_call(context, cache, &req, FALSE) != 0)
561 time_offset = k5_input_get_uint32_be(&req.reply);
562 if (!req.reply.status)
564 context->os_context.time_offset = time_offset;
565 context->os_context.usec_offset = 0;
566 context->os_context.os_flags &= ~KRB5_OS_TOFFSET_TIME;
567 context->os_context.os_flags |= KRB5_OS_TOFFSET_VALID;
573 /* Try to propagate the KDC offset from the krb5 context to the cache. */
575 set_kdc_offset(krb5_context context, krb5_ccache cache)
579 if (context->os_context.os_flags & KRB5_OS_TOFFSET_VALID) {
580 kcmreq_init(&req, KCM_OP_SET_KDC_OFFSET, cache);
581 kcmreq_put32(&req, context->os_context.time_offset);
582 (void)cache_call(context, cache, &req, TRUE);
587 static const char * KRB5_CALLCONV
588 kcm_get_name(krb5_context context, krb5_ccache cache)
590 return ((struct kcm_cache_data *)cache->data)->residual;
593 static krb5_error_code KRB5_CALLCONV
594 kcm_resolve(krb5_context context, krb5_ccache *cache_out, const char *residual)
597 struct kcmreq req = EMPTY_KCMREQ;
598 struct kcmio *io = NULL;
599 const char *defname = NULL;
603 ret = kcmio_connect(context, &io);
607 if (*residual == '\0') {
608 kcmreq_init(&req, KCM_OP_GET_DEFAULT_CACHE, NULL);
609 ret = kcmio_call(context, io, &req);
612 ret = kcmreq_get_name(&req, &defname);
618 ret = make_cache(context, residual, io, cache_out);
627 static krb5_error_code KRB5_CALLCONV
628 kcm_gen_new(krb5_context context, krb5_ccache *cache_out)
631 struct kcmreq req = EMPTY_KCMREQ;
632 struct kcmio *io = NULL;
637 ret = kcmio_connect(context, &io);
640 kcmreq_init(&req, KCM_OP_GEN_NEW, NULL);
641 ret = kcmio_call(context, io, &req);
644 ret = kcmreq_get_name(&req, &name);
647 ret = make_cache(context, name, io, cache_out);
656 static krb5_error_code KRB5_CALLCONV
657 kcm_initialize(krb5_context context, krb5_ccache cache, krb5_principal princ)
662 kcmreq_init(&req, KCM_OP_INITIALIZE, cache);
663 k5_marshal_princ(&req.reqbuf, 4, princ);
664 ret = cache_call(context, cache, &req, TRUE);
666 set_kdc_offset(context, cache);
670 static krb5_error_code KRB5_CALLCONV
671 kcm_close(krb5_context context, krb5_ccache cache)
673 struct kcm_cache_data *data = cache->data;
675 k5_cc_mutex_destroy(&data->lock);
676 kcmio_close(data->io);
677 free(data->residual);
683 static krb5_error_code KRB5_CALLCONV
684 kcm_destroy(krb5_context context, krb5_ccache cache)
689 kcmreq_init(&req, KCM_OP_DESTROY, cache);
690 ret = cache_call(context, cache, &req, TRUE);
692 (void)kcm_close(context, cache);
696 static krb5_error_code KRB5_CALLCONV
697 kcm_store(krb5_context context, krb5_ccache cache, krb5_creds *cred)
702 kcmreq_init(&req, KCM_OP_STORE, cache);
703 k5_marshal_cred(&req.reqbuf, 4, cred);
704 ret = cache_call(context, cache, &req, TRUE);
709 static krb5_error_code KRB5_CALLCONV
710 kcm_retrieve(krb5_context context, krb5_ccache cache, krb5_flags flags,
711 krb5_creds *mcred, krb5_creds *cred_out)
713 /* There is a KCM opcode for retrieving creds, but Heimdal's client doesn't
714 * use it. It causes the KCM daemon to actually make a TGS request. */
715 return k5_cc_retrieve_cred_default(context, cache, flags, mcred, cred_out);
718 static krb5_error_code KRB5_CALLCONV
719 kcm_get_princ(krb5_context context, krb5_ccache cache,
720 krb5_principal *princ_out)
725 kcmreq_init(&req, KCM_OP_GET_PRINCIPAL, cache);
726 ret = cache_call(context, cache, &req, FALSE);
727 /* Heimdal KCM can respond with code 0 and no principal. */
728 if (!ret && req.reply.len == 0)
729 ret = KRB5_FCC_NOFILE;
731 ret = k5_unmarshal_princ(req.reply.ptr, req.reply.len, 4, princ_out);
733 return map_invalid(ret);
736 static krb5_error_code KRB5_CALLCONV
737 kcm_start_seq_get(krb5_context context, krb5_ccache cache,
738 krb5_cc_cursor *cursor_out)
741 struct kcmreq req = EMPTY_KCMREQ;
742 struct uuid_list *uuids;
746 get_kdc_offset(context, cache);
748 kcmreq_init(&req, KCM_OP_GET_CRED_UUID_LIST, cache);
749 ret = cache_call(context, cache, &req, FALSE);
752 ret = kcmreq_get_uuid_list(&req, &uuids);
755 *cursor_out = (krb5_cc_cursor)uuids;
762 static krb5_error_code KRB5_CALLCONV
763 kcm_next_cred(krb5_context context, krb5_ccache cache, krb5_cc_cursor *cursor,
764 krb5_creds *cred_out)
768 struct uuid_list *uuids = (struct uuid_list *)*cursor;
770 memset(cred_out, 0, sizeof(*cred_out));
772 if (uuids->pos >= uuids->count)
775 kcmreq_init(&req, KCM_OP_GET_CRED_BY_UUID, cache);
776 k5_buf_add_len(&req.reqbuf, uuids->uuidbytes + (uuids->pos * KCM_UUID_LEN),
779 ret = cache_call(context, cache, &req, FALSE);
781 ret = k5_unmarshal_cred(req.reply.ptr, req.reply.len, 4, cred_out);
783 return map_invalid(ret);
786 static krb5_error_code KRB5_CALLCONV
787 kcm_end_seq_get(krb5_context context, krb5_ccache cache,
788 krb5_cc_cursor *cursor)
790 free_uuid_list((struct uuid_list *)*cursor);
795 static krb5_error_code KRB5_CALLCONV
796 kcm_remove_cred(krb5_context context, krb5_ccache cache, krb5_flags flags,
802 kcmreq_init(&req, KCM_OP_REMOVE_CRED, cache);
803 kcmreq_put32(&req, flags);
804 k5_marshal_mcred(&req.reqbuf, mcred);
805 ret = cache_call(context, cache, &req, TRUE);
810 static krb5_error_code KRB5_CALLCONV
811 kcm_set_flags(krb5_context context, krb5_ccache cache, krb5_flags flags)
813 /* We don't currently care about any flags for this type. */
817 static krb5_error_code KRB5_CALLCONV
818 kcm_get_flags(krb5_context context, krb5_ccache cache, krb5_flags *flags_out)
820 /* We don't currently have any operational flags for this type. */
825 /* Construct a per-type cursor, always taking ownership of io and uuids. */
826 static krb5_error_code
827 make_ptcursor(const char *residual, struct uuid_list *uuids, struct kcmio *io,
828 krb5_cc_ptcursor *cursor_out)
830 krb5_cc_ptcursor cursor = NULL;
831 struct kcm_ptcursor *data = NULL;
832 char *residual_copy = NULL;
836 if (residual != NULL) {
837 residual_copy = strdup(residual);
838 if (residual_copy == NULL)
841 cursor = malloc(sizeof(*cursor));
844 data = malloc(sizeof(*data));
848 data->residual = residual_copy;
852 cursor->ops = &krb5_kcm_ops;
854 *cursor_out = cursor;
859 free_uuid_list(uuids);
866 static krb5_error_code KRB5_CALLCONV
867 kcm_ptcursor_new(krb5_context context, krb5_cc_ptcursor *cursor_out)
870 struct kcmreq req = EMPTY_KCMREQ;
871 struct kcmio *io = NULL;
872 struct uuid_list *uuids = NULL;
873 const char *defname, *primary;
877 /* Don't try to use KCM for the cache collection unless the default cache
878 * name has the KCM type. */
879 defname = krb5_cc_default_name(context);
880 if (defname == NULL || strncmp(defname, "KCM:", 4) != 0)
881 return make_ptcursor(NULL, NULL, NULL, cursor_out);
883 ret = kcmio_connect(context, &io);
887 /* If defname is a subsidiary cache, return a singleton cursor. */
888 if (strlen(defname) > 4)
889 return make_ptcursor(defname + 4, NULL, io, cursor_out);
891 kcmreq_init(&req, KCM_OP_GET_CACHE_UUID_LIST, NULL);
892 ret = kcmio_call(context, io, &req);
893 if (ret == KRB5_FCC_NOFILE) {
894 /* There are no accessible caches; return an empty cursor. */
895 ret = make_ptcursor(NULL, NULL, NULL, cursor_out);
900 ret = kcmreq_get_uuid_list(&req, &uuids);
905 kcmreq_init(&req, KCM_OP_GET_DEFAULT_CACHE, NULL);
906 ret = kcmio_call(context, io, &req);
909 ret = kcmreq_get_name(&req, &primary);
913 ret = make_ptcursor(primary, uuids, io, cursor_out);
918 free_uuid_list(uuids);
924 /* Return true if name is an initialized cache. */
926 name_exists(krb5_context context, struct kcmio *io, const char *name)
931 kcmreq_init(&req, KCM_OP_GET_PRINCIPAL, NULL);
932 k5_buf_add_len(&req.reqbuf, name, strlen(name) + 1);
933 ret = kcmio_call(context, io, &req);
938 static krb5_error_code KRB5_CALLCONV
939 kcm_ptcursor_next(krb5_context context, krb5_cc_ptcursor cursor,
940 krb5_ccache *cache_out)
942 krb5_error_code ret = 0;
943 struct kcmreq req = EMPTY_KCMREQ;
944 struct kcm_ptcursor *data = cursor->data;
945 struct uuid_list *uuids;
946 const unsigned char *id;
951 /* Return the primary or specified subsidiary cache if we haven't yet. */
952 if (data->first && data->residual != NULL) {
954 if (name_exists(context, data->io, data->residual))
955 return make_cache(context, data->residual, NULL, cache_out);
962 while (uuids->pos < uuids->count) {
963 /* Get the name of the next cache. */
964 id = &uuids->uuidbytes[KCM_UUID_LEN * uuids->pos++];
966 kcmreq_init(&req, KCM_OP_GET_CACHE_BY_UUID, NULL);
967 k5_buf_add_len(&req.reqbuf, id, KCM_UUID_LEN);
968 ret = kcmio_call(context, data->io, &req);
971 ret = kcmreq_get_name(&req, &name);
975 /* Don't yield the primary cache twice. */
976 if (strcmp(name, data->residual) == 0)
979 ret = make_cache(context, name, NULL, cache_out);
988 static krb5_error_code KRB5_CALLCONV
989 kcm_ptcursor_free(krb5_context context, krb5_cc_ptcursor *cursor)
991 struct kcm_ptcursor *data = (*cursor)->data;
993 free(data->residual);
994 free_uuid_list(data->uuids);
995 kcmio_close(data->io);
1002 static krb5_error_code KRB5_CALLCONV
1003 kcm_lastchange(krb5_context context, krb5_ccache cache,
1004 krb5_timestamp *time_out)
1006 struct kcm_cache_data *data = cache->data;
1009 * KCM has no support for retrieving the last change time. Return the time
1010 * of the last change made through this handle, which isn't very useful,
1011 * but is the best we can do for now.
1013 k5_cc_mutex_lock(context, &data->lock);
1014 *time_out = data->changetime;
1015 k5_cc_mutex_unlock(context, &data->lock);
1019 static krb5_error_code KRB5_CALLCONV
1020 kcm_lock(krb5_context context, krb5_ccache cache)
1022 k5_cc_mutex_lock(context, &((struct kcm_cache_data *)cache->data)->lock);
1026 static krb5_error_code KRB5_CALLCONV
1027 kcm_unlock(krb5_context context, krb5_ccache cache)
1029 k5_cc_mutex_unlock(context, &((struct kcm_cache_data *)cache->data)->lock);
1033 static krb5_error_code KRB5_CALLCONV
1034 kcm_switch_to(krb5_context context, krb5_ccache cache)
1036 krb5_error_code ret;
1039 kcmreq_init(&req, KCM_OP_SET_DEFAULT_CACHE, cache);
1040 ret = cache_call(context, cache, &req, FALSE);
1045 const krb5_cc_ops krb5_kcm_ops = {
1068 NULL, /* wasdefault */
1074 #endif /* not _WIN32 */