1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
4 #include <linux/crc32c.h>
5 #include <linux/ctype.h>
6 #include <linux/highmem.h>
7 #include <linux/inet.h>
8 #include <linux/kthread.h>
10 #include <linux/nsproxy.h>
11 #include <linux/sched/mm.h>
12 #include <linux/slab.h>
13 #include <linux/socket.h>
14 #include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
17 #endif /* CONFIG_BLOCK */
18 #include <linux/dns_resolver.h>
20 #include <trace/events/sock.h>
22 #include <linux/ceph/ceph_features.h>
23 #include <linux/ceph/libceph.h>
24 #include <linux/ceph/messenger.h>
25 #include <linux/ceph/decode.h>
26 #include <linux/ceph/pagelist.h>
27 #include <linux/export.h>
30 * Ceph uses the messenger to exchange ceph_msg messages with other
31 * hosts in the system. The messenger provides ordered and reliable
32 * delivery. We tolerate TCP disconnects by reconnecting (with
33 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error). Acks allow sent messages to be discarded by
 * the sender.
39 * We track the state of the socket on a given connection using
40 * values defined below. The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *   --------
 *   | NEW* |  transient initial state
 *   --------
 *       | con_sock_state_init()
 *       v
 *   ----------
 *   | CLOSED |  initialized, but no socket (and no TCP connection)
 *   ----------
 *       | con_sock_state_connecting()
 *       v
 *   --------------
 *   | CONNECTING |  socket created, TCP connect initiated
 *   --------------
 *       | con_sock_state_connected()
 *       v
 *   -------------
 *   | CONNECTED |  TCP connection established
 *   -------------
 *
 * A socket in CONNECTING or CONNECTED moves to CLOSING via
 * con_sock_state_closing() (socket event; await close), and any of
 * CONNECTING, CONNECTED or CLOSING returns to CLOSED via
 * con_sock_state_closed().
77 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
80 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
81 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
82 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
83 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
84 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
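/*
 * Illustrative sketch (not part of the messenger): the transitions the
 * comments above document could be encoded in a hypothetical helper like
 * the one below.  The real con_sock_state_*() helpers further down
 * enforce a slightly looser set of transitions via WARN_ON.
 */
static inline bool con_sock_state_transition_ok(int old_state, int new_state)
{
	switch (new_state) {
	case CON_SOCK_STATE_CLOSED:
		return old_state == CON_SOCK_STATE_NEW ||
		       old_state == CON_SOCK_STATE_CONNECTED ||
		       old_state == CON_SOCK_STATE_CLOSING;
	case CON_SOCK_STATE_CONNECTING:
		return old_state == CON_SOCK_STATE_CLOSED;
	case CON_SOCK_STATE_CONNECTED:
		return old_state == CON_SOCK_STATE_CONNECTING;
	case CON_SOCK_STATE_CLOSING:
		return old_state == CON_SOCK_STATE_CONNECTING ||
		       old_state == CON_SOCK_STATE_CONNECTED;
	default:
		return false;
	}
}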
86 static bool con_flag_valid(unsigned long con_flag)
89 case CEPH_CON_F_LOSSYTX:
90 case CEPH_CON_F_KEEPALIVE_PENDING:
91 case CEPH_CON_F_WRITE_PENDING:
92 case CEPH_CON_F_SOCK_CLOSED:
93 case CEPH_CON_F_BACKOFF:
100 void ceph_con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
102 BUG_ON(!con_flag_valid(con_flag));
104 clear_bit(con_flag, &con->flags);
107 void ceph_con_flag_set(struct ceph_connection *con, unsigned long con_flag)
109 BUG_ON(!con_flag_valid(con_flag));
111 set_bit(con_flag, &con->flags);
114 bool ceph_con_flag_test(struct ceph_connection *con, unsigned long con_flag)
116 BUG_ON(!con_flag_valid(con_flag));
118 return test_bit(con_flag, &con->flags);
121 bool ceph_con_flag_test_and_clear(struct ceph_connection *con,
122 unsigned long con_flag)
124 BUG_ON(!con_flag_valid(con_flag));
126 return test_and_clear_bit(con_flag, &con->flags);
129 bool ceph_con_flag_test_and_set(struct ceph_connection *con,
130 unsigned long con_flag)
132 BUG_ON(!con_flag_valid(con_flag));
134 return test_and_set_bit(con_flag, &con->flags);
137 /* Slab caches for frequently-allocated structures */
139 static struct kmem_cache *ceph_msg_cache;
141 #ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
145 static void queue_con(struct ceph_connection *con);
146 static void cancel_con(struct ceph_connection *con);
147 static void ceph_con_workfn(struct work_struct *);
148 static void con_fault(struct ceph_connection *con);
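/*
 * Usage sketch (illustrative only): the test-and-set flag variant above
 * lets a caller queue the connection workfn at most once per pending
 * write, which is how ceph_con_send() and ceph_con_keepalive() use it
 * further down.  "example_kick_writer" is hypothetical.
 */
static void example_kick_writer(struct ceph_connection *con)
{
	/* only queue work if a write wasn't already pending */
	if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
		queue_con(con);
}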
151 * Nicely render a sockaddr as a string. An array of formatted
152 * strings is used, to approximate reentrancy.
154 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
155 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
156 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
157 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
159 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
160 static atomic_t addr_str_seq = ATOMIC_INIT(0);
162 struct page *ceph_zero_page; /* used in certain error cases */
164 const char *ceph_pr_addr(const struct ceph_entity_addr *addr)
168 struct sockaddr_storage ss = addr->in_addr; /* align */
169 struct sockaddr_in *in4 = (struct sockaddr_in *)&ss;
170 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *)&ss;
172 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
175 switch (ss.ss_family) {
177 snprintf(s, MAX_ADDR_STR_LEN, "(%d)%pI4:%hu",
178 le32_to_cpu(addr->type), &in4->sin_addr,
179 ntohs(in4->sin_port));
183 snprintf(s, MAX_ADDR_STR_LEN, "(%d)[%pI6c]:%hu",
184 le32_to_cpu(addr->type), &in6->sin6_addr,
185 ntohs(in6->sin6_port));
189 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
195 EXPORT_SYMBOL(ceph_pr_addr);
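/*
 * Usage sketch (illustrative): the string returned by ceph_pr_addr()
 * lives in a small rotating buffer, so it should be consumed right away,
 * typically inside a single log statement.  "example_log_peer" is
 * hypothetical.
 */
static void example_log_peer(struct ceph_connection *con)
{
	pr_info("peer %s\n", ceph_pr_addr(&con->peer_addr));
}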
197 void ceph_encode_my_addr(struct ceph_messenger *msgr)
199 if (!ceph_msgr2(from_msgr(msgr))) {
200 memcpy(&msgr->my_enc_addr, &msgr->inst.addr,
201 sizeof(msgr->my_enc_addr));
202 ceph_encode_banner_addr(&msgr->my_enc_addr);
207 * work queue for all reading and writing to/from the socket.
209 static struct workqueue_struct *ceph_msgr_wq;
211 static int ceph_msgr_slab_init(void)
213 BUG_ON(ceph_msg_cache);
214 ceph_msg_cache = KMEM_CACHE(ceph_msg, 0);
221 static void ceph_msgr_slab_exit(void)
223 BUG_ON(!ceph_msg_cache);
224 kmem_cache_destroy(ceph_msg_cache);
225 ceph_msg_cache = NULL;
228 static void _ceph_msgr_exit(void)
231 destroy_workqueue(ceph_msgr_wq);
235 BUG_ON(!ceph_zero_page);
236 put_page(ceph_zero_page);
237 ceph_zero_page = NULL;
239 ceph_msgr_slab_exit();
242 int __init ceph_msgr_init(void)
244 if (ceph_msgr_slab_init())
247 BUG_ON(ceph_zero_page);
248 ceph_zero_page = ZERO_PAGE(0);
249 get_page(ceph_zero_page);
252 * The number of active work items is limited by the number of
253 * connections, so leave @max_active at default.
255 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
259 pr_err("msgr_init failed to create workqueue\n");
265 void ceph_msgr_exit(void)
267 BUG_ON(ceph_msgr_wq == NULL);
272 void ceph_msgr_flush(void)
274 flush_workqueue(ceph_msgr_wq);
276 EXPORT_SYMBOL(ceph_msgr_flush);
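/*
 * Init/teardown sketch (illustrative): a libceph-style user pairs these
 * calls in its module init and exit paths.  "example_init" and
 * "example_exit" are hypothetical.
 */
static int __init example_init(void)
{
	return ceph_msgr_init();
}

static void __exit example_exit(void)
{
	ceph_msgr_flush();	/* drain any queued connection work */
	ceph_msgr_exit();
}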
278 /* Connection socket state transition functions */
280 static void con_sock_state_init(struct ceph_connection *con)
284 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
285 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
286 printk("%s: unexpected old state %d\n", __func__, old_state);
287 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
288 CON_SOCK_STATE_CLOSED);
291 static void con_sock_state_connecting(struct ceph_connection *con)
295 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
296 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
297 printk("%s: unexpected old state %d\n", __func__, old_state);
298 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
299 CON_SOCK_STATE_CONNECTING);
302 static void con_sock_state_connected(struct ceph_connection *con)
306 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
307 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
308 printk("%s: unexpected old state %d\n", __func__, old_state);
309 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
310 CON_SOCK_STATE_CONNECTED);
313 static void con_sock_state_closing(struct ceph_connection *con)
317 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
318 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
319 old_state != CON_SOCK_STATE_CONNECTED &&
320 old_state != CON_SOCK_STATE_CLOSING))
321 printk("%s: unexpected old state %d\n", __func__, old_state);
322 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
323 CON_SOCK_STATE_CLOSING);
326 static void con_sock_state_closed(struct ceph_connection *con)
330 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
331 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
332 old_state != CON_SOCK_STATE_CLOSING &&
333 old_state != CON_SOCK_STATE_CONNECTING &&
334 old_state != CON_SOCK_STATE_CLOSED))
335 printk("%s: unexpected old state %d\n", __func__, old_state);
336 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
337 CON_SOCK_STATE_CLOSED);
341 * socket callback functions
344 /* data available on socket, or listen socket received a connect */
345 static void ceph_sock_data_ready(struct sock *sk)
347 struct ceph_connection *con = sk->sk_user_data;
349 trace_sk_data_ready(sk);
351 if (atomic_read(&con->msgr->stopping)) {
355 if (sk->sk_state != TCP_CLOSE_WAIT) {
356 dout("%s %p state = %d, queueing work\n", __func__,
362 /* socket has buffer space for writing */
363 static void ceph_sock_write_space(struct sock *sk)
365 struct ceph_connection *con = sk->sk_user_data;
367 /* only queue to workqueue if there is data we want to write,
368 * and there is sufficient space in the socket buffer to accept
369 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
370 * doesn't get called again until try_write() fills the socket
371 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
372 * and net/core/stream.c:sk_stream_write_space().
374 if (ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING)) {
375 if (sk_stream_is_writeable(sk)) {
376 dout("%s %p queueing write work\n", __func__, con);
377 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
381 dout("%s %p nothing to write\n", __func__, con);
385 /* socket's state has changed */
386 static void ceph_sock_state_change(struct sock *sk)
388 struct ceph_connection *con = sk->sk_user_data;
390 dout("%s %p state = %d sk_state = %u\n", __func__,
391 con, con->state, sk->sk_state);
393 switch (sk->sk_state) {
395 dout("%s TCP_CLOSE\n", __func__);
398 dout("%s TCP_CLOSE_WAIT\n", __func__);
399 con_sock_state_closing(con);
400 ceph_con_flag_set(con, CEPH_CON_F_SOCK_CLOSED);
403 case TCP_ESTABLISHED:
404 dout("%s TCP_ESTABLISHED\n", __func__);
405 con_sock_state_connected(con);
408 default: /* Everything else is uninteresting */
414 * set up socket callbacks
416 static void set_sock_callbacks(struct socket *sock,
417 struct ceph_connection *con)
419 struct sock *sk = sock->sk;
420 sk->sk_user_data = con;
421 sk->sk_data_ready = ceph_sock_data_ready;
422 sk->sk_write_space = ceph_sock_write_space;
423 sk->sk_state_change = ceph_sock_state_change;
432 * initiate connection to a remote socket.
434 int ceph_tcp_connect(struct ceph_connection *con)
436 struct sockaddr_storage ss = con->peer_addr.in_addr; /* align */
438 unsigned int noio_flag;
441 dout("%s con %p peer_addr %s\n", __func__, con,
442 ceph_pr_addr(&con->peer_addr));
445 /* sock_create_kern() allocates with GFP_KERNEL */
446 noio_flag = memalloc_noio_save();
447 ret = sock_create_kern(read_pnet(&con->msgr->net), ss.ss_family,
448 SOCK_STREAM, IPPROTO_TCP, &sock);
449 memalloc_noio_restore(noio_flag);
452 sock->sk->sk_allocation = GFP_NOFS;
453 sock->sk->sk_use_task_frag = false;
455 #ifdef CONFIG_LOCKDEP
lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif
459 set_sock_callbacks(sock, con);
461 con_sock_state_connecting(con);
462 ret = kernel_connect(sock, (struct sockaddr *)&ss, sizeof(ss),
464 if (ret == -EINPROGRESS) {
465 dout("connect %s EINPROGRESS sk_state = %u\n",
466 ceph_pr_addr(&con->peer_addr),
468 } else if (ret < 0) {
469 pr_err("connect %s error %d\n",
470 ceph_pr_addr(&con->peer_addr), ret);
475 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY))
476 tcp_sock_set_nodelay(sock->sk);
483 * Shutdown/close the socket for the given connection.
485 int ceph_con_close_socket(struct ceph_connection *con)
489 dout("%s con %p sock %p\n", __func__, con, con->sock);
491 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
492 sock_release(con->sock);
497 * Forcibly clear the SOCK_CLOSED flag. It gets set
498 * independent of the connection mutex, and we could have
499 * received a socket close event before we had the chance to
500 * shut the socket down.
502 ceph_con_flag_clear(con, CEPH_CON_F_SOCK_CLOSED);
504 con_sock_state_closed(con);
508 static void ceph_con_reset_protocol(struct ceph_connection *con)
510 dout("%s con %p\n", __func__, con);
512 ceph_con_close_socket(con);
514 WARN_ON(con->in_msg->con != con);
515 ceph_msg_put(con->in_msg);
519 WARN_ON(con->out_msg->con != con);
520 ceph_msg_put(con->out_msg);
523 if (con->bounce_page) {
524 __free_page(con->bounce_page);
525 con->bounce_page = NULL;
528 if (ceph_msgr2(from_msgr(con->msgr)))
529 ceph_con_v2_reset_protocol(con);
531 ceph_con_v1_reset_protocol(con);
535 * Reset a connection. Discard all incoming and outgoing messages
536 * and clear *_seq state.
538 static void ceph_msg_remove(struct ceph_msg *msg)
540 list_del_init(&msg->list_head);
545 static void ceph_msg_remove_list(struct list_head *head)
547 while (!list_empty(head)) {
548 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
550 ceph_msg_remove(msg);
554 void ceph_con_reset_session(struct ceph_connection *con)
556 dout("%s con %p\n", __func__, con);
558 WARN_ON(con->in_msg);
559 WARN_ON(con->out_msg);
560 ceph_msg_remove_list(&con->out_queue);
561 ceph_msg_remove_list(&con->out_sent);
564 con->in_seq_acked = 0;
566 if (ceph_msgr2(from_msgr(con->msgr)))
567 ceph_con_v2_reset_session(con);
569 ceph_con_v1_reset_session(con);
573 * mark a peer down. drop any open connections.
575 void ceph_con_close(struct ceph_connection *con)
577 mutex_lock(&con->mutex);
578 dout("con_close %p peer %s\n", con, ceph_pr_addr(&con->peer_addr));
579 con->state = CEPH_CON_S_CLOSED;
581 ceph_con_flag_clear(con, CEPH_CON_F_LOSSYTX); /* so we retry next
583 ceph_con_flag_clear(con, CEPH_CON_F_KEEPALIVE_PENDING);
584 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
585 ceph_con_flag_clear(con, CEPH_CON_F_BACKOFF);
587 ceph_con_reset_protocol(con);
588 ceph_con_reset_session(con);
590 mutex_unlock(&con->mutex);
592 EXPORT_SYMBOL(ceph_con_close);
595 * Reopen a closed connection, with a new peer address.
597 void ceph_con_open(struct ceph_connection *con,
598 __u8 entity_type, __u64 entity_num,
599 struct ceph_entity_addr *addr)
601 mutex_lock(&con->mutex);
602 dout("con_open %p %s\n", con, ceph_pr_addr(addr));
604 WARN_ON(con->state != CEPH_CON_S_CLOSED);
605 con->state = CEPH_CON_S_PREOPEN;
607 con->peer_name.type = (__u8) entity_type;
608 con->peer_name.num = cpu_to_le64(entity_num);
610 memcpy(&con->peer_addr, addr, sizeof(*addr));
611 con->delay = 0; /* reset backoff memory */
612 mutex_unlock(&con->mutex);
615 EXPORT_SYMBOL(ceph_con_open);
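/*
 * Lifecycle sketch (illustrative, not part of the messenger): a typical
 * user embeds a ceph_connection, initializes it once and then opens it
 * towards a peer.  "example_con_ops", "example_open_osd_session" and the
 * OSD entity number are hypothetical.
 */
static void example_open_osd_session(struct ceph_connection *con,
				     struct ceph_messenger *msgr,
				     struct ceph_entity_addr *osd_addr)
{
	extern const struct ceph_connection_operations example_con_ops;

	ceph_con_init(con, NULL, &example_con_ops, msgr);
	ceph_con_open(con, CEPH_ENTITY_TYPE_OSD, 0, osd_addr);
}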
 * return true if this connection was ever successfully opened
620 bool ceph_con_opened(struct ceph_connection *con)
622 if (ceph_msgr2(from_msgr(con->msgr)))
623 return ceph_con_v2_opened(con);
625 return ceph_con_v1_opened(con);
629 * initialize a new connection.
631 void ceph_con_init(struct ceph_connection *con, void *private,
632 const struct ceph_connection_operations *ops,
633 struct ceph_messenger *msgr)
635 dout("con_init %p\n", con);
636 memset(con, 0, sizeof(*con));
637 con->private = private;
641 con_sock_state_init(con);
643 mutex_init(&con->mutex);
644 INIT_LIST_HEAD(&con->out_queue);
645 INIT_LIST_HEAD(&con->out_sent);
646 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
648 con->state = CEPH_CON_S_CLOSED;
650 EXPORT_SYMBOL(ceph_con_init);
653 * We maintain a global counter to order connection attempts. Get
654 * a unique seq greater than @gt.
656 u32 ceph_get_global_seq(struct ceph_messenger *msgr, u32 gt)
660 spin_lock(&msgr->global_seq_lock);
661 if (msgr->global_seq < gt)
662 msgr->global_seq = gt;
663 ret = ++msgr->global_seq;
664 spin_unlock(&msgr->global_seq_lock);
669 * Discard messages that have been acked by the server.
671 void ceph_con_discard_sent(struct ceph_connection *con, u64 ack_seq)
673 struct ceph_msg *msg;
676 dout("%s con %p ack_seq %llu\n", __func__, con, ack_seq);
677 while (!list_empty(&con->out_sent)) {
678 msg = list_first_entry(&con->out_sent, struct ceph_msg,
680 WARN_ON(msg->needs_out_seq);
681 seq = le64_to_cpu(msg->hdr.seq);
685 dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
687 ceph_msg_remove(msg);
692 * Discard messages that have been requeued in con_fault(), up to
693 * reconnect_seq. This avoids gratuitously resending messages that
694 * the server had received and handled prior to reconnect.
696 void ceph_con_discard_requeued(struct ceph_connection *con, u64 reconnect_seq)
698 struct ceph_msg *msg;
701 dout("%s con %p reconnect_seq %llu\n", __func__, con, reconnect_seq);
702 while (!list_empty(&con->out_queue)) {
703 msg = list_first_entry(&con->out_queue, struct ceph_msg,
705 if (msg->needs_out_seq)
707 seq = le64_to_cpu(msg->hdr.seq);
708 if (seq > reconnect_seq)
711 dout("%s con %p discarding msg %p seq %llu\n", __func__, con,
713 ceph_msg_remove(msg);
#ifdef CONFIG_BLOCK
/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
724 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
727 struct ceph_msg_data *data = cursor->data;
728 struct ceph_bio_iter *it = &cursor->bio_iter;
730 cursor->resid = min_t(size_t, length, data->bio_length);
732 if (cursor->resid < it->iter.bi_size)
733 it->iter.bi_size = cursor->resid;
735 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
738 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
742 struct bio_vec bv = bio_iter_iovec(cursor->bio_iter.bio,
743 cursor->bio_iter.iter);
745 *page_offset = bv.bv_offset;
750 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
753 struct ceph_bio_iter *it = &cursor->bio_iter;
754 struct page *page = bio_iter_page(it->bio, it->iter);
756 BUG_ON(bytes > cursor->resid);
757 BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
758 cursor->resid -= bytes;
759 bio_advance_iter(it->bio, &it->iter, bytes);
762 return false; /* no more data */
764 if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
765 page == bio_iter_page(it->bio, it->iter)))
766 return false; /* more bytes to process in this segment */
768 if (!it->iter.bi_size) {
769 it->bio = it->bio->bi_next;
770 it->iter = it->bio->bi_iter;
771 if (cursor->resid < it->iter.bi_size)
772 it->iter.bi_size = cursor->resid;
775 BUG_ON(cursor->resid < bio_iter_len(it->bio, it->iter));
778 #endif /* CONFIG_BLOCK */
780 static void ceph_msg_data_bvecs_cursor_init(struct ceph_msg_data_cursor *cursor,
783 struct ceph_msg_data *data = cursor->data;
784 struct bio_vec *bvecs = data->bvec_pos.bvecs;
786 cursor->resid = min_t(size_t, length, data->bvec_pos.iter.bi_size);
787 cursor->bvec_iter = data->bvec_pos.iter;
788 cursor->bvec_iter.bi_size = cursor->resid;
790 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
793 static struct page *ceph_msg_data_bvecs_next(struct ceph_msg_data_cursor *cursor,
797 struct bio_vec bv = bvec_iter_bvec(cursor->data->bvec_pos.bvecs,
800 *page_offset = bv.bv_offset;
805 static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
808 struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
809 struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
811 BUG_ON(bytes > cursor->resid);
812 BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
813 cursor->resid -= bytes;
814 bvec_iter_advance(bvecs, &cursor->bvec_iter, bytes);
817 return false; /* no more data */
819 if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
820 page == bvec_iter_page(bvecs, cursor->bvec_iter)))
821 return false; /* more bytes to process in this segment */
823 BUG_ON(cursor->resid < bvec_iter_len(bvecs, cursor->bvec_iter));
828 * For a page array, a piece comes from the first page in the array
829 * that has not already been fully consumed.
831 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
834 struct ceph_msg_data *data = cursor->data;
837 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
839 BUG_ON(!data->pages);
840 BUG_ON(!data->length);
842 cursor->resid = min(length, data->length);
843 page_count = calc_pages_for(data->alignment, (u64)data->length);
844 cursor->page_offset = data->alignment & ~PAGE_MASK;
845 cursor->page_index = 0;
846 BUG_ON(page_count > (int)USHRT_MAX);
847 cursor->page_count = (unsigned short)page_count;
848 BUG_ON(length > SIZE_MAX - cursor->page_offset);
852 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
853 size_t *page_offset, size_t *length)
855 struct ceph_msg_data *data = cursor->data;
857 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
859 BUG_ON(cursor->page_index >= cursor->page_count);
860 BUG_ON(cursor->page_offset >= PAGE_SIZE);
862 *page_offset = cursor->page_offset;
863 *length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
864 return data->pages[cursor->page_index];
867 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
870 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
872 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
874 /* Advance the cursor page offset */
876 cursor->resid -= bytes;
877 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
878 if (!bytes || cursor->page_offset)
879 return false; /* more bytes to process in the current page */
882 return false; /* no more data */
884 /* Move on to the next page; offset is already at 0 */
886 BUG_ON(cursor->page_index >= cursor->page_count);
887 cursor->page_index++;
892 * For a pagelist, a piece is whatever remains to be consumed in the
893 * first page in the list, or the front of the next page.
896 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
899 struct ceph_msg_data *data = cursor->data;
900 struct ceph_pagelist *pagelist;
903 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
905 pagelist = data->pagelist;
909 return; /* pagelist can be assigned but empty */
911 BUG_ON(list_empty(&pagelist->head));
912 page = list_first_entry(&pagelist->head, struct page, lru);
914 cursor->resid = min(length, pagelist->length);
920 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
921 size_t *page_offset, size_t *length)
923 struct ceph_msg_data *data = cursor->data;
924 struct ceph_pagelist *pagelist;
926 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
928 pagelist = data->pagelist;
931 BUG_ON(!cursor->page);
932 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
934 /* offset of first page in pagelist is always 0 */
935 *page_offset = cursor->offset & ~PAGE_MASK;
936 *length = min_t(size_t, cursor->resid, PAGE_SIZE - *page_offset);
940 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
943 struct ceph_msg_data *data = cursor->data;
944 struct ceph_pagelist *pagelist;
946 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
948 pagelist = data->pagelist;
951 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
952 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
954 /* Advance the cursor offset */
956 cursor->resid -= bytes;
957 cursor->offset += bytes;
958 /* offset of first page in pagelist is always 0 */
959 if (!bytes || cursor->offset & ~PAGE_MASK)
960 return false; /* more bytes to process in the current page */
963 return false; /* no more data */
965 /* Move on to the next page */
967 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
968 cursor->page = list_next_entry(cursor->page, lru);
972 static void ceph_msg_data_iter_cursor_init(struct ceph_msg_data_cursor *cursor,
975 struct ceph_msg_data *data = cursor->data;
977 cursor->iov_iter = data->iter;
979 iov_iter_truncate(&cursor->iov_iter, length);
980 cursor->resid = iov_iter_count(&cursor->iov_iter);
983 static struct page *ceph_msg_data_iter_next(struct ceph_msg_data_cursor *cursor,
984 size_t *page_offset, size_t *length)
990 iov_iter_revert(&cursor->iov_iter, cursor->lastlen);
992 len = iov_iter_get_pages2(&cursor->iov_iter, &page, PAGE_SIZE,
996 cursor->lastlen = len;
999 * FIXME: The assumption is that the pages represented by the iov_iter
1000 * are pinned, with the references held by the upper-level
1001 * callers, or by virtue of being under writeback. Eventually,
1002 * we'll get an iov_iter_get_pages2 variant that doesn't take
1003 * page refs. Until then, just put the page ref.
1005 VM_BUG_ON_PAGE(!PageWriteback(page) && page_count(page) < 2, page);
1008 *length = min_t(size_t, len, cursor->resid);
1012 static bool ceph_msg_data_iter_advance(struct ceph_msg_data_cursor *cursor,
1015 BUG_ON(bytes > cursor->resid);
1016 cursor->resid -= bytes;
1018 if (bytes < cursor->lastlen) {
1019 cursor->lastlen -= bytes;
1021 iov_iter_advance(&cursor->iov_iter, bytes - cursor->lastlen);
1022 cursor->lastlen = 0;
1025 return cursor->resid;
1029 * Message data is handled (sent or received) in pieces, where each
1030 * piece resides on a single page. The network layer might not
1031 * consume an entire piece at once. A data item's cursor keeps
1032 * track of which piece is next to process and how much remains to
1033 * be processed in that piece. It also tracks whether the current
1034 * piece is the last one in the data item.
1036 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1038 size_t length = cursor->total_resid;
1040 switch (cursor->data->type) {
1041 case CEPH_MSG_DATA_PAGELIST:
1042 ceph_msg_data_pagelist_cursor_init(cursor, length);
1044 case CEPH_MSG_DATA_PAGES:
1045 ceph_msg_data_pages_cursor_init(cursor, length);
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
1049 ceph_msg_data_bio_cursor_init(cursor, length);
1051 #endif /* CONFIG_BLOCK */
1052 case CEPH_MSG_DATA_BVECS:
1053 ceph_msg_data_bvecs_cursor_init(cursor, length);
1055 case CEPH_MSG_DATA_ITER:
1056 ceph_msg_data_iter_cursor_init(cursor, length);
1058 case CEPH_MSG_DATA_NONE:
1063 cursor->need_crc = true;
1066 void ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor,
1067 struct ceph_msg *msg, size_t length)
1070 BUG_ON(length > msg->data_length);
1071 BUG_ON(!msg->num_data_items);
1073 cursor->total_resid = length;
1074 cursor->data = msg->data;
1075 cursor->sr_resid = 0;
1077 __ceph_msg_data_cursor_init(cursor);
1081 * Return the page containing the next piece to process for a given
1082 * data item, and supply the page offset and length of that piece.
1083 * Indicate whether this is the last piece in this data item.
1085 struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1086 size_t *page_offset, size_t *length)
1090 switch (cursor->data->type) {
1091 case CEPH_MSG_DATA_PAGELIST:
1092 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1094 case CEPH_MSG_DATA_PAGES:
1095 page = ceph_msg_data_pages_next(cursor, page_offset, length);
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
1099 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1101 #endif /* CONFIG_BLOCK */
1102 case CEPH_MSG_DATA_BVECS:
1103 page = ceph_msg_data_bvecs_next(cursor, page_offset, length);
1105 case CEPH_MSG_DATA_ITER:
1106 page = ceph_msg_data_iter_next(cursor, page_offset, length);
1108 case CEPH_MSG_DATA_NONE:
1115 BUG_ON(*page_offset + *length > PAGE_SIZE);
1117 BUG_ON(*length > cursor->resid);
 * Advance the cursor past the given number of bytes; cursor->need_crc is
 * set when this moves the cursor on to the next piece of the data item.
1126 void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, size_t bytes)
1130 BUG_ON(bytes > cursor->resid);
1131 switch (cursor->data->type) {
1132 case CEPH_MSG_DATA_PAGELIST:
1133 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1135 case CEPH_MSG_DATA_PAGES:
1136 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
#ifdef CONFIG_BLOCK
case CEPH_MSG_DATA_BIO:
1140 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1142 #endif /* CONFIG_BLOCK */
1143 case CEPH_MSG_DATA_BVECS:
1144 new_piece = ceph_msg_data_bvecs_advance(cursor, bytes);
1146 case CEPH_MSG_DATA_ITER:
1147 new_piece = ceph_msg_data_iter_advance(cursor, bytes);
1149 case CEPH_MSG_DATA_NONE:
1154 cursor->total_resid -= bytes;
1156 if (!cursor->resid && cursor->total_resid) {
1158 __ceph_msg_data_cursor_init(cursor);
1161 cursor->need_crc = new_piece;
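/*
 * Consumption sketch (illustrative): a writer walks message data one
 * piece at a time with the cursor API above.  "example_send_page" stands
 * in for the actual socket I/O and is hypothetical, as is
 * "example_write_msg_data".
 */
static int example_write_msg_data(struct ceph_connection *con,
				  struct ceph_msg *msg,
				  int (*example_send_page)(struct ceph_connection *,
							   struct page *,
							   size_t, size_t))
{
	struct ceph_msg_data_cursor cursor;
	size_t page_offset, length;
	struct page *page;
	int ret;

	ceph_msg_data_cursor_init(&cursor, msg, msg->data_length);
	while (cursor.total_resid) {
		page = ceph_msg_data_next(&cursor, &page_offset, &length);
		ret = example_send_page(con, page, page_offset, length);
		if (ret < 0)
			return ret;
		/* advance by however much actually went out */
		ceph_msg_data_advance(&cursor, (size_t)ret);
	}
	return 0;
}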
1164 u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset,
1165 unsigned int length)
1170 BUG_ON(kaddr == NULL);
1171 crc = crc32c(crc, kaddr + page_offset, length);
1177 bool ceph_addr_is_blank(const struct ceph_entity_addr *addr)
1179 struct sockaddr_storage ss = addr->in_addr; /* align */
1180 struct in_addr *addr4 = &((struct sockaddr_in *)&ss)->sin_addr;
1181 struct in6_addr *addr6 = &((struct sockaddr_in6 *)&ss)->sin6_addr;
1183 switch (ss.ss_family) {
1185 return addr4->s_addr == htonl(INADDR_ANY);
1187 return ipv6_addr_any(addr6);
1192 EXPORT_SYMBOL(ceph_addr_is_blank);
1194 int ceph_addr_port(const struct ceph_entity_addr *addr)
1196 switch (get_unaligned(&addr->in_addr.ss_family)) {
1198 return ntohs(get_unaligned(&((struct sockaddr_in *)&addr->in_addr)->sin_port));
1200 return ntohs(get_unaligned(&((struct sockaddr_in6 *)&addr->in_addr)->sin6_port));
1205 void ceph_addr_set_port(struct ceph_entity_addr *addr, int p)
1207 switch (get_unaligned(&addr->in_addr.ss_family)) {
1209 put_unaligned(htons(p), &((struct sockaddr_in *)&addr->in_addr)->sin_port);
1212 put_unaligned(htons(p), &((struct sockaddr_in6 *)&addr->in_addr)->sin6_port);
1218 * Unlike other *_pton function semantics, zero indicates success.
1220 static int ceph_pton(const char *str, size_t len, struct ceph_entity_addr *addr,
1221 char delim, const char **ipend)
1223 memset(&addr->in_addr, 0, sizeof(addr->in_addr));
1225 if (in4_pton(str, len, (u8 *)&((struct sockaddr_in *)&addr->in_addr)->sin_addr.s_addr, delim, ipend)) {
1226 put_unaligned(AF_INET, &addr->in_addr.ss_family);
1230 if (in6_pton(str, len, (u8 *)&((struct sockaddr_in6 *)&addr->in_addr)->sin6_addr.s6_addr, delim, ipend)) {
1231 put_unaligned(AF_INET6, &addr->in_addr.ss_family);
1239 * Extract hostname string and resolve using kernel DNS facility.
1241 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1242 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1243 struct ceph_entity_addr *addr, char delim, const char **ipend)
1245 const char *end, *delim_p;
1246 char *colon_p, *ip_addr = NULL;
1250 * The end of the hostname occurs immediately preceding the delimiter or
1251 * the port marker (':') where the delimiter takes precedence.
1253 delim_p = memchr(name, delim, namelen);
1254 colon_p = memchr(name, ':', namelen);
1256 if (delim_p && colon_p)
1257 end = delim_p < colon_p ? delim_p : colon_p;
1258 else if (!delim_p && colon_p)
1262 if (!end) /* case: hostname:/ */
1263 end = name + namelen;
1269 /* do dns_resolve upcall */
1270 ip_len = dns_query(current->nsproxy->net_ns,
1271 NULL, name, end - name, NULL, &ip_addr, NULL, false);
1273 ret = ceph_pton(ip_addr, ip_len, addr, -1, NULL);
1281 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1282 ret, ret ? "failed" : ceph_pr_addr(addr));
1287 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1288 struct ceph_entity_addr *addr, char delim, const char **ipend)
1295 * Parse a server name (IP or hostname). If a valid IP address is not found
1296 * then try to extract a hostname to resolve using userspace DNS upcall.
1298 static int ceph_parse_server_name(const char *name, size_t namelen,
1299 struct ceph_entity_addr *addr, char delim, const char **ipend)
1303 ret = ceph_pton(name, namelen, addr, delim, ipend);
1305 ret = ceph_dns_resolve_name(name, namelen, addr, delim, ipend);
1311 * Parse an ip[:port] list into an addr array. Use the default
1312 * monitor port if a port isn't specified.
1314 int ceph_parse_ips(const char *c, const char *end,
1315 struct ceph_entity_addr *addr,
1316 int max_count, int *count, char delim)
1318 int i, ret = -EINVAL;
1321 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1322 for (i = 0; i < max_count; i++) {
1323 char cur_delim = delim;
1332 ret = ceph_parse_server_name(p, end - p, &addr[i], cur_delim,
1340 if (cur_delim == ']') {
1342 dout("missing matching ']'\n");
1349 if (p < end && *p == ':') {
1352 while (p < end && *p >= '0' && *p <= '9') {
1353 port = (port * 10) + (*p - '0');
1357 port = CEPH_MON_PORT;
else if (port > 65535)
goto bad;
} else {
port = CEPH_MON_PORT;
}
1364 ceph_addr_set_port(&addr[i], port);
1366 * We want the type to be set according to ms_mode
1367 * option, but options are normally parsed after mon
1368 * addresses. Rather than complicating parsing, set
1369 * to LEGACY and override in build_initial_monmap()
1370 * for mon addresses and ceph_messenger_init() for
1373 addr[i].type = CEPH_ENTITY_ADDR_TYPE_LEGACY;
1376 dout("%s got %s\n", __func__, ceph_pr_addr(&addr[i]));
1397 * Process message. This happens in the worker thread. The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
1401 void ceph_con_process_message(struct ceph_connection *con)
1403 struct ceph_msg *msg = con->in_msg;
1405 BUG_ON(con->in_msg->con != con);
1408 /* if first message, set peer_name */
1409 if (con->peer_name.type == 0)
1410 con->peer_name = msg->hdr.src;
1413 mutex_unlock(&con->mutex);
1415 dout("===== %p %llu from %s%lld %d=%s len %d+%d+%d (%u %u %u) =====\n",
1416 msg, le64_to_cpu(msg->hdr.seq),
1417 ENTITY_NAME(msg->hdr.src),
1418 le16_to_cpu(msg->hdr.type),
1419 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1420 le32_to_cpu(msg->hdr.front_len),
1421 le32_to_cpu(msg->hdr.middle_len),
1422 le32_to_cpu(msg->hdr.data_len),
1423 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1424 con->ops->dispatch(con, msg);
1426 mutex_lock(&con->mutex);
1430 * Atomically queue work on a connection after the specified delay.
1431 * Bump @con reference to avoid races with connection teardown.
1432 * Returns 0 if work was queued, or an error code otherwise.
1434 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
1436 if (!con->ops->get(con)) {
1437 dout("%s %p ref count 0\n", __func__, con);
1442 delay = round_jiffies_relative(delay);
1444 dout("%s %p %lu\n", __func__, con, delay);
1445 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
1446 dout("%s %p - already queued\n", __func__, con);
1454 static void queue_con(struct ceph_connection *con)
1456 (void) queue_con_delay(con, 0);
1459 static void cancel_con(struct ceph_connection *con)
1461 if (cancel_delayed_work(&con->work)) {
1462 dout("%s %p\n", __func__, con);
1467 static bool con_sock_closed(struct ceph_connection *con)
1469 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_SOCK_CLOSED))
1473 case CEPH_CON_S_ ## x: \
1474 con->error_msg = "socket closed (con state " #x ")"; \
1477 switch (con->state) {
1481 CASE(V1_CONNECT_MSG);
1482 CASE(V2_BANNER_PREFIX);
1483 CASE(V2_BANNER_PAYLOAD);
1486 CASE(V2_AUTH_SIGNATURE);
1487 CASE(V2_SESSION_CONNECT);
1488 CASE(V2_SESSION_RECONNECT);
1499 static bool con_backoff(struct ceph_connection *con)
1503 if (!ceph_con_flag_test_and_clear(con, CEPH_CON_F_BACKOFF))
1506 ret = queue_con_delay(con, con->delay);
1508 dout("%s: con %p FAILED to back off %lu\n", __func__,
1510 BUG_ON(ret == -ENOENT);
1511 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1517 /* Finish fault handling; con->mutex must *not* be held here */
1519 static void con_fault_finish(struct ceph_connection *con)
1521 dout("%s %p\n", __func__, con);
1524 * in case we faulted due to authentication, invalidate our
1525 * current tickets so that we can get new ones.
1527 if (con->v1.auth_retry) {
1528 dout("auth_retry %d, invalidating\n", con->v1.auth_retry);
1529 if (con->ops->invalidate_authorizer)
1530 con->ops->invalidate_authorizer(con);
1531 con->v1.auth_retry = 0;
1534 if (con->ops->fault)
1535 con->ops->fault(con);
1539 * Do some work on a connection. Drop a connection ref when we're done.
1541 static void ceph_con_workfn(struct work_struct *work)
1543 struct ceph_connection *con = container_of(work, struct ceph_connection,
1547 mutex_lock(&con->mutex);
1551 if ((fault = con_sock_closed(con))) {
1552 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
1555 if (con_backoff(con)) {
1556 dout("%s: con %p BACKOFF\n", __func__, con);
1559 if (con->state == CEPH_CON_S_STANDBY) {
1560 dout("%s: con %p STANDBY\n", __func__, con);
1563 if (con->state == CEPH_CON_S_CLOSED) {
1564 dout("%s: con %p CLOSED\n", __func__, con);
1568 if (con->state == CEPH_CON_S_PREOPEN) {
1569 dout("%s: con %p PREOPEN\n", __func__, con);
1573 if (ceph_msgr2(from_msgr(con->msgr)))
1574 ret = ceph_con_v2_try_read(con);
1576 ret = ceph_con_v1_try_read(con);
1580 if (!con->error_msg)
1581 con->error_msg = "socket error on read";
1586 if (ceph_msgr2(from_msgr(con->msgr)))
1587 ret = ceph_con_v2_try_write(con);
1589 ret = ceph_con_v1_try_write(con);
1593 if (!con->error_msg)
1594 con->error_msg = "socket error on write";
1598 break; /* If we make it to here, we're done */
1602 mutex_unlock(&con->mutex);
1605 con_fault_finish(con);
1611 * Generic error/fault handler. A retry mechanism is used with
 * exponential backoff.
1614 static void con_fault(struct ceph_connection *con)
1616 dout("fault %p state %d to peer %s\n",
1617 con, con->state, ceph_pr_addr(&con->peer_addr));
1619 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
1620 ceph_pr_addr(&con->peer_addr), con->error_msg);
1621 con->error_msg = NULL;
1623 WARN_ON(con->state == CEPH_CON_S_STANDBY ||
1624 con->state == CEPH_CON_S_CLOSED);
1626 ceph_con_reset_protocol(con);
1628 if (ceph_con_flag_test(con, CEPH_CON_F_LOSSYTX)) {
1629 dout("fault on LOSSYTX channel, marking CLOSED\n");
1630 con->state = CEPH_CON_S_CLOSED;
1634 /* Requeue anything that hasn't been acked */
1635 list_splice_init(&con->out_sent, &con->out_queue);
1637 /* If there are no messages queued or keepalive pending, place
1638 * the connection in a STANDBY state */
1639 if (list_empty(&con->out_queue) &&
1640 !ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
1641 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
1642 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
1643 con->state = CEPH_CON_S_STANDBY;
1645 /* retry after a delay. */
1646 con->state = CEPH_CON_S_PREOPEN;
1648 con->delay = BASE_DELAY_INTERVAL;
} else if (con->delay < MAX_DELAY_INTERVAL) {
con->delay *= 2;
1651 if (con->delay > MAX_DELAY_INTERVAL)
1652 con->delay = MAX_DELAY_INTERVAL;
1654 ceph_con_flag_set(con, CEPH_CON_F_BACKOFF);
1659 void ceph_messenger_reset_nonce(struct ceph_messenger *msgr)
1661 u32 nonce = le32_to_cpu(msgr->inst.addr.nonce) + 1000000;
1662 msgr->inst.addr.nonce = cpu_to_le32(nonce);
1663 ceph_encode_my_addr(msgr);
1667 * initialize a new messenger instance
1669 void ceph_messenger_init(struct ceph_messenger *msgr,
1670 struct ceph_entity_addr *myaddr)
1672 spin_lock_init(&msgr->global_seq_lock);
1675 memcpy(&msgr->inst.addr.in_addr, &myaddr->in_addr,
1676 sizeof(msgr->inst.addr.in_addr));
1677 ceph_addr_set_port(&msgr->inst.addr, 0);
1681 * Since nautilus, clients are identified using type ANY.
1682 * For msgr1, ceph_encode_banner_addr() munges it to NONE.
1684 msgr->inst.addr.type = CEPH_ENTITY_ADDR_TYPE_ANY;
1686 /* generate a random non-zero nonce */
1688 get_random_bytes(&msgr->inst.addr.nonce,
1689 sizeof(msgr->inst.addr.nonce));
1690 } while (!msgr->inst.addr.nonce);
1691 ceph_encode_my_addr(msgr);
1693 atomic_set(&msgr->stopping, 0);
1694 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
1696 dout("%s %p\n", __func__, msgr);
1699 void ceph_messenger_fini(struct ceph_messenger *msgr)
1701 put_net(read_pnet(&msgr->net));
1704 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
1707 msg->con->ops->put(msg->con);
1709 msg->con = con ? con->ops->get(con) : NULL;
1710 BUG_ON(msg->con != con);
1713 static void clear_standby(struct ceph_connection *con)
1715 /* come back from STANDBY? */
1716 if (con->state == CEPH_CON_S_STANDBY) {
1717 dout("clear_standby %p and ++connect_seq\n", con);
1718 con->state = CEPH_CON_S_PREOPEN;
1719 con->v1.connect_seq++;
1720 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_WRITE_PENDING));
1721 WARN_ON(ceph_con_flag_test(con, CEPH_CON_F_KEEPALIVE_PENDING));
1726 * Queue up an outgoing message on the given connection.
1728 * Consumes a ref on @msg.
1730 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
1733 msg->hdr.src = con->msgr->inst.name;
1734 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
1735 msg->needs_out_seq = true;
1737 mutex_lock(&con->mutex);
1739 if (con->state == CEPH_CON_S_CLOSED) {
1740 dout("con_send %p closed, dropping %p\n", con, msg);
1742 mutex_unlock(&con->mutex);
1746 msg_con_set(msg, con);
1748 BUG_ON(!list_empty(&msg->list_head));
1749 list_add_tail(&msg->list_head, &con->out_queue);
1750 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
1751 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
1752 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1753 le32_to_cpu(msg->hdr.front_len),
1754 le32_to_cpu(msg->hdr.middle_len),
1755 le32_to_cpu(msg->hdr.data_len));
1758 mutex_unlock(&con->mutex);
1760 /* if there wasn't anything waiting to send before, queue
1762 if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1765 EXPORT_SYMBOL(ceph_con_send);
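/*
 * Send sketch (illustrative): allocate a small message, fill in the
 * front payload and hand it to the connection, which consumes our ref.
 * "example_send_u32" is hypothetical and the message type is left to
 * the caller.
 */
static int example_send_u32(struct ceph_connection *con, int example_msg_type,
			    u32 value)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new(example_msg_type, sizeof(__le32), GFP_NOFS, true);
	if (!msg)
		return -ENOMEM;

	put_unaligned_le32(value, msg->front.iov_base);
	ceph_con_send(con, msg);	/* consumes our ref on msg */
	return 0;
}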
1768 * Revoke a message that was previously queued for send
1770 void ceph_msg_revoke(struct ceph_msg *msg)
1772 struct ceph_connection *con = msg->con;
1775 dout("%s msg %p null con\n", __func__, msg);
1776 return; /* Message not in our possession */
1779 mutex_lock(&con->mutex);
1780 if (list_empty(&msg->list_head)) {
1781 WARN_ON(con->out_msg == msg);
1782 dout("%s con %p msg %p not linked\n", __func__, con, msg);
1783 mutex_unlock(&con->mutex);
1787 dout("%s con %p msg %p was linked\n", __func__, con, msg);
1789 ceph_msg_remove(msg);
1791 if (con->out_msg == msg) {
1792 WARN_ON(con->state != CEPH_CON_S_OPEN);
1793 dout("%s con %p msg %p was sending\n", __func__, con, msg);
1794 if (ceph_msgr2(from_msgr(con->msgr)))
1795 ceph_con_v2_revoke(con);
1797 ceph_con_v1_revoke(con);
1798 ceph_msg_put(con->out_msg);
1799 con->out_msg = NULL;
1801 dout("%s con %p msg %p not current, out_msg %p\n", __func__,
1802 con, msg, con->out_msg);
1804 mutex_unlock(&con->mutex);
1808 * Revoke a message that we may be reading data into
1810 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
1812 struct ceph_connection *con = msg->con;
1815 dout("%s msg %p null con\n", __func__, msg);
1816 return; /* Message not in our possession */
1819 mutex_lock(&con->mutex);
1820 if (con->in_msg == msg) {
1821 WARN_ON(con->state != CEPH_CON_S_OPEN);
1822 dout("%s con %p msg %p was recving\n", __func__, con, msg);
1823 if (ceph_msgr2(from_msgr(con->msgr)))
1824 ceph_con_v2_revoke_incoming(con);
1826 ceph_con_v1_revoke_incoming(con);
1827 ceph_msg_put(con->in_msg);
1830 dout("%s con %p msg %p not current, in_msg %p\n", __func__,
1831 con, msg, con->in_msg);
1833 mutex_unlock(&con->mutex);
1837 * Queue a keepalive byte to ensure the tcp connection is alive.
1839 void ceph_con_keepalive(struct ceph_connection *con)
1841 dout("con_keepalive %p\n", con);
1842 mutex_lock(&con->mutex);
1844 ceph_con_flag_set(con, CEPH_CON_F_KEEPALIVE_PENDING);
1845 mutex_unlock(&con->mutex);
1847 if (!ceph_con_flag_test_and_set(con, CEPH_CON_F_WRITE_PENDING))
1850 EXPORT_SYMBOL(ceph_con_keepalive);
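/*
 * Keepalive sketch (illustrative): a caller with a periodic tick can
 * nudge the connection and use ceph_con_keepalive_expired() (below) to
 * detect a peer that stopped acking.  "example_peer_is_stale" and
 * "example_interval" (in jiffies) are hypothetical.
 */
static bool example_peer_is_stale(struct ceph_connection *con,
				  unsigned long example_interval)
{
	ceph_con_keepalive(con);
	return ceph_con_keepalive_expired(con, example_interval);
}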
1852 bool ceph_con_keepalive_expired(struct ceph_connection *con,
1853 unsigned long interval)
1856 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
1857 struct timespec64 now;
1858 struct timespec64 ts;
1859 ktime_get_real_ts64(&now);
1860 jiffies_to_timespec64(interval, &ts);
1861 ts = timespec64_add(con->last_keepalive_ack, ts);
1862 return timespec64_compare(&now, &ts) >= 0;
1867 static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
1869 BUG_ON(msg->num_data_items >= msg->max_data_items);
1870 return &msg->data[msg->num_data_items++];
1873 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
1875 if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
1876 int num_pages = calc_pages_for(data->alignment, data->length);
1877 ceph_release_page_vector(data->pages, num_pages);
1878 } else if (data->type == CEPH_MSG_DATA_PAGELIST) {
1879 ceph_pagelist_release(data->pagelist);
1883 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
1884 size_t length, size_t alignment, bool own_pages)
1886 struct ceph_msg_data *data;
1891 data = ceph_msg_data_add(msg);
1892 data->type = CEPH_MSG_DATA_PAGES;
1893 data->pages = pages;
1894 data->length = length;
1895 data->alignment = alignment & ~PAGE_MASK;
1896 data->own_pages = own_pages;
1898 msg->data_length += length;
1900 EXPORT_SYMBOL(ceph_msg_data_add_pages);
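/*
 * Data-attach sketch (illustrative): allocate a page vector and attach
 * it to a message created with ceph_msg_new2() (so a data item slot is
 * available), letting the messenger release the pages on the final put.
 * "example_attach_pages" is hypothetical.
 */
static int example_attach_pages(struct ceph_msg *msg, size_t length)
{
	int num_pages = calc_pages_for(0, length);
	struct page **pages;

	pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/* own_pages == true: freed by ceph_msg_data_destroy() */
	ceph_msg_data_add_pages(msg, pages, length, 0, true);
	return 0;
}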
1902 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
1903 struct ceph_pagelist *pagelist)
1905 struct ceph_msg_data *data;
1908 BUG_ON(!pagelist->length);
1910 data = ceph_msg_data_add(msg);
1911 data->type = CEPH_MSG_DATA_PAGELIST;
1912 refcount_inc(&pagelist->refcnt);
1913 data->pagelist = pagelist;
1915 msg->data_length += pagelist->length;
1917 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
#ifdef CONFIG_BLOCK
void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
1923 struct ceph_msg_data *data;
1925 data = ceph_msg_data_add(msg);
1926 data->type = CEPH_MSG_DATA_BIO;
1927 data->bio_pos = *bio_pos;
1928 data->bio_length = length;
1930 msg->data_length += length;
1932 EXPORT_SYMBOL(ceph_msg_data_add_bio);
1933 #endif /* CONFIG_BLOCK */
1935 void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
1936 struct ceph_bvec_iter *bvec_pos)
1938 struct ceph_msg_data *data;
1940 data = ceph_msg_data_add(msg);
1941 data->type = CEPH_MSG_DATA_BVECS;
1942 data->bvec_pos = *bvec_pos;
1944 msg->data_length += bvec_pos->iter.bi_size;
1946 EXPORT_SYMBOL(ceph_msg_data_add_bvecs);
1948 void ceph_msg_data_add_iter(struct ceph_msg *msg,
1949 struct iov_iter *iter)
1951 struct ceph_msg_data *data;
1953 data = ceph_msg_data_add(msg);
1954 data->type = CEPH_MSG_DATA_ITER;
1957 msg->data_length += iov_iter_count(&data->iter);
 * construct a new message with the given type and front size;
 * the new msg has a ref count of 1.
1964 struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
1965 gfp_t flags, bool can_fail)
1969 m = kmem_cache_zalloc(ceph_msg_cache, flags);
1973 m->hdr.type = cpu_to_le16(type);
1974 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
1975 m->hdr.front_len = cpu_to_le32(front_len);
1977 INIT_LIST_HEAD(&m->list_head);
1978 kref_init(&m->kref);
1982 m->front.iov_base = kvmalloc(front_len, flags);
1983 if (m->front.iov_base == NULL) {
1984 dout("ceph_msg_new can't allocate %d bytes\n",
1989 m->front.iov_base = NULL;
1991 m->front_alloc_len = m->front.iov_len = front_len;
1993 if (max_data_items) {
1994 m->data = kmalloc_array(max_data_items, sizeof(*m->data),
1999 m->max_data_items = max_data_items;
2002 dout("ceph_msg_new %p front %d\n", m, front_len);
2009 pr_err("msg_new can't create type %d front %d\n", type,
2013 dout("msg_new can't create type %d front %d\n", type,
2018 EXPORT_SYMBOL(ceph_msg_new2);
2020 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2023 return ceph_msg_new2(type, front_len, 0, flags, can_fail);
2025 EXPORT_SYMBOL(ceph_msg_new);
2028 * Allocate "middle" portion of a message, if it is needed and wasn't
2029 * allocated by alloc_msg. This allows us to read a small fixed-size
2030 * per-type header in the front and then gracefully fail (i.e.,
2031 * propagate the error to the caller based on info in the front) when
2032 * the middle is too large.
2034 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2036 int type = le16_to_cpu(msg->hdr.type);
2037 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2039 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2040 ceph_msg_type_name(type), middle_len);
2041 BUG_ON(!middle_len);
2042 BUG_ON(msg->middle);
2044 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2051 * Allocate a message for receiving an incoming message on a
2052 * connection, and save the result in con->in_msg. Uses the
2053 * connection's private alloc_msg op if available.
2055 * Returns 0 on success, or a negative error code.
2057 * On success, if we set *skip = 1:
2058 * - the next message should be skipped and ignored.
2059 * - con->in_msg == NULL
2060 * or if we set *skip = 0:
2061 * - con->in_msg is non-null.
2062 * On error (ENOMEM, EAGAIN, ...),
2063 * - con->in_msg == NULL
2065 int ceph_con_in_msg_alloc(struct ceph_connection *con,
2066 struct ceph_msg_header *hdr, int *skip)
2068 int middle_len = le32_to_cpu(hdr->middle_len);
2069 struct ceph_msg *msg;
2072 BUG_ON(con->in_msg != NULL);
2073 BUG_ON(!con->ops->alloc_msg);
2075 mutex_unlock(&con->mutex);
2076 msg = con->ops->alloc_msg(con, hdr, skip);
2077 mutex_lock(&con->mutex);
2078 if (con->state != CEPH_CON_S_OPEN) {
2085 msg_con_set(msg, con);
2089 * Null message pointer means either we should skip
2090 * this message or we couldn't allocate memory. The
2091 * former is not an error.
2096 con->error_msg = "error allocating memory for incoming message";
2099 memcpy(&con->in_msg->hdr, hdr, sizeof(*hdr));
2101 if (middle_len && !con->in_msg->middle) {
2102 ret = ceph_alloc_middle(con, con->in_msg);
2104 ceph_msg_put(con->in_msg);
2112 void ceph_con_get_out_msg(struct ceph_connection *con)
2114 struct ceph_msg *msg;
2116 BUG_ON(list_empty(&con->out_queue));
2117 msg = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
2118 WARN_ON(msg->con != con);
2121 * Put the message on "sent" list using a ref from ceph_con_send().
2122 * It is put when the message is acked or revoked.
2124 list_move_tail(&msg->list_head, &con->out_sent);
2127 * Only assign outgoing seq # if we haven't sent this message
 * yet. If it is requeued, resend with its original seq.
2130 if (msg->needs_out_seq) {
2131 msg->hdr.seq = cpu_to_le64(++con->out_seq);
2132 msg->needs_out_seq = false;
2134 if (con->ops->reencode_message)
2135 con->ops->reencode_message(msg);
2139 * Get a ref for out_msg. It is put when we are done sending the
2140 * message or in case of a fault.
2142 WARN_ON(con->out_msg);
2143 con->out_msg = ceph_msg_get(msg);
2147 * Free a generically kmalloc'd message.
2149 static void ceph_msg_free(struct ceph_msg *m)
2151 dout("%s %p\n", __func__, m);
2152 kvfree(m->front.iov_base);
2154 kmem_cache_free(ceph_msg_cache, m);
2157 static void ceph_msg_release(struct kref *kref)
2159 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2162 dout("%s %p\n", __func__, m);
2163 WARN_ON(!list_empty(&m->list_head));
2165 msg_con_set(m, NULL);
2167 /* drop middle, data, if any */
2169 ceph_buffer_put(m->middle);
2173 for (i = 0; i < m->num_data_items; i++)
2174 ceph_msg_data_destroy(&m->data[i]);
2177 ceph_msgpool_put(m->pool, m);
2182 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
2184 dout("%s %p (was %d)\n", __func__, msg,
2185 kref_read(&msg->kref));
2186 kref_get(&msg->kref);
2189 EXPORT_SYMBOL(ceph_msg_get);
2191 void ceph_msg_put(struct ceph_msg *msg)
2193 dout("%s %p (was %d)\n", __func__, msg,
2194 kref_read(&msg->kref));
2195 kref_put(&msg->kref, ceph_msg_release);
2197 EXPORT_SYMBOL(ceph_msg_put);
2199 void ceph_msg_dump(struct ceph_msg *msg)
2201 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
2202 msg->front_alloc_len, msg->data_length);
2203 print_hex_dump(KERN_DEBUG, "header: ",
2204 DUMP_PREFIX_OFFSET, 16, 1,
2205 &msg->hdr, sizeof(msg->hdr), true);
2206 print_hex_dump(KERN_DEBUG, " front: ",
2207 DUMP_PREFIX_OFFSET, 16, 1,
2208 msg->front.iov_base, msg->front.iov_len, true);
2210 print_hex_dump(KERN_DEBUG, "middle: ",
2211 DUMP_PREFIX_OFFSET, 16, 1,
2212 msg->middle->vec.iov_base,
2213 msg->middle->vec.iov_len, true);
2214 print_hex_dump(KERN_DEBUG, "footer: ",
2215 DUMP_PREFIX_OFFSET, 16, 1,
2216 &msg->footer, sizeof(msg->footer), true);
2218 EXPORT_SYMBOL(ceph_msg_dump);