1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #include <linux/bio.h>
13 #include <linux/blkdev.h>
14 #include <linux/dns_resolver.h>
17 #include <linux/ceph/libceph.h>
18 #include <linux/ceph/messenger.h>
19 #include <linux/ceph/decode.h>
20 #include <linux/ceph/pagelist.h>
21 #include <linux/export.h>
24 * Ceph uses the messenger to exchange ceph_msg messages with other
25 * hosts in the system. The messenger provides ordered and reliable
26 * delivery. We tolerate TCP disconnects by reconnecting (with
27 * exponential backoff) in the case of a fault (disconnection, bad
28 * crc, protocol error). Acks allow sent messages to be discarded by
29 * the sender.
30 */
32 /* static tag bytes (protocol control messages) */
33 static char tag_msg = CEPH_MSGR_TAG_MSG;
34 static char tag_ack = CEPH_MSGR_TAG_ACK;
35 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
38 static struct lock_class_key socket_class;
42 * When skipping (ignoring) a block of input we read it into a "skip
43 * buffer," which is this many bytes in size.
45 #define SKIP_BUF_SIZE 1024
47 static void queue_con(struct ceph_connection *con);
48 static void con_work(struct work_struct *);
49 static void ceph_fault(struct ceph_connection *con);
52 * Nicely render a sockaddr as a string. An array of formatted
53 * strings is used, to approximate reentrancy.
55 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
56 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
57 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
58 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
60 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
61 static atomic_t addr_str_seq = ATOMIC_INIT(0);
63 static struct page *zero_page; /* used in certain error cases */
65 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
69 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
70 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
72 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
75 switch (ss->ss_family) {
77 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
78 ntohs(in4->sin_port));
82 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
83 ntohs(in6->sin6_port));
87 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
88 ss->ss_family);
93 EXPORT_SYMBOL(ceph_pr_addr);
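/*
 * Note: ceph_pr_addr() hands back a pointer into a small rotating pool
 * of static buffers (ADDR_STR_COUNT entries), so the string is only
 * valid until enough later callers have cycled through the pool; it is
 * meant for transient debug/error output.  Typical use, mirroring the
 * call sites later in this file:
 *
 *	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 */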
95 static void encode_my_addr(struct ceph_messenger *msgr)
97 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
98 ceph_encode_addr(&msgr->my_enc_addr);
102 * work queue for all reading and writing to/from the socket.
104 static struct workqueue_struct *ceph_msgr_wq;
106 void _ceph_msgr_exit(void)
109 destroy_workqueue(ceph_msgr_wq);
113 BUG_ON(zero_page == NULL);
115 page_cache_release(zero_page);
119 int ceph_msgr_init(void)
121 BUG_ON(zero_page != NULL);
122 zero_page = ZERO_PAGE(0);
123 page_cache_get(zero_page);
125 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
129 pr_err("msgr_init failed to create workqueue\n");
134 EXPORT_SYMBOL(ceph_msgr_init);
136 void ceph_msgr_exit(void)
138 BUG_ON(ceph_msgr_wq == NULL);
142 EXPORT_SYMBOL(ceph_msgr_exit);
144 void ceph_msgr_flush(void)
146 flush_workqueue(ceph_msgr_wq);
148 EXPORT_SYMBOL(ceph_msgr_flush);
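/*
 * The three entry points above pair up: ceph_msgr_init() pins the
 * shared zero page and creates the ceph-msgr workqueue,
 * ceph_msgr_exit() tears both down, and ceph_msgr_flush() waits for
 * queued connection work to drain.  A rough sketch of how module
 * init/exit code (assumed caller, not shown here) pairs them:
 *
 *	ret = ceph_msgr_init();
 *	if (ret)
 *		return ret;
 *	...
 *	ceph_msgr_flush();
 *	ceph_msgr_exit();
 */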
152 * socket callback functions
155 /* data available on socket, or listen socket received a connect */
156 static void ceph_data_ready(struct sock *sk, int count_unused)
158 struct ceph_connection *con = sk->sk_user_data;
160 if (sk->sk_state != TCP_CLOSE_WAIT) {
161 dout("ceph_data_ready on %p state = %lu, queueing work\n",
167 /* socket has buffer space for writing */
168 static void ceph_write_space(struct sock *sk)
170 struct ceph_connection *con = sk->sk_user_data;
172 /* only queue to workqueue if there is data we want to write,
173 * and there is sufficient space in the socket buffer to accept
174 * more data. clear SOCK_NOSPACE so that ceph_write_space()
175 * doesn't get called again until try_write() fills the socket
176 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
177 * and net/core/stream.c:sk_stream_write_space().
179 if (test_bit(WRITE_PENDING, &con->state)) {
180 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
181 dout("ceph_write_space %p queueing write work\n", con);
182 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
186 dout("ceph_write_space %p nothing to write\n", con);
190 /* socket's state has changed */
191 static void ceph_state_change(struct sock *sk)
193 struct ceph_connection *con = sk->sk_user_data;
195 dout("ceph_state_change %p state = %lu sk_state = %u\n",
196 con, con->state, sk->sk_state);
198 if (test_bit(CLOSED, &con->state))
201 switch (sk->sk_state) {
203 dout("ceph_state_change TCP_CLOSE\n");
205 dout("ceph_state_change TCP_CLOSE_WAIT\n");
206 if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
207 if (test_bit(CONNECTING, &con->state))
208 con->error_msg = "connection failed";
210 con->error_msg = "socket closed";
214 case TCP_ESTABLISHED:
215 dout("ceph_state_change TCP_ESTABLISHED\n");
218 default: /* Everything else is uninteresting */
224 * set up socket callbacks
226 static void set_sock_callbacks(struct socket *sock,
227 struct ceph_connection *con)
229 struct sock *sk = sock->sk;
230 sk->sk_user_data = con;
231 sk->sk_data_ready = ceph_data_ready;
232 sk->sk_write_space = ceph_write_space;
233 sk->sk_state_change = ceph_state_change;
242 * initiate connection to a remote socket.
244 static int ceph_tcp_connect(struct ceph_connection *con)
246 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
251 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
255 sock->sk->sk_allocation = GFP_NOFS;
257 #ifdef CONFIG_LOCKDEP
258 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
261 set_sock_callbacks(sock, con);
263 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
265 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
267 if (ret == -EINPROGRESS) {
268 dout("connect %s EINPROGRESS sk_state = %u\n",
269 ceph_pr_addr(&con->peer_addr.in_addr),
271 } else if (ret < 0) {
272 pr_err("connect %s error %d\n",
273 ceph_pr_addr(&con->peer_addr.in_addr), ret);
275 con->error_msg = "connect error";
284 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
286 struct kvec iov = {buf, len};
287 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
290 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
297 * write something. @more is true if caller will be sending more data
298 * shortly.
299 */
300 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
301 size_t kvlen, size_t len, int more)
303 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
307 msg.msg_flags |= MSG_MORE;
309 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
311 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
317 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
318 int offset, size_t size, int more)
320 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
323 ret = kernel_sendpage(sock, page, offset, size, flags);
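/*
 * All three ceph_tcp_* wrappers above do non-blocking socket I/O:
 * MSG_DONTWAIT keeps the worker thread from sleeping in the network
 * stack, MSG_NOSIGNAL suppresses SIGPIPE, and the @more flag picks
 * MSG_MORE vs. MSG_EOR so small fragments can be coalesced when the
 * caller knows more output is coming.
 */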
332 * Shutdown/close the socket for the given connection.
334 static int con_close_socket(struct ceph_connection *con)
338 dout("con_close_socket on %p sock %p\n", con, con->sock);
341 set_bit(SOCK_CLOSED, &con->state);
342 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
343 sock_release(con->sock);
345 clear_bit(SOCK_CLOSED, &con->state);
350 * Reset a connection. Discard all incoming and outgoing messages
351 * and clear *_seq state.
353 static void ceph_msg_remove(struct ceph_msg *msg)
355 list_del_init(&msg->list_head);
358 static void ceph_msg_remove_list(struct list_head *head)
360 while (!list_empty(head)) {
361 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
363 ceph_msg_remove(msg);
367 static void reset_connection(struct ceph_connection *con)
369 /* reset connection, out_queue, msg_ and connect_seq */
370 /* discard existing out_queue and msg_seq */
371 ceph_msg_remove_list(&con->out_queue);
372 ceph_msg_remove_list(&con->out_sent);
375 ceph_msg_put(con->in_msg);
379 con->connect_seq = 0;
382 ceph_msg_put(con->out_msg);
386 con->in_seq_acked = 0;
390 * mark a peer down. drop any open connections.
392 void ceph_con_close(struct ceph_connection *con)
394 dout("con_close %p peer %s\n", con,
395 ceph_pr_addr(&con->peer_addr.in_addr));
396 set_bit(CLOSED, &con->state); /* in case there's queued work */
397 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
398 clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
399 clear_bit(KEEPALIVE_PENDING, &con->state);
400 clear_bit(WRITE_PENDING, &con->state);
401 mutex_lock(&con->mutex);
402 reset_connection(con);
403 con->peer_global_seq = 0;
404 cancel_delayed_work(&con->work);
405 mutex_unlock(&con->mutex);
408 EXPORT_SYMBOL(ceph_con_close);
411 * Reopen a closed connection, with a new peer address.
413 void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
415 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
416 set_bit(OPENING, &con->state);
417 clear_bit(CLOSED, &con->state);
418 memcpy(&con->peer_addr, addr, sizeof(*addr));
419 con->delay = 0; /* reset backoff memory */
422 EXPORT_SYMBOL(ceph_con_open);
425 * return true if this connection ever successfully opened
427 bool ceph_con_opened(struct ceph_connection *con)
429 return con->connect_seq > 0;
435 struct ceph_connection *ceph_con_get(struct ceph_connection *con)
437 int nref = __atomic_add_unless(&con->nref, 1, 0);
439 dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1);
441 return nref ? con : NULL;
444 void ceph_con_put(struct ceph_connection *con)
446 int nref = atomic_dec_return(&con->nref);
453 dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref);
457 * initialize a new connection.
459 void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
461 dout("con_init %p\n", con);
462 memset(con, 0, sizeof(*con));
463 atomic_set(&con->nref, 1);
465 mutex_init(&con->mutex);
466 INIT_LIST_HEAD(&con->out_queue);
467 INIT_LIST_HEAD(&con->out_sent);
468 INIT_DELAYED_WORK(&con->work, con_work);
470 EXPORT_SYMBOL(ceph_con_init);
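/*
 * Putting the pieces above together, a connection's lifetime looks
 * roughly like the sketch below (error handling omitted; my_con_ops
 * is a hypothetical ceph_connection_operations vector supplied by the
 * caller):
 *
 *	ceph_con_init(msgr, con);
 *	con->ops = &my_con_ops;
 *	ceph_con_open(con, &peer_addr);
 *	...
 *	ceph_con_send(con, msg);
 *	...
 *	ceph_con_close(con);
 */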
474 * We maintain a global counter to order connection attempts. Get
475 * a unique seq greater than @gt.
477 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
481 spin_lock(&msgr->global_seq_lock);
482 if (msgr->global_seq < gt)
483 msgr->global_seq = gt;
484 ret = ++msgr->global_seq;
485 spin_unlock(&msgr->global_seq_lock);
489 static void ceph_con_out_kvec_reset(struct ceph_connection *con)
491 con->out_kvec_left = 0;
492 con->out_kvec_bytes = 0;
493 con->out_kvec_cur = &con->out_kvec[0];
496 static void ceph_con_out_kvec_add(struct ceph_connection *con,
497 size_t size, void *data)
501 index = con->out_kvec_left;
502 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
504 con->out_kvec[index].iov_len = size;
505 con->out_kvec[index].iov_base = data;
506 con->out_kvec_left++;
507 con->out_kvec_bytes += size;
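/*
 * The two helpers above stage small control fragments (tags, headers,
 * acks) in con->out_kvec[]: each prepare_write_*() below resets the
 * array and appends its pieces, and write_partial_kvec() later pushes
 * whatever has accumulated out to the socket.
 */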
511 * Prepare footer for currently outgoing message, and finish things
512 * off. Assumes out_kvec* are already valid; we just add on to the end.
514 static void prepare_write_message_footer(struct ceph_connection *con)
516 struct ceph_msg *m = con->out_msg;
517 int v = con->out_kvec_left;
519 dout("prepare_write_message_footer %p\n", con);
520 con->out_kvec_is_msg = true;
521 con->out_kvec[v].iov_base = &m->footer;
522 con->out_kvec[v].iov_len = sizeof(m->footer);
523 con->out_kvec_bytes += sizeof(m->footer);
524 con->out_kvec_left++;
525 con->out_more = m->more_to_follow;
526 con->out_msg_done = true;
530 * Prepare headers for the next outgoing message.
532 static void prepare_write_message(struct ceph_connection *con)
537 ceph_con_out_kvec_reset(con);
538 con->out_kvec_is_msg = true;
539 con->out_msg_done = false;
541 /* Sneak an ack in there first? If we can get it into the same
542 * TCP packet that's a good thing. */
543 if (con->in_seq > con->in_seq_acked) {
544 con->in_seq_acked = con->in_seq;
545 ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
546 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
547 ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
551 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
554 /* put message on sent list */
556 list_move_tail(&m->list_head, &con->out_sent);
559 * only assign outgoing seq # if we haven't sent this message
560 * yet. if it is requeued, resend with its original seq.
562 if (m->needs_out_seq) {
563 m->hdr.seq = cpu_to_le64(++con->out_seq);
564 m->needs_out_seq = false;
567 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
568 m, con->out_seq, le16_to_cpu(m->hdr.type),
569 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
570 le32_to_cpu(m->hdr.data_len),
572 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
574 /* tag + hdr + front + middle */
575 ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
576 ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
577 ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
580 ceph_con_out_kvec_add(con, m->middle->vec.iov_len,
581 m->middle->vec.iov_base);
583 /* fill in crc (except data pages), footer */
584 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
585 con->out_msg->hdr.crc = cpu_to_le32(crc);
586 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
588 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
589 con->out_msg->footer.front_crc = cpu_to_le32(crc);
591 crc = crc32c(0, m->middle->vec.iov_base,
592 m->middle->vec.iov_len);
593 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
595 con->out_msg->footer.middle_crc = 0;
596 con->out_msg->footer.data_crc = 0;
597 dout("prepare_write_message front_crc %u data_crc %u\n",
598 le32_to_cpu(con->out_msg->footer.front_crc),
599 le32_to_cpu(con->out_msg->footer.middle_crc));
601 /* is there a data payload? */
602 if (le32_to_cpu(m->hdr.data_len) > 0) {
603 /* initialize page iterator */
604 con->out_msg_pos.page = 0;
606 con->out_msg_pos.page_pos = m->page_alignment;
608 con->out_msg_pos.page_pos = 0;
609 con->out_msg_pos.data_pos = 0;
610 con->out_msg_pos.did_page_crc = false;
611 con->out_more = 1; /* data + footer will follow */
613 /* no, queue up footer too and be done */
614 prepare_write_message_footer(con);
617 set_bit(WRITE_PENDING, &con->state);
623 static void prepare_write_ack(struct ceph_connection *con)
625 dout("prepare_write_ack %p %llu -> %llu\n", con,
626 con->in_seq_acked, con->in_seq);
627 con->in_seq_acked = con->in_seq;
629 ceph_con_out_kvec_reset(con);
631 ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
633 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
634 ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
637 con->out_more = 1; /* more will follow.. eventually.. */
638 set_bit(WRITE_PENDING, &con->state);
642 * Prepare to write keepalive byte.
644 static void prepare_write_keepalive(struct ceph_connection *con)
646 dout("prepare_write_keepalive %p\n", con);
647 ceph_con_out_kvec_reset(con);
648 ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
649 set_bit(WRITE_PENDING, &con->state);
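/*
 * Note that every prepare_write_*() path above ends by setting
 * WRITE_PENDING; that is what tells try_write() (and the
 * ceph_write_space() callback) that there is staged output to flush.
 */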
653 * Connection negotiation.
656 static int prepare_connect_authorizer(struct ceph_connection *con)
660 int auth_protocol = 0;
662 mutex_unlock(&con->mutex);
663 if (con->ops->get_authorizer)
664 con->ops->get_authorizer(con, &auth_buf, &auth_len,
665 &auth_protocol, &con->auth_reply_buf,
666 &con->auth_reply_buf_len,
668 mutex_lock(&con->mutex);
670 if (test_bit(CLOSED, &con->state) ||
671 test_bit(OPENING, &con->state))
674 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
675 con->out_connect.authorizer_len = cpu_to_le32(auth_len);
678 ceph_con_out_kvec_add(con, auth_len, auth_buf);
684 * We connected to a peer and are saying hello.
686 static void prepare_write_banner(struct ceph_messenger *msgr,
687 struct ceph_connection *con)
689 ceph_con_out_kvec_reset(con);
690 ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
691 ceph_con_out_kvec_add(con, sizeof (msgr->my_enc_addr),
695 set_bit(WRITE_PENDING, &con->state);
698 static int prepare_write_connect(struct ceph_messenger *msgr,
699 struct ceph_connection *con,
702 unsigned global_seq = get_global_seq(con->msgr, 0);
705 switch (con->peer_name.type) {
706 case CEPH_ENTITY_TYPE_MON:
707 proto = CEPH_MONC_PROTOCOL;
709 case CEPH_ENTITY_TYPE_OSD:
710 proto = CEPH_OSDC_PROTOCOL;
712 case CEPH_ENTITY_TYPE_MDS:
713 proto = CEPH_MDSC_PROTOCOL;
719 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
720 con->connect_seq, global_seq, proto);
722 con->out_connect.features = cpu_to_le64(msgr->supported_features);
723 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
724 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
725 con->out_connect.global_seq = cpu_to_le32(global_seq);
726 con->out_connect.protocol_version = cpu_to_le32(proto);
727 con->out_connect.flags = 0;
730 prepare_write_banner(msgr, con);
732 ceph_con_out_kvec_reset(con);
733 ceph_con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect);
736 set_bit(WRITE_PENDING, &con->state);
738 return prepare_connect_authorizer(con);
742 * write as much of pending kvecs to the socket as we can.
743 *  1 -> done
744 *  0 -> socket full, but more to do
745 * <0 -> error
746 */
747 static int write_partial_kvec(struct ceph_connection *con)
751 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
752 while (con->out_kvec_bytes > 0) {
753 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
754 con->out_kvec_left, con->out_kvec_bytes,
758 con->out_kvec_bytes -= ret;
759 if (con->out_kvec_bytes == 0)
762 /* account for full iov entries consumed */
763 while (ret >= con->out_kvec_cur->iov_len) {
764 BUG_ON(!con->out_kvec_left);
765 ret -= con->out_kvec_cur->iov_len;
767 con->out_kvec_left--;
769 /* and for a partially-consumed entry */
771 con->out_kvec_cur->iov_len -= ret;
772 con->out_kvec_cur->iov_base += ret;
775 con->out_kvec_left = 0;
776 con->out_kvec_is_msg = false;
779 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
780 con->out_kvec_bytes, con->out_kvec_left, ret);
781 return ret; /* done! */
785 static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
796 static void iter_bio_next(struct bio **bio_iter, int *seg)
798 if (*bio_iter == NULL)
801 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
804 if (*seg == (*bio_iter)->bi_vcnt)
805 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
810 * Write as much message data payload as we can. If we finish, queue
811 * up the footer.
812 * 1 -> done, footer is now queued in out_kvec[].
813 * 0 -> socket full, but more to do
816 static int write_partial_msg_pages(struct ceph_connection *con)
818 struct ceph_msg *msg = con->out_msg;
819 unsigned data_len = le32_to_cpu(msg->hdr.data_len);
821 bool do_datacrc = !con->msgr->nocrc;
825 size_t trail_len = (msg->trail ? msg->trail->length : 0);
827 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
828 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
829 con->out_msg_pos.page_pos);
832 if (msg->bio && !msg->bio_iter)
833 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
836 while (data_len > con->out_msg_pos.data_pos) {
837 struct page *page = NULL;
839 int max_write = PAGE_SIZE;
842 total_max_write = data_len - trail_len -
843 con->out_msg_pos.data_pos;
846 * if we are calculating the data crc (the default), we need
847 * to map the page. if our pages[] has been revoked, use the
848 * zero page.
849 */
851 /* have we reached the trail part of the data? */
852 if (con->out_msg_pos.data_pos >= data_len - trail_len) {
855 total_max_write = data_len - con->out_msg_pos.data_pos;
857 page = list_first_entry(&msg->trail->head,
861 max_write = PAGE_SIZE;
862 } else if (msg->pages) {
863 page = msg->pages[con->out_msg_pos.page];
866 } else if (msg->pagelist) {
867 page = list_first_entry(&msg->pagelist->head,
872 } else if (msg->bio) {
875 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
877 bio_offset = bv->bv_offset;
880 max_write = bv->bv_len;
887 len = min_t(int, max_write - con->out_msg_pos.page_pos,
890 if (do_datacrc && !con->out_msg_pos.did_page_crc) {
893 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
895 BUG_ON(kaddr == NULL);
896 base = kaddr + con->out_msg_pos.page_pos + bio_offset;
897 crc = crc32c(tmpcrc, base, len);
898 con->out_msg->footer.data_crc = cpu_to_le32(crc);
899 con->out_msg_pos.did_page_crc = true;
901 ret = ceph_tcp_sendpage(con->sock, page,
902 con->out_msg_pos.page_pos + bio_offset,
911 con->out_msg_pos.data_pos += ret;
912 con->out_msg_pos.page_pos += ret;
914 con->out_msg_pos.page_pos = 0;
915 con->out_msg_pos.page++;
916 con->out_msg_pos.did_page_crc = false;
918 list_move_tail(&page->lru,
920 else if (msg->pagelist)
921 list_move_tail(&page->lru,
922 &msg->pagelist->head);
925 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
930 dout("write_partial_msg_pages %p msg %p done\n", con, msg);
932 /* prepare and queue up footer, too */
934 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
935 ceph_con_out_kvec_reset(con);
936 prepare_write_message_footer(con);
945 static int write_partial_skip(struct ceph_connection *con)
949 while (con->out_skip > 0) {
950 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
952 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
955 con->out_skip -= ret;
963 * Prepare to read connection handshake, or an ack.
965 static void prepare_read_banner(struct ceph_connection *con)
967 dout("prepare_read_banner %p\n", con);
968 con->in_base_pos = 0;
971 static void prepare_read_connect(struct ceph_connection *con)
973 dout("prepare_read_connect %p\n", con);
974 con->in_base_pos = 0;
977 static void prepare_read_ack(struct ceph_connection *con)
979 dout("prepare_read_ack %p\n", con);
980 con->in_base_pos = 0;
983 static void prepare_read_tag(struct ceph_connection *con)
985 dout("prepare_read_tag %p\n", con);
986 con->in_base_pos = 0;
987 con->in_tag = CEPH_MSGR_TAG_READY;
991 * Prepare to read a message.
993 static int prepare_read_message(struct ceph_connection *con)
995 dout("prepare_read_message %p\n", con);
996 BUG_ON(con->in_msg != NULL);
997 con->in_base_pos = 0;
998 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1003 static int read_partial(struct ceph_connection *con,
1004 int *to, int size, void *object)
1007 while (con->in_base_pos < *to) {
1008 int left = *to - con->in_base_pos;
1009 int have = size - left;
1010 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1013 con->in_base_pos += ret;
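/*
 * read_partial() accumulates into con->in_base_pos: *to is a running
 * end offset for the current object, and "object + have" resumes a
 * partially read structure in the right place, so the callers below
 * (banner, connect reply, ack) can simply be re-entered until the
 * whole object has arrived.
 */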
1020 * Read all or part of the connect-side handshake on a new connection
1022 static int read_partial_banner(struct ceph_connection *con)
1026 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1029 ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
1032 ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
1033 &con->actual_peer_addr);
1036 ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
1037 &con->peer_addr_for_me);
1044 static int read_partial_connect(struct ceph_connection *con)
1048 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1050 ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
1053 ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
1054 con->auth_reply_buf);
1058 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1059 con, (int)con->in_reply.tag,
1060 le32_to_cpu(con->in_reply.connect_seq),
1061 le32_to_cpu(con->in_reply.global_seq));
1068 * Verify the hello banner looks okay.
1070 static int verify_hello(struct ceph_connection *con)
1072 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1073 pr_err("connect to %s got bad banner\n",
1074 ceph_pr_addr(&con->peer_addr.in_addr));
1075 con->error_msg = "protocol error, bad banner";
1081 static bool addr_is_blank(struct sockaddr_storage *ss)
1083 switch (ss->ss_family) {
1085 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1088 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1089 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1090 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1091 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1096 static int addr_port(struct sockaddr_storage *ss)
1098 switch (ss->ss_family) {
1100 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1102 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1107 static void addr_set_port(struct sockaddr_storage *ss, int p)
1109 switch (ss->ss_family) {
1111 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1114 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1120 * Unlike other *_pton function semantics, zero indicates success.
1122 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1123 char delim, const char **ipend)
1125 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1126 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1128 memset(ss, 0, sizeof(*ss));
1130 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1131 ss->ss_family = AF_INET;
1135 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1136 ss->ss_family = AF_INET6;
1144 * Extract hostname string and resolve using kernel DNS facility.
1146 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1147 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1148 struct sockaddr_storage *ss, char delim, const char **ipend)
1150 const char *end, *delim_p;
1151 char *colon_p, *ip_addr = NULL;
1155 * The end of the hostname occurs immediately preceding the delimiter or
1156 * the port marker (':') where the delimiter takes precedence.
1158 delim_p = memchr(name, delim, namelen);
1159 colon_p = memchr(name, ':', namelen);
1161 if (delim_p && colon_p)
1162 end = delim_p < colon_p ? delim_p : colon_p;
1163 else if (!delim_p && colon_p)
1167 if (!end) /* case: hostname:/ */
1168 end = name + namelen;
1174 /* do dns_resolve upcall */
1175 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1177 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1185 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1186 ret, ret ? "failed" : ceph_pr_addr(ss));
1191 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1192 struct sockaddr_storage *ss, char delim, const char **ipend)
1199 * Parse a server name (IP or hostname). If a valid IP address is not found
1200 * then try to extract a hostname to resolve using userspace DNS upcall.
1202 static int ceph_parse_server_name(const char *name, size_t namelen,
1203 struct sockaddr_storage *ss, char delim, const char **ipend)
1207 ret = ceph_pton(name, namelen, ss, delim, ipend);
1209 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1215 * Parse an ip[:port] list into an addr array. Use the default
1216 * monitor port if a port isn't specified.
1218 int ceph_parse_ips(const char *c, const char *end,
1219 struct ceph_entity_addr *addr,
1220 int max_count, int *count)
1222 int i, ret = -EINVAL;
1225 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1226 for (i = 0; i < max_count; i++) {
1228 struct sockaddr_storage *ss = &addr[i].in_addr;
1237 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1246 dout("missing matching ']'\n");
1253 if (p < end && *p == ':') {
1256 while (p < end && *p >= '0' && *p <= '9') {
1257 port = (port * 10) + (*p - '0');
1260 if (port > 65535 || port == 0)
1263 port = CEPH_MON_PORT;
1266 addr_set_port(ss, port);
1268 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1285 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1288 EXPORT_SYMBOL(ceph_parse_ips);
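/*
 * Example input (a sketch of what mount/option parsing might pass in):
 * a comma-separated list such as
 *
 *	"192.168.0.1:6789,[2001:db8::1]:6789,mon.example.com"
 *
 * Entries without an explicit port fall back to CEPH_MON_PORT, and a
 * name that is not a literal IP is handed to ceph_dns_resolve_name()
 * when CONFIG_CEPH_LIB_USE_DNS_RESOLVER is enabled.
 */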
1290 static int process_banner(struct ceph_connection *con)
1292 dout("process_banner on %p\n", con);
1294 if (verify_hello(con) < 0)
1297 ceph_decode_addr(&con->actual_peer_addr);
1298 ceph_decode_addr(&con->peer_addr_for_me);
1301 * Make sure the other end is who we wanted. note that the other
1302 * end may not yet know their ip address, so if it's 0.0.0.0, give
1303 * them the benefit of the doubt.
1305 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1306 sizeof(con->peer_addr)) != 0 &&
1307 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1308 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1309 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1310 ceph_pr_addr(&con->peer_addr.in_addr),
1311 (int)le32_to_cpu(con->peer_addr.nonce),
1312 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1313 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1314 con->error_msg = "wrong peer at address";
1319 * did we learn our address?
1321 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1322 int port = addr_port(&con->msgr->inst.addr.in_addr);
1324 memcpy(&con->msgr->inst.addr.in_addr,
1325 &con->peer_addr_for_me.in_addr,
1326 sizeof(con->peer_addr_for_me.in_addr));
1327 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1328 encode_my_addr(con->msgr);
1329 dout("process_banner learned my addr is %s\n",
1330 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1333 set_bit(NEGOTIATING, &con->state);
1334 prepare_read_connect(con);
1338 static void fail_protocol(struct ceph_connection *con)
1340 reset_connection(con);
1341 set_bit(CLOSED, &con->state); /* in case there's queued work */
1343 mutex_unlock(&con->mutex);
1344 if (con->ops->bad_proto)
1345 con->ops->bad_proto(con);
1346 mutex_lock(&con->mutex);
1349 static int process_connect(struct ceph_connection *con)
1351 u64 sup_feat = con->msgr->supported_features;
1352 u64 req_feat = con->msgr->required_features;
1353 u64 server_feat = le64_to_cpu(con->in_reply.features);
1356 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1358 switch (con->in_reply.tag) {
1359 case CEPH_MSGR_TAG_FEATURES:
1360 pr_err("%s%lld %s feature set mismatch,"
1361 " my %llx < server's %llx, missing %llx\n",
1362 ENTITY_NAME(con->peer_name),
1363 ceph_pr_addr(&con->peer_addr.in_addr),
1364 sup_feat, server_feat, server_feat & ~sup_feat);
1365 con->error_msg = "missing required protocol features";
1369 case CEPH_MSGR_TAG_BADPROTOVER:
1370 pr_err("%s%lld %s protocol version mismatch,"
1371 " my %d != server's %d\n",
1372 ENTITY_NAME(con->peer_name),
1373 ceph_pr_addr(&con->peer_addr.in_addr),
1374 le32_to_cpu(con->out_connect.protocol_version),
1375 le32_to_cpu(con->in_reply.protocol_version));
1376 con->error_msg = "protocol version mismatch";
1380 case CEPH_MSGR_TAG_BADAUTHORIZER:
1382 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
1384 if (con->auth_retry == 2) {
1385 con->error_msg = "connect authorization failure";
1388 con->auth_retry = 1;
1389 ret = prepare_write_connect(con->msgr, con, 0);
1392 prepare_read_connect(con);
1395 case CEPH_MSGR_TAG_RESETSESSION:
1397 * If we connected with a large connect_seq but the peer
1398 * has no record of a session with us (no connection, or
1399 * connect_seq == 0), they will send RESETSESSION to indicate
1400 * that they must have reset their session, and may have
1401 * dropped messages.
1402 */
1403 dout("process_connect got RESET peer seq %u\n",
1404 le32_to_cpu(con->in_connect.connect_seq));
1405 pr_err("%s%lld %s connection reset\n",
1406 ENTITY_NAME(con->peer_name),
1407 ceph_pr_addr(&con->peer_addr.in_addr));
1408 reset_connection(con);
1409 prepare_write_connect(con->msgr, con, 0);
1410 prepare_read_connect(con);
1412 /* Tell ceph about it. */
1413 mutex_unlock(&con->mutex);
1414 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
1415 if (con->ops->peer_reset)
1416 con->ops->peer_reset(con);
1417 mutex_lock(&con->mutex);
1418 if (test_bit(CLOSED, &con->state) ||
1419 test_bit(OPENING, &con->state))
1423 case CEPH_MSGR_TAG_RETRY_SESSION:
1425 * If we sent a smaller connect_seq than the peer has, try
1426 * again with a larger value.
1428 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
1429 le32_to_cpu(con->out_connect.connect_seq),
1430 le32_to_cpu(con->in_connect.connect_seq));
1431 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
1432 prepare_write_connect(con->msgr, con, 0);
1433 prepare_read_connect(con);
1436 case CEPH_MSGR_TAG_RETRY_GLOBAL:
1438 * If we sent a smaller global_seq than the peer has, try
1439 * again with a larger value.
1441 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1442 con->peer_global_seq,
1443 le32_to_cpu(con->in_connect.global_seq));
1444 get_global_seq(con->msgr,
1445 le32_to_cpu(con->in_connect.global_seq));
1446 prepare_write_connect(con->msgr, con, 0);
1447 prepare_read_connect(con);
1450 case CEPH_MSGR_TAG_READY:
1451 if (req_feat & ~server_feat) {
1452 pr_err("%s%lld %s protocol feature mismatch,"
1453 " my required %llx > server's %llx, need %llx\n",
1454 ENTITY_NAME(con->peer_name),
1455 ceph_pr_addr(&con->peer_addr.in_addr),
1456 req_feat, server_feat, req_feat & ~server_feat);
1457 con->error_msg = "missing required protocol features";
1461 clear_bit(CONNECTING, &con->state);
1462 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
1464 con->peer_features = server_feat;
1465 dout("process_connect got READY gseq %d cseq %d (%d)\n",
1466 con->peer_global_seq,
1467 le32_to_cpu(con->in_reply.connect_seq),
1469 WARN_ON(con->connect_seq !=
1470 le32_to_cpu(con->in_reply.connect_seq));
1472 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
1473 set_bit(LOSSYTX, &con->state);
1475 prepare_read_tag(con);
1478 case CEPH_MSGR_TAG_WAIT:
1480 * If there is a connection race (we are opening
1481 * connections to each other), one of us may just have
1482 * to WAIT. This shouldn't happen if we are the
1483 * client.
1484 */
1485 pr_err("process_connect got WAIT as client\n");
1486 con->error_msg = "protocol error, got WAIT as client";
1490 pr_err("connect protocol error, will retry\n");
1491 con->error_msg = "protocol error, garbage tag during connect";
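/*
 * In short: feature and protocol-version mismatches are fatal,
 * BADAUTHORIZER gets exactly one retry, RESETSESSION/RETRY_SESSION/
 * RETRY_GLOBAL loop back into another prepare_write_connect() with
 * updated sequence numbers, and only TAG_READY moves the connection
 * out of CONNECTING.
 */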
1499 * read (part of) an ack
1501 static int read_partial_ack(struct ceph_connection *con)
1505 return read_partial(con, &to, sizeof(con->in_temp_ack),
1511 * We can finally discard anything that's been acked.
1513 static void process_ack(struct ceph_connection *con)
1516 u64 ack = le64_to_cpu(con->in_temp_ack);
1519 while (!list_empty(&con->out_sent)) {
1520 m = list_first_entry(&con->out_sent, struct ceph_msg,
1522 seq = le64_to_cpu(m->hdr.seq);
1525 dout("got ack for seq %llu type %d at %p\n", seq,
1526 le16_to_cpu(m->hdr.type), m);
1527 m->ack_stamp = jiffies;
1530 prepare_read_tag(con);
1536 static int read_partial_message_section(struct ceph_connection *con,
1537 struct kvec *section,
1538 unsigned int sec_len, u32 *crc)
1544 while (section->iov_len < sec_len) {
1545 BUG_ON(section->iov_base == NULL);
1546 left = sec_len - section->iov_len;
1547 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
1548 section->iov_len, left);
1551 section->iov_len += ret;
1553 if (section->iov_len == sec_len)
1554 *crc = crc32c(0, section->iov_base, section->iov_len);
1559 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1560 struct ceph_msg_header *hdr,
1564 static int read_partial_message_pages(struct ceph_connection *con,
1565 struct page **pages,
1566 unsigned data_len, bool do_datacrc)
1572 left = min((int)(data_len - con->in_msg_pos.data_pos),
1573 (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
1575 BUG_ON(pages == NULL);
1576 p = kmap(pages[con->in_msg_pos.page]);
1577 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1579 if (ret > 0 && do_datacrc)
1581 crc32c(con->in_data_crc,
1582 p + con->in_msg_pos.page_pos, ret);
1583 kunmap(pages[con->in_msg_pos.page]);
1586 con->in_msg_pos.data_pos += ret;
1587 con->in_msg_pos.page_pos += ret;
1588 if (con->in_msg_pos.page_pos == PAGE_SIZE) {
1589 con->in_msg_pos.page_pos = 0;
1590 con->in_msg_pos.page++;
1597 static int read_partial_message_bio(struct ceph_connection *con,
1598 struct bio **bio_iter, int *bio_seg,
1599 unsigned data_len, bool do_datacrc)
1601 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1608 left = min((int)(data_len - con->in_msg_pos.data_pos),
1609 (int)(bv->bv_len - con->in_msg_pos.page_pos));
1611 p = kmap(bv->bv_page) + bv->bv_offset;
1613 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1615 if (ret > 0 && do_datacrc)
1617 crc32c(con->in_data_crc,
1618 p + con->in_msg_pos.page_pos, ret);
1619 kunmap(bv->bv_page);
1622 con->in_msg_pos.data_pos += ret;
1623 con->in_msg_pos.page_pos += ret;
1624 if (con->in_msg_pos.page_pos == bv->bv_len) {
1625 con->in_msg_pos.page_pos = 0;
1626 iter_bio_next(bio_iter, bio_seg);
1634 * read (part of) a message.
1636 static int read_partial_message(struct ceph_connection *con)
1638 struct ceph_msg *m = con->in_msg;
1641 unsigned front_len, middle_len, data_len;
1642 bool do_datacrc = !con->msgr->nocrc;
1647 dout("read_partial_message con %p msg %p\n", con, m);
1650 while (con->in_base_pos < sizeof(con->in_hdr)) {
1651 left = sizeof(con->in_hdr) - con->in_base_pos;
1652 ret = ceph_tcp_recvmsg(con->sock,
1653 (char *)&con->in_hdr + con->in_base_pos,
1657 con->in_base_pos += ret;
1660 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
1661 if (cpu_to_le32(crc) != con->in_hdr.crc) {
1662 pr_err("read_partial_message bad hdr "
1663 " crc %u != expected %u\n",
1664 crc, con->in_hdr.crc);
1668 front_len = le32_to_cpu(con->in_hdr.front_len);
1669 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
1671 middle_len = le32_to_cpu(con->in_hdr.middle_len);
1672 if (middle_len > CEPH_MSG_MAX_DATA_LEN)
1674 data_len = le32_to_cpu(con->in_hdr.data_len);
1675 if (data_len > CEPH_MSG_MAX_DATA_LEN)
1679 seq = le64_to_cpu(con->in_hdr.seq);
1680 if ((s64)seq - (s64)con->in_seq < 1) {
1681 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
1682 ENTITY_NAME(con->peer_name),
1683 ceph_pr_addr(&con->peer_addr.in_addr),
1684 seq, con->in_seq + 1);
1685 con->in_base_pos = -front_len - middle_len - data_len -
1687 con->in_tag = CEPH_MSGR_TAG_READY;
1689 } else if ((s64)seq - (s64)con->in_seq > 1) {
1690 pr_err("read_partial_message bad seq %lld expected %lld\n",
1691 seq, con->in_seq + 1);
1692 con->error_msg = "bad message sequence # for incoming message";
1696 /* allocate message? */
1698 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1699 con->in_hdr.front_len, con->in_hdr.data_len);
1701 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
1703 /* skip this message */
1704 dout("alloc_msg said skip message\n");
1705 BUG_ON(con->in_msg);
1706 con->in_base_pos = -front_len - middle_len - data_len -
1708 con->in_tag = CEPH_MSGR_TAG_READY;
1714 "error allocating memory for incoming message";
1718 m->front.iov_len = 0; /* haven't read it yet */
1720 m->middle->vec.iov_len = 0;
1722 con->in_msg_pos.page = 0;
1724 con->in_msg_pos.page_pos = m->page_alignment;
1726 con->in_msg_pos.page_pos = 0;
1727 con->in_msg_pos.data_pos = 0;
1731 ret = read_partial_message_section(con, &m->front, front_len,
1732 &con->in_front_crc);
1738 ret = read_partial_message_section(con, &m->middle->vec,
1740 &con->in_middle_crc);
1745 if (m->bio && !m->bio_iter)
1746 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1750 while (con->in_msg_pos.data_pos < data_len) {
1752 ret = read_partial_message_pages(con, m->pages,
1753 data_len, do_datacrc);
1757 } else if (m->bio) {
1759 ret = read_partial_message_bio(con,
1760 &m->bio_iter, &m->bio_seg,
1761 data_len, do_datacrc);
1771 to = sizeof(m->hdr) + sizeof(m->footer);
1772 while (con->in_base_pos < to) {
1773 left = to - con->in_base_pos;
1774 ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
1775 (con->in_base_pos - sizeof(m->hdr)),
1779 con->in_base_pos += ret;
1781 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1782 m, front_len, m->footer.front_crc, middle_len,
1783 m->footer.middle_crc, data_len, m->footer.data_crc);
1786 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
1787 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1788 m, con->in_front_crc, m->footer.front_crc);
1791 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
1792 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1793 m, con->in_middle_crc, m->footer.middle_crc);
1797 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
1798 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
1799 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
1800 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
1804 return 1; /* done! */
1808 * Process message. This happens in the worker thread. The callback should
1809 * be careful not to do anything that waits on other incoming messages or it
1810 * may deadlock.
1811 */
1812 static void process_message(struct ceph_connection *con)
1814 struct ceph_msg *msg;
1819 /* if first message, set peer_name */
1820 if (con->peer_name.type == 0)
1821 con->peer_name = msg->hdr.src;
1824 mutex_unlock(&con->mutex);
1826 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1827 msg, le64_to_cpu(msg->hdr.seq),
1828 ENTITY_NAME(msg->hdr.src),
1829 le16_to_cpu(msg->hdr.type),
1830 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1831 le32_to_cpu(msg->hdr.front_len),
1832 le32_to_cpu(msg->hdr.data_len),
1833 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1834 con->ops->dispatch(con, msg);
1836 mutex_lock(&con->mutex);
1837 prepare_read_tag(con);
1842 * Write something to the socket. Called in a worker thread when the
1843 * socket appears to be writeable and we have something ready to send.
1845 static int try_write(struct ceph_connection *con)
1847 struct ceph_messenger *msgr = con->msgr;
1850 dout("try_write start %p state %lu nref %d\n", con, con->state,
1851 atomic_read(&con->nref));
1854 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1856 /* open the socket first? */
1857 if (con->sock == NULL) {
1858 prepare_write_connect(msgr, con, 1);
1859 prepare_read_banner(con);
1860 set_bit(CONNECTING, &con->state);
1861 clear_bit(NEGOTIATING, &con->state);
1863 BUG_ON(con->in_msg);
1864 con->in_tag = CEPH_MSGR_TAG_READY;
1865 dout("try_write initiating connect on %p new state %lu\n",
1867 ret = ceph_tcp_connect(con);
1869 con->error_msg = "connect error";
1875 /* kvec data queued? */
1876 if (con->out_skip) {
1877 ret = write_partial_skip(con);
1881 if (con->out_kvec_left) {
1882 ret = write_partial_kvec(con);
1889 if (con->out_msg_done) {
1890 ceph_msg_put(con->out_msg);
1891 con->out_msg = NULL; /* we're done with this one */
1895 ret = write_partial_msg_pages(con);
1897 goto more_kvec; /* we need to send the footer, too! */
1901 dout("try_write write_partial_msg_pages err %d\n",
1908 if (!test_bit(CONNECTING, &con->state)) {
1909 /* is anything else pending? */
1910 if (!list_empty(&con->out_queue)) {
1911 prepare_write_message(con);
1914 if (con->in_seq > con->in_seq_acked) {
1915 prepare_write_ack(con);
1918 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
1919 prepare_write_keepalive(con);
1924 /* Nothing to do! */
1925 clear_bit(WRITE_PENDING, &con->state);
1926 dout("try_write nothing else to write.\n");
1929 dout("try_write done on %p ret %d\n", con, ret);
1936 * Read what we can from the socket.
1938 static int try_read(struct ceph_connection *con)
1945 if (test_bit(STANDBY, &con->state))
1948 dout("try_read start on %p\n", con);
1951 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1955 * process_connect and process_message drop and re-take
1956 * con->mutex. make sure we handle a racing close or reopen.
1958 if (test_bit(CLOSED, &con->state) ||
1959 test_bit(OPENING, &con->state)) {
1964 if (test_bit(CONNECTING, &con->state)) {
1965 if (!test_bit(NEGOTIATING, &con->state)) {
1966 dout("try_read connecting\n");
1967 ret = read_partial_banner(con);
1970 ret = process_banner(con);
1974 ret = read_partial_connect(con);
1977 ret = process_connect(con);
1983 if (con->in_base_pos < 0) {
1985 * skipping + discarding content.
1987 * FIXME: there must be a better way to do this!
1989 static char buf[SKIP_BUF_SIZE];
1990 int skip = min((int) sizeof (buf), -con->in_base_pos);
1992 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1993 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1996 con->in_base_pos += ret;
1997 if (con->in_base_pos)
2000 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2004 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2007 dout("try_read got tag %d\n", (int)con->in_tag);
2008 switch (con->in_tag) {
2009 case CEPH_MSGR_TAG_MSG:
2010 prepare_read_message(con);
2012 case CEPH_MSGR_TAG_ACK:
2013 prepare_read_ack(con);
2015 case CEPH_MSGR_TAG_CLOSE:
2016 set_bit(CLOSED, &con->state); /* fixme */
2022 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2023 ret = read_partial_message(con);
2027 con->error_msg = "bad crc";
2031 con->error_msg = "io error";
2036 if (con->in_tag == CEPH_MSGR_TAG_READY)
2038 process_message(con);
2041 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
2042 ret = read_partial_ack(con);
2050 dout("try_read done on %p ret %d\n", con, ret);
2054 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2055 con->error_msg = "protocol error, garbage tag";
2062 * Atomically queue work on a connection. Bump @con reference to
2063 * avoid races with connection teardown.
2065 static void queue_con(struct ceph_connection *con)
2067 if (test_bit(DEAD, &con->state)) {
2068 dout("queue_con %p ignoring: DEAD\n",
2073 if (!con->ops->get(con)) {
2074 dout("queue_con %p ref count 0\n", con);
2078 if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
2079 dout("queue_con %p - already queued\n", con);
2082 dout("queue_con %p\n", con);
2087 * Do some work on a connection. Drop a connection ref when we're done.
2089 static void con_work(struct work_struct *work)
2091 struct ceph_connection *con = container_of(work, struct ceph_connection,
2095 mutex_lock(&con->mutex);
2097 if (test_and_clear_bit(BACKOFF, &con->state)) {
2098 dout("con_work %p backing off\n", con);
2099 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2100 round_jiffies_relative(con->delay))) {
2101 dout("con_work %p backoff %lu\n", con, con->delay);
2102 mutex_unlock(&con->mutex);
2106 dout("con_work %p FAILED to back off %lu\n", con,
2111 if (test_bit(STANDBY, &con->state)) {
2112 dout("con_work %p STANDBY\n", con);
2115 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
2116 dout("con_work CLOSED\n");
2117 con_close_socket(con);
2120 if (test_and_clear_bit(OPENING, &con->state)) {
2121 /* reopen w/ new peer */
2122 dout("con_work OPENING\n");
2123 con_close_socket(con);
2126 if (test_and_clear_bit(SOCK_CLOSED, &con->state))
2129 ret = try_read(con);
2135 ret = try_write(con);
2142 mutex_unlock(&con->mutex);
2148 mutex_unlock(&con->mutex);
2149 ceph_fault(con); /* error/fault path */
2155 * Generic error/fault handler. A retry mechanism is used with
2156 * exponential backoff.
2157 */
2158 static void ceph_fault(struct ceph_connection *con)
2160 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2161 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2162 dout("fault %p state %lu to peer %s\n",
2163 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2165 if (test_bit(LOSSYTX, &con->state)) {
2166 dout("fault on LOSSYTX channel\n");
2170 mutex_lock(&con->mutex);
2171 if (test_bit(CLOSED, &con->state))
2174 con_close_socket(con);
2177 ceph_msg_put(con->in_msg);
2181 /* Requeue anything that hasn't been acked */
2182 list_splice_init(&con->out_sent, &con->out_queue);
2184 /* If there are no messages queued or keepalive pending, place
2185 * the connection in a STANDBY state */
2186 if (list_empty(&con->out_queue) &&
2187 !test_bit(KEEPALIVE_PENDING, &con->state)) {
2188 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2189 clear_bit(WRITE_PENDING, &con->state);
2190 set_bit(STANDBY, &con->state);
2192 /* retry after a delay. */
2193 if (con->delay == 0)
2194 con->delay = BASE_DELAY_INTERVAL;
2195 else if (con->delay < MAX_DELAY_INTERVAL)
2196 con->delay *= 2;
2198 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2199 round_jiffies_relative(con->delay))) {
2200 dout("fault queued %p delay %lu\n", con, con->delay);
2203 dout("fault failed to queue %p delay %lu, backoff\n",
2206 * In many cases we see a socket state change
2207 * while con_work is running and end up
2208 * queuing (non-delayed) work, such that we
2209 * can't backoff with a delay. Set a flag so
2210 * that when con_work restarts we schedule the
2211 * backoff properly.
2212 */
2213 set_bit(BACKOFF, &con->state);
2218 mutex_unlock(&con->mutex);
2221 * in case we faulted due to authentication, invalidate our
2222 * current tickets so that we can get new ones.
2224 if (con->auth_retry && con->ops->invalidate_authorizer) {
2225 dout("calling invalidate_authorizer()\n");
2226 con->ops->invalidate_authorizer(con);
2229 if (con->ops->fault)
2230 con->ops->fault(con);
2236 * create a new messenger instance
2238 struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
2239 u32 supported_features,
2240 u32 required_features)
2242 struct ceph_messenger *msgr;
2244 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
2246 return ERR_PTR(-ENOMEM);
2248 msgr->supported_features = supported_features;
2249 msgr->required_features = required_features;
2251 spin_lock_init(&msgr->global_seq_lock);
2254 msgr->inst.addr = *myaddr;
2256 /* select a random nonce */
2257 msgr->inst.addr.type = 0;
2258 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2259 encode_my_addr(msgr);
2261 dout("messenger_create %p\n", msgr);
2264 EXPORT_SYMBOL(ceph_messenger_create);
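/*
 * Typical creation (a sketch; the feature masks are caller-supplied
 * constants, not something defined in this file):
 *
 *	msgr = ceph_messenger_create(myaddr, supported_features,
 *				     required_features);
 *	if (IS_ERR(msgr))
 *		return PTR_ERR(msgr);
 */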
2266 void ceph_messenger_destroy(struct ceph_messenger *msgr)
2268 dout("destroy %p\n", msgr);
2270 dout("destroyed messenger %p\n", msgr);
2272 EXPORT_SYMBOL(ceph_messenger_destroy);
2274 static void clear_standby(struct ceph_connection *con)
2276 /* come back from STANDBY? */
2277 if (test_and_clear_bit(STANDBY, &con->state)) {
2278 mutex_lock(&con->mutex);
2279 dout("clear_standby %p and ++connect_seq\n", con);
2281 WARN_ON(test_bit(WRITE_PENDING, &con->state));
2282 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
2283 mutex_unlock(&con->mutex);
2288 * Queue up an outgoing message on the given connection.
2290 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2292 if (test_bit(CLOSED, &con->state)) {
2293 dout("con_send %p closed, dropping %p\n", con, msg);
2299 msg->hdr.src = con->msgr->inst.name;
2301 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2303 msg->needs_out_seq = true;
2306 mutex_lock(&con->mutex);
2307 BUG_ON(!list_empty(&msg->list_head));
2308 list_add_tail(&msg->list_head, &con->out_queue);
2309 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
2310 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
2311 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2312 le32_to_cpu(msg->hdr.front_len),
2313 le32_to_cpu(msg->hdr.middle_len),
2314 le32_to_cpu(msg->hdr.data_len));
2315 mutex_unlock(&con->mutex);
2317 /* if there wasn't anything waiting to send before, queue
2318 * new work */
2320 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2323 EXPORT_SYMBOL(ceph_con_send);
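/*
 * Queuing a message, sketched: allocate with ceph_msg_new(), fill the
 * front payload, then hand it to the connection.
 *
 *	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	if (!msg)
 *		return -ENOMEM;
 *	... fill msg->front.iov_base with front_len bytes ...
 *	ceph_con_send(con, msg);
 */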
2326 * Revoke a message that was previously queued for send
2328 void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
2330 mutex_lock(&con->mutex);
2331 if (!list_empty(&msg->list_head)) {
2332 dout("con_revoke %p msg %p - was on queue\n", con, msg);
2333 list_del_init(&msg->list_head);
2337 if (con->out_msg == msg) {
2338 dout("con_revoke %p msg %p - was sending\n", con, msg);
2339 con->out_msg = NULL;
2340 if (con->out_kvec_is_msg) {
2341 con->out_skip = con->out_kvec_bytes;
2342 con->out_kvec_is_msg = false;
2347 mutex_unlock(&con->mutex);
2351 * Revoke a message that we may be reading data into
2353 void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2355 mutex_lock(&con->mutex);
2356 if (con->in_msg && con->in_msg == msg) {
2357 unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
2358 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
2359 unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
2361 /* skip rest of message */
2362 dout("con_revoke_pages %p msg %p revoked\n", con, msg);
2363 con->in_base_pos = con->in_base_pos -
2364 sizeof(struct ceph_msg_header) -
2368 sizeof(struct ceph_msg_footer);
2369 ceph_msg_put(con->in_msg);
2371 con->in_tag = CEPH_MSGR_TAG_READY;
2374 dout("con_revoke_pages %p msg %p pages %p no-op\n",
2375 con, con->in_msg, msg);
2377 mutex_unlock(&con->mutex);
2381 * Queue a keepalive byte to ensure the tcp connection is alive.
2383 void ceph_con_keepalive(struct ceph_connection *con)
2385 dout("con_keepalive %p\n", con);
2387 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
2388 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2391 EXPORT_SYMBOL(ceph_con_keepalive);
2395 * construct a new message with given type, size
2396 * the new msg has a ref count of 1.
2398 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
2403 m = kmalloc(sizeof(*m), flags);
2406 kref_init(&m->kref);
2407 INIT_LIST_HEAD(&m->list_head);
2410 m->hdr.type = cpu_to_le16(type);
2411 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
2413 m->hdr.front_len = cpu_to_le32(front_len);
2414 m->hdr.middle_len = 0;
2415 m->hdr.data_len = 0;
2416 m->hdr.data_off = 0;
2417 m->hdr.reserved = 0;
2418 m->footer.front_crc = 0;
2419 m->footer.middle_crc = 0;
2420 m->footer.data_crc = 0;
2421 m->footer.flags = 0;
2422 m->front_max = front_len;
2423 m->front_is_vmalloc = false;
2424 m->more_to_follow = false;
2433 m->page_alignment = 0;
2443 if (front_len > PAGE_CACHE_SIZE) {
2444 m->front.iov_base = __vmalloc(front_len, flags,
2446 m->front_is_vmalloc = true;
2448 m->front.iov_base = kmalloc(front_len, flags);
2450 if (m->front.iov_base == NULL) {
2451 dout("ceph_msg_new can't allocate %d bytes\n",
2456 m->front.iov_base = NULL;
2458 m->front.iov_len = front_len;
2460 dout("ceph_msg_new %p front %d\n", m, front_len);
2467 pr_err("msg_new can't create type %d front %d\n", type,
2471 dout("msg_new can't create type %d front %d\n", type,
2476 EXPORT_SYMBOL(ceph_msg_new);
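/*
 * On the wire a ceph_msg is header + front + (optional) middle +
 * (optional) data pages, followed by a footer carrying per-section
 * crc32c values; prepare_write_message() and read_partial_message()
 * above walk exactly that layout.
 */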
2479 * Allocate "middle" portion of a message, if it is needed and wasn't
2480 * allocated by alloc_msg. This allows us to read a small fixed-size
2481 * per-type header in the front and then gracefully fail (i.e.,
2482 * propagate the error to the caller based on info in the front) when
2483 * the middle is too large.
2485 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2487 int type = le16_to_cpu(msg->hdr.type);
2488 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2490 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2491 ceph_msg_type_name(type), middle_len);
2492 BUG_ON(!middle_len);
2493 BUG_ON(msg->middle);
2495 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2502 * Generic message allocator, for incoming messages.
2504 static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2505 struct ceph_msg_header *hdr,
2508 int type = le16_to_cpu(hdr->type);
2509 int front_len = le32_to_cpu(hdr->front_len);
2510 int middle_len = le32_to_cpu(hdr->middle_len);
2511 struct ceph_msg *msg = NULL;
2514 if (con->ops->alloc_msg) {
2515 mutex_unlock(&con->mutex);
2516 msg = con->ops->alloc_msg(con, hdr, skip);
2517 mutex_lock(&con->mutex);
2523 msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
2525 pr_err("unable to allocate msg type %d len %d\n",
2529 msg->page_alignment = le16_to_cpu(hdr->data_off);
2531 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2533 if (middle_len && !msg->middle) {
2534 ret = ceph_alloc_middle(con, msg);
2546 * Free a generically kmalloc'd message.
2548 void ceph_msg_kfree(struct ceph_msg *m)
2550 dout("msg_kfree %p\n", m);
2551 if (m->front_is_vmalloc)
2552 vfree(m->front.iov_base);
2554 kfree(m->front.iov_base);
2559 * Drop a msg ref. Destroy as needed.
2561 void ceph_msg_last_put(struct kref *kref)
2563 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2565 dout("ceph_msg_put last one on %p\n", m);
2566 WARN_ON(!list_empty(&m->list_head));
2568 /* drop middle, data, if any */
2570 ceph_buffer_put(m->middle);
2577 ceph_pagelist_release(m->pagelist);
2585 ceph_msgpool_put(m->pool, m);
2589 EXPORT_SYMBOL(ceph_msg_last_put);
2591 void ceph_msg_dump(struct ceph_msg *msg)
2593 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
2594 msg->front_max, msg->nr_pages);
2595 print_hex_dump(KERN_DEBUG, "header: ",
2596 DUMP_PREFIX_OFFSET, 16, 1,
2597 &msg->hdr, sizeof(msg->hdr), true);
2598 print_hex_dump(KERN_DEBUG, " front: ",
2599 DUMP_PREFIX_OFFSET, 16, 1,
2600 msg->front.iov_base, msg->front.iov_len, true);
2602 print_hex_dump(KERN_DEBUG, "middle: ",
2603 DUMP_PREFIX_OFFSET, 16, 1,
2604 msg->middle->vec.iov_base,
2605 msg->middle->vec.iov_len, true);
2606 print_hex_dump(KERN_DEBUG, "footer: ",
2607 DUMP_PREFIX_OFFSET, 16, 1,
2608 &msg->footer, sizeof(msg->footer), true);
2610 EXPORT_SYMBOL(ceph_msg_dump);