// SPDX-License-Identifier: GPL-2.0
/*
 * Ceph msgr2 protocol implementation
 *
 * Copyright (C) 2020 Ilya Dryomov <idryomov@gmail.com>
 */

#include <linux/ceph/ceph_debug.h>

#include <crypto/aead.h>
#include <crypto/algapi.h> /* for crypto_memneq() */
#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/bvec.h>
#include <linux/crc32c.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/sched/mm.h>

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>

#include "crypto.h" /* for CEPH_KEY_LEN and CEPH_MAX_CON_SECRET_LEN */
#define FRAME_TAG_HELLO 1
#define FRAME_TAG_AUTH_REQUEST 2
#define FRAME_TAG_AUTH_BAD_METHOD 3
#define FRAME_TAG_AUTH_REPLY_MORE 4
#define FRAME_TAG_AUTH_REQUEST_MORE 5
#define FRAME_TAG_AUTH_DONE 6
#define FRAME_TAG_AUTH_SIGNATURE 7
#define FRAME_TAG_CLIENT_IDENT 8
#define FRAME_TAG_SERVER_IDENT 9
#define FRAME_TAG_IDENT_MISSING_FEATURES 10
#define FRAME_TAG_SESSION_RECONNECT 11
#define FRAME_TAG_SESSION_RESET 12
#define FRAME_TAG_SESSION_RETRY 13
#define FRAME_TAG_SESSION_RETRY_GLOBAL 14
#define FRAME_TAG_SESSION_RECONNECT_OK 15
#define FRAME_TAG_WAIT 16
#define FRAME_TAG_MESSAGE 17
#define FRAME_TAG_KEEPALIVE2 18
#define FRAME_TAG_KEEPALIVE2_ACK 19
#define FRAME_TAG_ACK 20

#define FRAME_LATE_STATUS_ABORTED 0x1
#define FRAME_LATE_STATUS_COMPLETE 0xe
#define FRAME_LATE_STATUS_ABORTED_MASK 0xf
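
/*
 * The late_status byte lives in the frame epilogue and records whether
 * the sender completed the frame or aborted it mid-write; only the low
 * nibble is significant (see FRAME_LATE_STATUS_ABORTED_MASK and the
 * check in decode_epilogue() below).
 */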
#define IN_S_HANDLE_PREAMBLE 1
#define IN_S_HANDLE_CONTROL 2
#define IN_S_HANDLE_CONTROL_REMAINDER 3
#define IN_S_PREPARE_READ_DATA 4
#define IN_S_PREPARE_READ_DATA_CONT 5
#define IN_S_PREPARE_READ_ENC_PAGE 6
#define IN_S_HANDLE_EPILOGUE 7
#define IN_S_FINISH_SKIP 8

#define OUT_S_QUEUE_DATA 1
#define OUT_S_QUEUE_DATA_CONT 2
#define OUT_S_QUEUE_ENC_PAGE 3
#define OUT_S_QUEUE_ZEROS 4
#define OUT_S_FINISH_MESSAGE 5
#define OUT_S_GET_NEXT 6
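
/*
 * in_state and out_state drive the per-connection read and write state
 * machines: the read side walks preamble -> control -> data (or
 * pre-decrypted enc pages) -> epilogue, while the write side queues
 * data or already-encrypted pages, finishes the message and then looks
 * for the next thing to send (OUT_S_GET_NEXT).
 */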
#define CTRL_BODY(p) ((void *)(p) + CEPH_PREAMBLE_LEN)
#define FRONT_PAD(p) ((void *)(p) + CEPH_EPILOGUE_SECURE_LEN)
#define MIDDLE_PAD(p) (FRONT_PAD(p) + CEPH_GCM_BLOCK_LEN)
#define DATA_PAD(p) (MIDDLE_PAD(p) + CEPH_GCM_BLOCK_LEN)

#define CEPH_MSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
static int do_recvmsg(struct socket *sock, struct iov_iter *it)
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };

	while (iov_iter_count(it)) {
		ret = sock_recvmsg(sock, &msg, msg.msg_flags);
		iov_iter_advance(it, ret);

	WARN_ON(msg_data_left(&msg));

/*
 * Read as much as possible.
 *
 * Return:
 *   1 - done, nothing (else) to read
 *   0 - socket is empty, need to wait
 *  <0 - error
 */
static int ceph_tcp_recv(struct ceph_connection *con)
	dout("%s con %p %s %zu\n", __func__, con,
	     iov_iter_is_discard(&con->v2.in_iter) ? "discard" : "need",
	     iov_iter_count(&con->v2.in_iter));
	ret = do_recvmsg(con->sock, &con->v2.in_iter);
	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
	     iov_iter_count(&con->v2.in_iter));
static int do_sendmsg(struct socket *sock, struct iov_iter *it)
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };

	while (iov_iter_count(it)) {
		ret = sock_sendmsg(sock, &msg);
		iov_iter_advance(it, ret);

	WARN_ON(msg_data_left(&msg));

static int do_try_sendpage(struct socket *sock, struct iov_iter *it)
	struct msghdr msg = { .msg_flags = CEPH_MSG_FLAGS };

	if (WARN_ON(!iov_iter_is_bvec(it)))

	while (iov_iter_count(it)) {
		/* iov_iter_iovec() for ITER_BVEC */
		bvec_set_page(&bv, it->bvec->bv_page,
			      min(iov_iter_count(it),
				  it->bvec->bv_len - it->iov_offset),
			      it->bvec->bv_offset + it->iov_offset);

		/*
		 * sendpage cannot properly handle pages with
		 * page_count == 0, we need to fall back to sendmsg if
		 * that is the case.
		 *
		 * Same goes for slab pages: skb_can_coalesce() allows
		 * coalescing neighboring slab objects into a single frag
		 * which triggers one of hardened usercopy checks.
		 */
		if (sendpage_ok(bv.bv_page)) {
			ret = sock->ops->sendpage(sock, bv.bv_page,
						  bv.bv_offset, bv.bv_len,
			iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len);
			ret = sock_sendmsg(sock, &msg);

		iov_iter_advance(it, ret);
/*
 * Write as much as possible. The socket is expected to be corked,
 * so we don't bother with MSG_MORE/MSG_SENDPAGE_NOTLAST here.
 *
 * Return:
 *   1 - done, nothing (else) to write
 *   0 - socket is full, need to wait
 *  <0 - error
 */
static int ceph_tcp_send(struct ceph_connection *con)
	dout("%s con %p have %zu try_sendpage %d\n", __func__, con,
	     iov_iter_count(&con->v2.out_iter), con->v2.out_iter_sendpage);
	if (con->v2.out_iter_sendpage)
		ret = do_try_sendpage(con->sock, &con->v2.out_iter);
	else
		ret = do_sendmsg(con->sock, &con->v2.out_iter);
	dout("%s con %p ret %d left %zu\n", __func__, con, ret,
	     iov_iter_count(&con->v2.out_iter));
static void add_in_kvec(struct ceph_connection *con, void *buf, int len)
	BUG_ON(con->v2.in_kvec_cnt >= ARRAY_SIZE(con->v2.in_kvecs));
	WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));

	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_base = buf;
	con->v2.in_kvecs[con->v2.in_kvec_cnt].iov_len = len;
	con->v2.in_kvec_cnt++;

	con->v2.in_iter.nr_segs++;
	con->v2.in_iter.count += len;

static void reset_in_kvecs(struct ceph_connection *con)
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_kvec_cnt = 0;
	iov_iter_kvec(&con->v2.in_iter, ITER_DEST, con->v2.in_kvecs, 0, 0);

static void set_in_bvec(struct ceph_connection *con, const struct bio_vec *bv)
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	con->v2.in_bvec = *bv;
	iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len);

static void set_in_skip(struct ceph_connection *con, int len)
	WARN_ON(iov_iter_count(&con->v2.in_iter));

	dout("%s con %p len %d\n", __func__, con, len);
	iov_iter_discard(&con->v2.in_iter, ITER_DEST, len);
static void add_out_kvec(struct ceph_connection *con, void *buf, int len)
	BUG_ON(con->v2.out_kvec_cnt >= ARRAY_SIZE(con->v2.out_kvecs));
	WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_base = buf;
	con->v2.out_kvecs[con->v2.out_kvec_cnt].iov_len = len;
	con->v2.out_kvec_cnt++;

	con->v2.out_iter.nr_segs++;
	con->v2.out_iter.count += len;

static void reset_out_kvecs(struct ceph_connection *con)
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_kvec_cnt = 0;

	iov_iter_kvec(&con->v2.out_iter, ITER_SOURCE, con->v2.out_kvecs, 0, 0);
	con->v2.out_iter_sendpage = false;

static void set_out_bvec(struct ceph_connection *con, const struct bio_vec *bv,
			 bool zerocopy)
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(con->v2.out_zero);

	con->v2.out_bvec = *bv;
	con->v2.out_iter_sendpage = zerocopy;
	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);

static void set_out_bvec_zero(struct ceph_connection *con)
	WARN_ON(iov_iter_count(&con->v2.out_iter));
	WARN_ON(!con->v2.out_zero);

	bvec_set_page(&con->v2.out_bvec, ceph_zero_page,
		      min(con->v2.out_zero, (int)PAGE_SIZE), 0);
	con->v2.out_iter_sendpage = true;
	iov_iter_bvec(&con->v2.out_iter, ITER_SOURCE, &con->v2.out_bvec, 1,
		      con->v2.out_bvec.bv_len);

static void out_zero_add(struct ceph_connection *con, int len)
	dout("%s con %p len %d\n", __func__, con, len);
	con->v2.out_zero += len;
static void *alloc_conn_buf(struct ceph_connection *con, int len)
	dout("%s con %p len %d\n", __func__, con, len);

	if (WARN_ON(con->v2.conn_buf_cnt >= ARRAY_SIZE(con->v2.conn_bufs)))

	buf = kvmalloc(len, GFP_NOIO);

	con->v2.conn_bufs[con->v2.conn_buf_cnt++] = buf;

static void free_conn_bufs(struct ceph_connection *con)
	while (con->v2.conn_buf_cnt)
		kvfree(con->v2.conn_bufs[--con->v2.conn_buf_cnt]);

static void add_in_sign_kvec(struct ceph_connection *con, void *buf, int len)
	BUG_ON(con->v2.in_sign_kvec_cnt >= ARRAY_SIZE(con->v2.in_sign_kvecs));

	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_base = buf;
	con->v2.in_sign_kvecs[con->v2.in_sign_kvec_cnt].iov_len = len;
	con->v2.in_sign_kvec_cnt++;

static void clear_in_sign_kvecs(struct ceph_connection *con)
	con->v2.in_sign_kvec_cnt = 0;

static void add_out_sign_kvec(struct ceph_connection *con, void *buf, int len)
	BUG_ON(con->v2.out_sign_kvec_cnt >= ARRAY_SIZE(con->v2.out_sign_kvecs));

	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_base = buf;
	con->v2.out_sign_kvecs[con->v2.out_sign_kvec_cnt].iov_len = len;
	con->v2.out_sign_kvec_cnt++;

static void clear_out_sign_kvecs(struct ceph_connection *con)
	con->v2.out_sign_kvec_cnt = 0;
static bool con_secure(struct ceph_connection *con)
	return con->v2.con_mode == CEPH_CON_MODE_SECURE;

static int front_len(const struct ceph_msg *msg)
	return le32_to_cpu(msg->hdr.front_len);

static int middle_len(const struct ceph_msg *msg)
	return le32_to_cpu(msg->hdr.middle_len);

static int data_len(const struct ceph_msg *msg)
	return le32_to_cpu(msg->hdr.data_len);

static bool need_padding(int len)
	return !IS_ALIGNED(len, CEPH_GCM_BLOCK_LEN);

static int padded_len(int len)
	return ALIGN(len, CEPH_GCM_BLOCK_LEN);

static int padding_len(int len)
	return padded_len(len) - len;
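
/*
 * Worked example, assuming CEPH_GCM_BLOCK_LEN is 16: padded_len(42)
 * is 48 and padding_len(42) is 6; lengths that are already a multiple
 * of the block size need no padding.
 */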
/* preamble + control segment */
static int head_onwire_len(int ctrl_len, bool secure)
	if (secure) {
		head_len = CEPH_PREAMBLE_SECURE_LEN;
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
			head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
		}
	} else {
		head_len = CEPH_PREAMBLE_PLAIN_LEN;
		head_len += ctrl_len + CEPH_CRC_LEN;
	}
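
/*
 * Rough worked example, assuming the usual msgr2 constants (32-byte
 * plain preamble, 48-byte inline buffer, 16-byte GCM tag, 4-byte crc):
 * a plain head with ctrl_len == 476 is 32 + 476 + 4 == 512 bytes,
 * while a secure head whose control segment fits entirely in the
 * inline buffer is just CEPH_PREAMBLE_SECURE_LEN.
 */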
/* front, middle and data segments + epilogue */
static int __tail_onwire_len(int front_len, int middle_len, int data_len,
			     bool secure)
	if (!front_len && !middle_len && !data_len)
		return 0;

	if (!secure)
		return front_len + middle_len + data_len +
		       CEPH_EPILOGUE_PLAIN_LEN;

	return padded_len(front_len) + padded_len(middle_len) +
	       padded_len(data_len) + CEPH_EPILOGUE_SECURE_LEN;

static int tail_onwire_len(const struct ceph_msg *msg, bool secure)
	return __tail_onwire_len(front_len(msg), middle_len(msg),
				 data_len(msg), secure);

/* head_onwire_len(sizeof(struct ceph_msg_header2), false) */
#define MESSAGE_HEAD_PLAIN_LEN (CEPH_PREAMBLE_PLAIN_LEN + \
				sizeof(struct ceph_msg_header2) + \
				CEPH_CRC_LEN)

static const int frame_aligns[] = {
/*
 * Discards trailing empty segments, unless there is just one segment.
 * A frame always has at least one (possibly empty) segment.
 */
static int calc_segment_count(const int *lens, int len_cnt)
	for (i = len_cnt - 1; i >= 0; i--) {

static void init_frame_desc(struct ceph_frame_desc *desc, int tag,
			    const int *lens, int len_cnt)
	memset(desc, 0, sizeof(*desc));

	desc->fd_seg_cnt = calc_segment_count(lens, len_cnt);
	BUG_ON(desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = lens[i];
		desc->fd_aligns[i] = frame_aligns[i];
/*
 * Preamble crc covers everything up to itself (28 bytes) and
 * is calculated and verified irrespective of the connection mode
 * (i.e. even if the frame is encrypted).
 */
static void encode_preamble(const struct ceph_frame_desc *desc, void *p)
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;

	memset(p, 0, CEPH_PREAMBLE_LEN);

	ceph_encode_8(&p, desc->fd_tag);
	ceph_encode_8(&p, desc->fd_seg_cnt);
	for (i = 0; i < desc->fd_seg_cnt; i++) {
		ceph_encode_32(&p, desc->fd_lens[i]);
		ceph_encode_16(&p, desc->fd_aligns[i]);

	put_unaligned_le32(crc32c(0, start, crcp - start), crcp);
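
/*
 * The encoded preamble is fixed-size: a tag byte, a segment count byte
 * and four (length, alignment) pairs, with the crc32c over the leading
 * 28 bytes stored in the last CEPH_CRC_LEN bytes of the preamble.
 */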
static int decode_preamble(void *p, struct ceph_frame_desc *desc)
	void *crcp = p + CEPH_PREAMBLE_LEN - CEPH_CRC_LEN;
	u32 crc, expected_crc;

	crc = crc32c(0, p, crcp - p);
	expected_crc = get_unaligned_le32(crcp);
	if (crc != expected_crc) {
		pr_err("bad preamble crc, calculated %u, expected %u\n",

	memset(desc, 0, sizeof(*desc));

	desc->fd_tag = ceph_decode_8(&p);
	desc->fd_seg_cnt = ceph_decode_8(&p);
	if (desc->fd_seg_cnt < 1 ||
	    desc->fd_seg_cnt > CEPH_FRAME_MAX_SEGMENT_COUNT) {
		pr_err("bad segment count %d\n", desc->fd_seg_cnt);

	for (i = 0; i < desc->fd_seg_cnt; i++) {
		desc->fd_lens[i] = ceph_decode_32(&p);
		desc->fd_aligns[i] = ceph_decode_16(&p);

	/*
	 * This would fire for FRAME_TAG_WAIT (it has one empty
	 * segment), but we should never get it as client.
	 */
	if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
		pr_err("last segment empty\n");

	if (desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
		pr_err("control segment too big %d\n", desc->fd_lens[0]);

	if (desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
		pr_err("front segment too big %d\n", desc->fd_lens[1]);

	if (desc->fd_lens[2] > CEPH_MSG_MAX_MIDDLE_LEN) {
		pr_err("middle segment too big %d\n", desc->fd_lens[2]);

	if (desc->fd_lens[3] > CEPH_MSG_MAX_DATA_LEN) {
		pr_err("data segment too big %d\n", desc->fd_lens[3]);
static void encode_epilogue_plain(struct ceph_connection *con, bool aborted)
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;
	cpu_to_le32s(&con->v2.out_epil.front_crc);
	cpu_to_le32s(&con->v2.out_epil.middle_crc);
	cpu_to_le32s(&con->v2.out_epil.data_crc);

static void encode_epilogue_secure(struct ceph_connection *con, bool aborted)
	memset(&con->v2.out_epil, 0, sizeof(con->v2.out_epil));
	con->v2.out_epil.late_status = aborted ? FRAME_LATE_STATUS_ABORTED :
						 FRAME_LATE_STATUS_COMPLETE;

static int decode_epilogue(void *p, u32 *front_crc, u32 *middle_crc,
			   u32 *data_crc)
	late_status = ceph_decode_8(&p);
	if ((late_status & FRAME_LATE_STATUS_ABORTED_MASK) !=
	    FRAME_LATE_STATUS_COMPLETE) {
		/* we should never get an aborted message as client */
		pr_err("bad late_status 0x%x\n", late_status);

	if (front_crc && middle_crc && data_crc) {
		*front_crc = ceph_decode_32(&p);
		*middle_crc = ceph_decode_32(&p);
		*data_crc = ceph_decode_32(&p);
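
/*
 * In plain (crc) mode the epilogue carries late_status plus the three
 * segment crcs; in secure mode the crcs are omitted because the whole
 * tail is covered by the GCM auth tag, which is why the crc pointers
 * may be NULL here.
 */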
static void fill_header(struct ceph_msg_header *hdr,
			const struct ceph_msg_header2 *hdr2,
			int front_len, int middle_len, int data_len,
			const struct ceph_entity_name *peer_name)
	hdr->seq = hdr2->seq;
	hdr->tid = hdr2->tid;
	hdr->type = hdr2->type;
	hdr->priority = hdr2->priority;
	hdr->version = hdr2->version;
	hdr->front_len = cpu_to_le32(front_len);
	hdr->middle_len = cpu_to_le32(middle_len);
	hdr->data_len = cpu_to_le32(data_len);
	hdr->data_off = hdr2->data_off;
	hdr->src = *peer_name;
	hdr->compat_version = hdr2->compat_version;

static void fill_header2(struct ceph_msg_header2 *hdr2,
			 const struct ceph_msg_header *hdr, u64 ack_seq)
	hdr2->seq = hdr->seq;
	hdr2->tid = hdr->tid;
	hdr2->type = hdr->type;
	hdr2->priority = hdr->priority;
	hdr2->version = hdr->version;
	hdr2->data_pre_padding_len = 0;
	hdr2->data_off = hdr->data_off;
	hdr2->ack_seq = cpu_to_le64(ack_seq);
	hdr2->compat_version = hdr->compat_version;
static int verify_control_crc(struct ceph_connection *con)
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	u32 crc, expected_crc;

	WARN_ON(con->v2.in_kvecs[0].iov_len != ctrl_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != CEPH_CRC_LEN);

	crc = crc32c(-1, con->v2.in_kvecs[0].iov_base, ctrl_len);
	expected_crc = get_unaligned_le32(con->v2.in_kvecs[1].iov_base);
	if (crc != expected_crc) {
		pr_err("bad control crc, calculated %u, expected %u\n",

static int verify_epilogue_crcs(struct ceph_connection *con, u32 front_crc,
				u32 middle_crc, u32 data_crc)
	if (front_len(con->in_msg)) {
		con->in_front_crc = crc32c(-1, con->in_msg->front.iov_base,
					   front_len(con->in_msg));
	} else {
		WARN_ON(!middle_len(con->in_msg) && !data_len(con->in_msg));
		con->in_front_crc = -1;
	}

	if (middle_len(con->in_msg))
		con->in_middle_crc = crc32c(-1,
					    con->in_msg->middle->vec.iov_base,
					    middle_len(con->in_msg));
	else if (data_len(con->in_msg))
		con->in_middle_crc = -1;
	else
		con->in_middle_crc = 0;

	if (!data_len(con->in_msg))
		con->in_data_crc = 0;

	dout("%s con %p msg %p crcs %u %u %u\n", __func__, con, con->in_msg,
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);

	if (con->in_front_crc != front_crc) {
		pr_err("bad front crc, calculated %u, expected %u\n",
		       con->in_front_crc, front_crc);

	if (con->in_middle_crc != middle_crc) {
		pr_err("bad middle crc, calculated %u, expected %u\n",
		       con->in_middle_crc, middle_crc);

	if (con->in_data_crc != data_crc) {
		pr_err("bad data crc, calculated %u, expected %u\n",
		       con->in_data_crc, data_crc);
static int setup_crypto(struct ceph_connection *con,
			const u8 *session_key, int session_key_len,
			const u8 *con_secret, int con_secret_len)
	unsigned int noio_flag;

	dout("%s con %p con_mode %d session_key_len %d con_secret_len %d\n",
	     __func__, con, con->v2.con_mode, session_key_len, con_secret_len);
	WARN_ON(con->v2.hmac_tfm || con->v2.gcm_tfm || con->v2.gcm_req);

	if (con->v2.con_mode != CEPH_CON_MODE_CRC &&
	    con->v2.con_mode != CEPH_CON_MODE_SECURE) {
		pr_err("bad con_mode %d\n", con->v2.con_mode);

	if (!session_key_len) {
		WARN_ON(con->v2.con_mode != CEPH_CON_MODE_CRC);
		WARN_ON(con_secret_len);
		return 0; /* auth_none */

	noio_flag = memalloc_noio_save();
	con->v2.hmac_tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.hmac_tfm)) {
		ret = PTR_ERR(con->v2.hmac_tfm);
		con->v2.hmac_tfm = NULL;
		pr_err("failed to allocate hmac tfm context: %d\n", ret);

	WARN_ON((unsigned long)session_key &
		crypto_shash_alignmask(con->v2.hmac_tfm));
	ret = crypto_shash_setkey(con->v2.hmac_tfm, session_key,
		pr_err("failed to set hmac key: %d\n", ret);

	if (con->v2.con_mode == CEPH_CON_MODE_CRC) {
		WARN_ON(con_secret_len);
		return 0; /* auth_x, plain mode */

	if (con_secret_len < CEPH_GCM_KEY_LEN + 2 * CEPH_GCM_IV_LEN) {
		pr_err("con_secret too small %d\n", con_secret_len);

	noio_flag = memalloc_noio_save();
	con->v2.gcm_tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	memalloc_noio_restore(noio_flag);
	if (IS_ERR(con->v2.gcm_tfm)) {
		ret = PTR_ERR(con->v2.gcm_tfm);
		con->v2.gcm_tfm = NULL;
		pr_err("failed to allocate gcm tfm context: %d\n", ret);

	WARN_ON((unsigned long)con_secret &
		crypto_aead_alignmask(con->v2.gcm_tfm));
	ret = crypto_aead_setkey(con->v2.gcm_tfm, con_secret, CEPH_GCM_KEY_LEN);
		pr_err("failed to set gcm key: %d\n", ret);

	WARN_ON(crypto_aead_ivsize(con->v2.gcm_tfm) != CEPH_GCM_IV_LEN);
	ret = crypto_aead_setauthsize(con->v2.gcm_tfm, CEPH_GCM_TAG_LEN);
		pr_err("failed to set gcm tag size: %d\n", ret);

	con->v2.gcm_req = aead_request_alloc(con->v2.gcm_tfm, GFP_NOIO);
	if (!con->v2.gcm_req) {
		pr_err("failed to allocate gcm request\n");

	crypto_init_wait(&con->v2.gcm_wait);
	aead_request_set_callback(con->v2.gcm_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &con->v2.gcm_wait);

	memcpy(&con->v2.in_gcm_nonce, con_secret + CEPH_GCM_KEY_LEN,
	memcpy(&con->v2.out_gcm_nonce,
	       con_secret + CEPH_GCM_KEY_LEN + CEPH_GCM_IV_LEN,
	return 0; /* auth_x, secure mode */
static int hmac_sha256(struct ceph_connection *con, const struct kvec *kvecs,
		       int kvec_cnt, u8 *hmac)
	SHASH_DESC_ON_STACK(desc, con->v2.hmac_tfm); /* tfm arg is ignored */

	dout("%s con %p hmac_tfm %p kvec_cnt %d\n", __func__, con,
	     con->v2.hmac_tfm, kvec_cnt);

	if (!con->v2.hmac_tfm) {
		memset(hmac, 0, SHA256_DIGEST_SIZE);
		return 0; /* auth_none */

	desc->tfm = con->v2.hmac_tfm;
	ret = crypto_shash_init(desc);

	for (i = 0; i < kvec_cnt; i++) {
		WARN_ON((unsigned long)kvecs[i].iov_base &
			crypto_shash_alignmask(con->v2.hmac_tfm));
		ret = crypto_shash_update(desc, kvecs[i].iov_base,

	ret = crypto_shash_final(desc, hmac);

	shash_desc_zero(desc);
	return ret; /* auth_x, both plain and secure modes */

static void gcm_inc_nonce(struct ceph_gcm_nonce *nonce)
	counter = le64_to_cpu(nonce->counter);
	nonce->counter = cpu_to_le64(counter + 1);
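
/*
 * Each direction has its own nonce; the counter half is bumped after
 * every AEAD operation (see gcm_crypt() below) so that a nonce is
 * never reused with the same connection secret.
 */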
static int gcm_crypt(struct ceph_connection *con, bool encrypt,
		     struct scatterlist *src, struct scatterlist *dst,
		     int src_len)
	struct ceph_gcm_nonce *nonce;

	nonce = encrypt ? &con->v2.out_gcm_nonce : &con->v2.in_gcm_nonce;

	aead_request_set_ad(con->v2.gcm_req, 0); /* no AAD */
	aead_request_set_crypt(con->v2.gcm_req, src, dst, src_len, (u8 *)nonce);
	ret = crypto_wait_req(encrypt ? crypto_aead_encrypt(con->v2.gcm_req) :
					crypto_aead_decrypt(con->v2.gcm_req),

	gcm_inc_nonce(nonce);

static void get_bvec_at(struct ceph_msg_data_cursor *cursor,
			struct bio_vec *bv)
	WARN_ON(!cursor->total_resid);

	/* skip zero-length data items */
	while (!cursor->resid)
		ceph_msg_data_advance(cursor, 0);

	/* get a piece of data, cursor isn't advanced */
	page = ceph_msg_data_next(cursor, &off, &len);
	bvec_set_page(bv, page, len, off);
static int calc_sg_cnt(void *buf, int buf_len)
	sg_cnt = need_padding(buf_len) ? 1 : 0;
	if (is_vmalloc_addr(buf)) {
		WARN_ON(offset_in_page(buf));
		sg_cnt += PAGE_ALIGN(buf_len) >> PAGE_SHIFT;

static int calc_sg_cnt_cursor(struct ceph_msg_data_cursor *cursor)
	int data_len = cursor->total_resid;

	sg_cnt = need_padding(data_len) ? 1 : 0;
	do {
		get_bvec_at(cursor, &bv);
		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

static void init_sgs(struct scatterlist **sg, void *buf, int buf_len, u8 *pad)
	void *end = buf + buf_len;

	if (is_vmalloc_addr(buf)) {
		page = vmalloc_to_page(p);
		len = min_t(int, end - p, PAGE_SIZE);
		WARN_ON(!page || !len || offset_in_page(p));
		sg_set_page(*sg, page, len, 0);

	sg_set_buf(*sg, buf, buf_len);

	if (need_padding(buf_len)) {
		sg_set_buf(*sg, pad, padding_len(buf_len));

static void init_sgs_cursor(struct scatterlist **sg,
			    struct ceph_msg_data_cursor *cursor, u8 *pad)
	int data_len = cursor->total_resid;

	do {
		get_bvec_at(cursor, &bv);
		sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
		ceph_msg_data_advance(cursor, bv.bv_len);
	} while (cursor->total_resid);

	if (need_padding(data_len)) {
		sg_set_buf(*sg, pad, padding_len(data_len));
static int setup_message_sgs(struct sg_table *sgt, struct ceph_msg *msg,
			     u8 *front_pad, u8 *middle_pad, u8 *data_pad,
			     void *epilogue, bool add_tag)
	struct ceph_msg_data_cursor cursor;
	struct scatterlist *cur_sg;

	if (!front_len(msg) && !middle_len(msg) && !data_len(msg))

	sg_cnt = 1; /* epilogue + [auth tag] */
	sg_cnt += calc_sg_cnt(msg->front.iov_base,
	sg_cnt += calc_sg_cnt(msg->middle->vec.iov_base,
	ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
	sg_cnt += calc_sg_cnt_cursor(&cursor);

	ret = sg_alloc_table(sgt, sg_cnt, GFP_NOIO);

	init_sgs(&cur_sg, msg->front.iov_base, front_len(msg),
	init_sgs(&cur_sg, msg->middle->vec.iov_base, middle_len(msg),
	ceph_msg_data_cursor_init(&cursor, msg, data_len(msg));
	init_sgs_cursor(&cur_sg, &cursor, data_pad);

	WARN_ON(!sg_is_last(cur_sg));
	sg_set_buf(cur_sg, epilogue,
		   CEPH_GCM_BLOCK_LEN + (add_tag ? CEPH_GCM_TAG_LEN : 0));
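
/*
 * The resulting sg list mirrors the onwire tail layout: front, middle
 * and data segments (each rounded up to the GCM block size via the
 * *_pad buffers) followed by the epilogue and, when add_tag is set,
 * room for the auth tag.
 */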
static int decrypt_preamble(struct ceph_connection *con)
	struct scatterlist sg;

	sg_init_one(&sg, con->v2.in_buf, CEPH_PREAMBLE_SECURE_LEN);
	return gcm_crypt(con, false, &sg, &sg, CEPH_PREAMBLE_SECURE_LEN);

static int decrypt_control_remainder(struct ceph_connection *con)
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	int pt_len = padding_len(rem_len) + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];

	WARN_ON(con->v2.in_kvecs[0].iov_len != rem_len);
	WARN_ON(con->v2.in_kvecs[1].iov_len != pt_len);

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], con->v2.in_kvecs[0].iov_base, rem_len);
	sg_set_buf(&sgs[1], con->v2.in_buf, pt_len);

	return gcm_crypt(con, false, sgs, sgs,
			 padded_len(rem_len) + CEPH_GCM_TAG_LEN);

static int decrypt_tail(struct ceph_connection *con)
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};

	tail_len = tail_onwire_len(con->in_msg, true);
	ret = sg_alloc_table_from_pages(&enc_sgt, con->v2.in_enc_pages,
					con->v2.in_enc_page_cnt, 0, tail_len,

	ret = setup_message_sgs(&sgt, con->in_msg, FRONT_PAD(con->v2.in_buf),
				MIDDLE_PAD(con->v2.in_buf), DATA_PAD(con->v2.in_buf),
				con->v2.in_buf, true);

	dout("%s con %p msg %p enc_page_cnt %d sg_cnt %d\n", __func__, con,
	     con->in_msg, con->v2.in_enc_page_cnt, sgt.orig_nents);
	ret = gcm_crypt(con, false, enc_sgt.sgl, sgt.sgl, tail_len);

	WARN_ON(!con->v2.in_enc_page_cnt);
	ceph_release_page_vector(con->v2.in_enc_pages,
				 con->v2.in_enc_page_cnt);
	con->v2.in_enc_pages = NULL;
	con->v2.in_enc_page_cnt = 0;

	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
static int prepare_banner(struct ceph_connection *con)
	int buf_len = CEPH_BANNER_V2_LEN + 2 + 8 + 8;

	buf = alloc_conn_buf(con, buf_len);

	ceph_encode_copy(&p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN);
	ceph_encode_16(&p, sizeof(u64) + sizeof(u64));
	ceph_encode_64(&p, CEPH_MSGR2_SUPPORTED_FEATURES);
	ceph_encode_64(&p, CEPH_MSGR2_REQUIRED_FEATURES);
	WARN_ON(p != buf + buf_len);

	add_out_kvec(con, buf, buf_len);
	add_out_sign_kvec(con, buf, buf_len);
	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
/*
 * base:
 *   preamble
 *   control body (ctrl_len bytes)
 *   space for control crc
 *
 * extdata (optional):
 *   control body (extdata_len bytes)
 *
 * Compute control crc and gather base and extdata into:
 *
 *   preamble
 *   control body (ctrl_len + extdata_len bytes)
 *   control crc
 *
 * Preamble should already be encoded at the start of base.
 */
static void prepare_head_plain(struct ceph_connection *con, void *base,
			       int ctrl_len, void *extdata, int extdata_len,
			       bool to_be_signed)
	int base_len = CEPH_PREAMBLE_LEN + ctrl_len + CEPH_CRC_LEN;
	void *crcp = base + base_len - CEPH_CRC_LEN;

	crc = crc32c(-1, CTRL_BODY(base), ctrl_len);
		crc = crc32c(crc, extdata, extdata_len);
	put_unaligned_le32(crc, crcp);

	add_out_kvec(con, base, base_len);
		add_out_sign_kvec(con, base, base_len);

	add_out_kvec(con, base, crcp - base);
	add_out_kvec(con, extdata, extdata_len);
	add_out_kvec(con, crcp, CEPH_CRC_LEN);

	add_out_sign_kvec(con, base, crcp - base);
	add_out_sign_kvec(con, extdata, extdata_len);
	add_out_sign_kvec(con, crcp, CEPH_CRC_LEN);
static int prepare_head_secure_small(struct ceph_connection *con,
				     void *base, int ctrl_len)
	struct scatterlist sg;

	/* inline buffer padding? */
	if (ctrl_len < CEPH_PREAMBLE_INLINE_LEN)
		memset(CTRL_BODY(base) + ctrl_len, 0,
		       CEPH_PREAMBLE_INLINE_LEN - ctrl_len);

	sg_init_one(&sg, base, CEPH_PREAMBLE_SECURE_LEN);
	ret = gcm_crypt(con, true, &sg, &sg,
			CEPH_PREAMBLE_SECURE_LEN - CEPH_GCM_TAG_LEN);

	add_out_kvec(con, base, CEPH_PREAMBLE_SECURE_LEN);
/*
 * base:
 *   preamble
 *   control body (ctrl_len bytes)
 *   space for padding, if needed
 *   space for control remainder auth tag
 *   space for preamble auth tag
 *
 * Encrypt preamble and the inline portion, then encrypt the remainder
 * and gather into:
 *
 *   preamble
 *   control body (48 bytes)
 *   preamble auth tag
 *   control body (ctrl_len - 48 bytes)
 *   zero padding, if needed
 *   control remainder auth tag
 *
 * Preamble should already be encoded at the start of base.
 */
static int prepare_head_secure_big(struct ceph_connection *con,
				   void *base, int ctrl_len)
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;
	void *rem = CTRL_BODY(base) + CEPH_PREAMBLE_INLINE_LEN;
	void *rem_tag = rem + padded_len(rem_len);
	void *pmbl_tag = rem_tag + CEPH_GCM_TAG_LEN;
	struct scatterlist sgs[2];

	sg_init_table(sgs, 2);
	sg_set_buf(&sgs[0], base, rem - base);
	sg_set_buf(&sgs[1], pmbl_tag, CEPH_GCM_TAG_LEN);
	ret = gcm_crypt(con, true, sgs, sgs, rem - base);

	/* control remainder padding? */
	if (need_padding(rem_len))
		memset(rem + rem_len, 0, padding_len(rem_len));

	sg_init_one(&sgs[0], rem, pmbl_tag - rem);
	ret = gcm_crypt(con, true, sgs, sgs, rem_tag - rem);

	add_out_kvec(con, base, rem - base);
	add_out_kvec(con, pmbl_tag, CEPH_GCM_TAG_LEN);
	add_out_kvec(con, rem, pmbl_tag - rem);
static int __prepare_control(struct ceph_connection *con, int tag,
			     void *base, int ctrl_len, void *extdata,
			     int extdata_len, bool to_be_signed)
	int total_len = ctrl_len + extdata_len;
	struct ceph_frame_desc desc;

	dout("%s con %p tag %d len %d (%d+%d)\n", __func__, con, tag,
	     total_len, ctrl_len, extdata_len);

	/* extdata may be vmalloc'ed but not base */
	if (WARN_ON(is_vmalloc_addr(base) || !ctrl_len))

	init_frame_desc(&desc, tag, &total_len, 1);
	encode_preamble(&desc, base);

	if (con_secure(con)) {
		if (WARN_ON(extdata_len || to_be_signed))

		if (ctrl_len <= CEPH_PREAMBLE_INLINE_LEN)
			/* fully inlined, inline buffer may need padding */
			ret = prepare_head_secure_small(con, base, ctrl_len);
		else
			/* partially inlined, inline buffer is full */
			ret = prepare_head_secure_big(con, base, ctrl_len);

	prepare_head_plain(con, base, ctrl_len, extdata, extdata_len,

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);

static int prepare_control(struct ceph_connection *con, int tag,
			   void *base, int ctrl_len)
	return __prepare_control(con, tag, base, ctrl_len, NULL, 0, false);
static int prepare_hello(struct ceph_connection *con)
	ctrl_len = 1 + ceph_entity_addr_encoding_len(&con->peer_addr);
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));

	ceph_encode_8(&p, CEPH_ENTITY_TYPE_CLIENT);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return __prepare_control(con, FRAME_TAG_HELLO, buf, ctrl_len,

/* so that head_onwire_len(AUTH_BUF_LEN, false) is 512 */
#define AUTH_BUF_LEN (512 - CEPH_CRC_LEN - CEPH_PREAMBLE_PLAIN_LEN)
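
/*
 * Assuming the usual 32-byte plain preamble and 4-byte crc, this works
 * out to 512 - 4 - 32 == 476 bytes of control payload, so the whole
 * auth request head fits in a single 512-byte buffer.
 */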
static int prepare_auth_request(struct ceph_connection *con)
	void *authorizer, *authorizer_copy;
	int ctrl_len, authorizer_len;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));

	mutex_unlock(&con->mutex);
	ret = con->ops->get_auth_request(con, CTRL_BODY(buf), &ctrl_len,
					 &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_HELLO) {
		dout("%s con %p state changed to %d\n", __func__, con,

	dout("%s con %p get_auth_request ret %d\n", __func__, con, ret);

	authorizer_copy = alloc_conn_buf(con, authorizer_len);
	if (!authorizer_copy)

	memcpy(authorizer_copy, authorizer, authorizer_len);

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST, buf, ctrl_len,
				 authorizer_copy, authorizer_len, true);
static int prepare_auth_request_more(struct ceph_connection *con,
				     void *reply, int reply_len)
	int ctrl_len, authorizer_len;

	ctrl_len = AUTH_BUF_LEN;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, false));

	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_reply_more(con, reply, reply_len,
					       CTRL_BODY(buf), &ctrl_len,
					       &authorizer, &authorizer_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,

	dout("%s con %p handle_auth_reply_more ret %d\n", __func__, con, ret);

	return __prepare_control(con, FRAME_TAG_AUTH_REQUEST_MORE, buf,
				 ctrl_len, authorizer, authorizer_len, true);

static int prepare_auth_signature(struct ceph_connection *con)
	buf = alloc_conn_buf(con, head_onwire_len(SHA256_DIGEST_SIZE,

	ret = hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,

	return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
			       SHA256_DIGEST_SIZE);
static int prepare_client_ident(struct ceph_connection *con)
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_client *client = from_msgr(con->msgr);
	u64 global_id = ceph_client_gid(client);

	WARN_ON(con->v2.server_cookie);
	WARN_ON(con->v2.connect_seq);
	WARN_ON(con->v2.peer_global_seq);

	if (!con->v2.client_cookie) {
		do {
			get_random_bytes(&con->v2.client_cookie,
					 sizeof(con->v2.client_cookie));
		} while (!con->v2.client_cookie);
		dout("%s con %p generated cookie 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	} else {
		dout("%s con %p cookie already set 0x%llx\n", __func__, con,
		     con->v2.client_cookie);
	}

	dout("%s con %p my_addr %s/%u peer_addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx cookie 0x%llx\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     ceph_pr_addr(&con->peer_addr), le32_to_cpu(con->peer_addr.nonce),
	     global_id, con->v2.global_seq, client->supported_features,
	     client->required_features, con->v2.client_cookie);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) +
		   ceph_entity_addr_encoding_len(&con->peer_addr) + 6 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));

	ceph_encode_8(&p, 2); /* addrvec marker */
	ceph_encode_32(&p, 1); /* addr_cnt */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_entity_addr(&p, &con->peer_addr);
	ceph_encode_64(&p, global_id);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, client->supported_features);
	ceph_encode_64(&p, client->required_features);
	ceph_encode_64(&p, 0); /* flags */
	ceph_encode_64(&p, con->v2.client_cookie);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_CLIENT_IDENT, buf, ctrl_len);
static int prepare_session_reconnect(struct ceph_connection *con)
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;

	WARN_ON(!con->v2.client_cookie);
	WARN_ON(!con->v2.server_cookie);
	WARN_ON(!con->v2.connect_seq);
	WARN_ON(!con->v2.peer_global_seq);

	dout("%s con %p my_addr %s/%u client_cookie 0x%llx server_cookie 0x%llx global_seq %llu connect_seq %llu in_seq %llu\n",
	     __func__, con, ceph_pr_addr(my_addr), le32_to_cpu(my_addr->nonce),
	     con->v2.client_cookie, con->v2.server_cookie, con->v2.global_seq,
	     con->v2.connect_seq, con->in_seq);

	ctrl_len = 1 + 4 + ceph_entity_addr_encoding_len(my_addr) + 5 * 8;
	buf = alloc_conn_buf(con, head_onwire_len(ctrl_len, con_secure(con)));

	ceph_encode_8(&p, 2); /* entity_addrvec_t marker */
	ceph_encode_32(&p, 1); /* my_addrs len */
	ceph_encode_entity_addr(&p, my_addr);
	ceph_encode_64(&p, con->v2.client_cookie);
	ceph_encode_64(&p, con->v2.server_cookie);
	ceph_encode_64(&p, con->v2.global_seq);
	ceph_encode_64(&p, con->v2.connect_seq);
	ceph_encode_64(&p, con->in_seq);
	WARN_ON(p != CTRL_BODY(buf) + ctrl_len);

	return prepare_control(con, FRAME_TAG_SESSION_RECONNECT, buf, ctrl_len);
static int prepare_keepalive2(struct ceph_connection *con)
	struct ceph_timespec *ts = CTRL_BODY(con->v2.out_buf);
	struct timespec64 now;

	ktime_get_real_ts64(&now);
	dout("%s con %p timestamp %lld.%09ld\n", __func__, con, now.tv_sec,

	ceph_encode_timespec64(ts, &now);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_KEEPALIVE2, con->v2.out_buf,
			       sizeof(struct ceph_timespec));

static int prepare_ack(struct ceph_connection *con)
	dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	p = CTRL_BODY(con->v2.out_buf);
	ceph_encode_64(&p, con->in_seq_acked);

	reset_out_kvecs(con);
	return prepare_control(con, FRAME_TAG_ACK, con->v2.out_buf, 8);
static void prepare_epilogue_plain(struct ceph_connection *con, bool aborted)
	dout("%s con %p msg %p aborted %d crcs %u %u %u\n", __func__, con,
	     con->out_msg, aborted, con->v2.out_epil.front_crc,
	     con->v2.out_epil.middle_crc, con->v2.out_epil.data_crc);

	encode_epilogue_plain(con, aborted);
	add_out_kvec(con, &con->v2.out_epil, CEPH_EPILOGUE_PLAIN_LEN);

/*
 * For "used" empty segments, crc is -1. For unused (trailing)
 * segments, crc is 0.
 */
static void prepare_message_plain(struct ceph_connection *con)
	struct ceph_msg *msg = con->out_msg;

	prepare_head_plain(con, con->v2.out_buf,
			   sizeof(struct ceph_msg_header2), NULL, 0, false);

	if (!front_len(msg) && !middle_len(msg)) {
		if (!data_len(msg)) {
			/*
			 * Empty message: once the head is written,
			 * we are done -- there is no epilogue.
			 */
			con->v2.out_state = OUT_S_FINISH_MESSAGE;

		con->v2.out_epil.front_crc = -1;
		con->v2.out_epil.middle_crc = -1;
		con->v2.out_state = OUT_S_QUEUE_DATA;

	if (front_len(msg)) {
		con->v2.out_epil.front_crc = crc32c(-1, msg->front.iov_base,
		add_out_kvec(con, msg->front.iov_base, front_len(msg));
	} else {
		/* middle (at least) is there, checked above */
		con->v2.out_epil.front_crc = -1;
	}

	if (middle_len(msg)) {
		con->v2.out_epil.middle_crc =
			crc32c(-1, msg->middle->vec.iov_base, middle_len(msg));
		add_out_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
	} else {
		con->v2.out_epil.middle_crc = data_len(msg) ? -1 : 0;
	}

	if (data_len(msg)) {
		con->v2.out_state = OUT_S_QUEUE_DATA;
	} else {
		con->v2.out_epil.data_crc = 0;
		prepare_epilogue_plain(con, false);
		con->v2.out_state = OUT_S_FINISH_MESSAGE;
	}
/*
 * Unfortunately the kernel crypto API doesn't support streaming
 * (piecewise) operation for AEAD algorithms, so we can't get away
 * with a fixed size buffer and a couple sgs.  Instead, we have to
 * allocate pages for the entire tail of the message (currently up
 * to ~32M) and two sgs arrays (up to ~256K each)...
 */
static int prepare_message_secure(struct ceph_connection *con)
	void *zerop = page_address(ceph_zero_page);
	struct sg_table enc_sgt = {};
	struct sg_table sgt = {};
	struct page **enc_pages;

	ret = prepare_head_secure_small(con, con->v2.out_buf,
					sizeof(struct ceph_msg_header2));

	tail_len = tail_onwire_len(con->out_msg, true);
	if (!tail_len) {
		/*
		 * Empty message: once the head is written,
		 * we are done -- there is no epilogue.
		 */
		con->v2.out_state = OUT_S_FINISH_MESSAGE;

	encode_epilogue_secure(con, false);
	ret = setup_message_sgs(&sgt, con->out_msg, zerop, zerop, zerop,
				&con->v2.out_epil, false);

	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages)) {
		ret = PTR_ERR(enc_pages);

	WARN_ON(con->v2.out_enc_pages || con->v2.out_enc_page_cnt);
	con->v2.out_enc_pages = enc_pages;
	con->v2.out_enc_page_cnt = enc_page_cnt;
	con->v2.out_enc_resid = tail_len;
	con->v2.out_enc_i = 0;

	ret = sg_alloc_table_from_pages(&enc_sgt, enc_pages, enc_page_cnt,
					0, tail_len, GFP_NOIO);

	ret = gcm_crypt(con, true, sgt.sgl, enc_sgt.sgl,
			tail_len - CEPH_GCM_TAG_LEN);

	dout("%s con %p msg %p sg_cnt %d enc_page_cnt %d\n", __func__, con,
	     con->out_msg, sgt.orig_nents, enc_page_cnt);
	con->v2.out_state = OUT_S_QUEUE_ENC_PAGE;

	sg_free_table(&sgt);
	sg_free_table(&enc_sgt);
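
/*
 * On success the encrypted tail lives in out_enc_pages and is queued
 * for sending page by page (OUT_S_QUEUE_ENC_PAGE); out_enc_resid
 * tracks how much ciphertext is still to be queued.
 */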
static int prepare_message(struct ceph_connection *con)
	int lens[] = {
		sizeof(struct ceph_msg_header2),
		front_len(con->out_msg),
		middle_len(con->out_msg),
		data_len(con->out_msg)
	};
	struct ceph_frame_desc desc;

	dout("%s con %p msg %p logical %d+%d+%d+%d\n", __func__, con,
	     con->out_msg, lens[0], lens[1], lens[2], lens[3]);

	if (con->in_seq > con->in_seq_acked) {
		dout("%s con %p in_seq_acked %llu -> %llu\n", __func__, con,
		     con->in_seq_acked, con->in_seq);
		con->in_seq_acked = con->in_seq;
	}

	reset_out_kvecs(con);
	init_frame_desc(&desc, FRAME_TAG_MESSAGE, lens, 4);
	encode_preamble(&desc, con->v2.out_buf);
	fill_header2(CTRL_BODY(con->v2.out_buf), &con->out_msg->hdr,

	if (con_secure(con)) {
		ret = prepare_message_secure(con);
	} else {
		prepare_message_plain(con);
	}

	ceph_con_flag_set(con, CEPH_CON_F_WRITE_PENDING);
static int prepare_read_banner_prefix(struct ceph_connection *con)
	buf = alloc_conn_buf(con, CEPH_BANNER_V2_PREFIX_LEN);

	reset_in_kvecs(con);
	add_in_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
	add_in_sign_kvec(con, buf, CEPH_BANNER_V2_PREFIX_LEN);
	con->state = CEPH_CON_S_V2_BANNER_PREFIX;

static int prepare_read_banner_payload(struct ceph_connection *con,
				       int payload_len)
	buf = alloc_conn_buf(con, payload_len);

	reset_in_kvecs(con);
	add_in_kvec(con, buf, payload_len);
	add_in_sign_kvec(con, buf, payload_len);
	con->state = CEPH_CON_S_V2_BANNER_PAYLOAD;

static void prepare_read_preamble(struct ceph_connection *con)
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf,
		    con_secure(con) ? CEPH_PREAMBLE_SECURE_LEN :
				      CEPH_PREAMBLE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_PREAMBLE;
static int prepare_read_control(struct ceph_connection *con)
	int ctrl_len = con->v2.in_desc.fd_lens[0];

	reset_in_kvecs(con);
	if (con->state == CEPH_CON_S_V2_HELLO ||
	    con->state == CEPH_CON_S_V2_AUTH) {
		head_len = head_onwire_len(ctrl_len, false);
		buf = alloc_conn_buf(con, head_len);

		/* preserve preamble */
		memcpy(buf, con->v2.in_buf, CEPH_PREAMBLE_LEN);

		add_in_kvec(con, CTRL_BODY(buf), ctrl_len);
		add_in_kvec(con, CTRL_BODY(buf) + ctrl_len, CEPH_CRC_LEN);
		add_in_sign_kvec(con, buf, head_len);
	} else {
		if (ctrl_len > CEPH_PREAMBLE_INLINE_LEN) {
			buf = alloc_conn_buf(con, ctrl_len);

			add_in_kvec(con, buf, ctrl_len);
		} else {
			add_in_kvec(con, CTRL_BODY(con->v2.in_buf), ctrl_len);
		}
		add_in_kvec(con, con->v2.in_buf, CEPH_CRC_LEN);
	}
	con->v2.in_state = IN_S_HANDLE_CONTROL;

static int prepare_read_control_remainder(struct ceph_connection *con)
	int ctrl_len = con->v2.in_desc.fd_lens[0];
	int rem_len = ctrl_len - CEPH_PREAMBLE_INLINE_LEN;

	buf = alloc_conn_buf(con, ctrl_len);

	memcpy(buf, CTRL_BODY(con->v2.in_buf), CEPH_PREAMBLE_INLINE_LEN);

	reset_in_kvecs(con);
	add_in_kvec(con, buf + CEPH_PREAMBLE_INLINE_LEN, rem_len);
	add_in_kvec(con, con->v2.in_buf,
		    padding_len(rem_len) + CEPH_GCM_TAG_LEN);
	con->v2.in_state = IN_S_HANDLE_CONTROL_REMAINDER;
static int prepare_read_data(struct ceph_connection *con)
	con->in_data_crc = -1;
	ceph_msg_data_cursor_init(&con->v2.in_cursor, con->in_msg,
				  data_len(con->in_msg));

	get_bvec_at(&con->v2.in_cursor, &bv);
	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		if (unlikely(!con->bounce_page)) {
			con->bounce_page = alloc_page(GFP_NOIO);
			if (!con->bounce_page) {
				pr_err("failed to allocate bounce page\n");

		bv.bv_page = con->bounce_page;

	set_in_bvec(con, &bv);
	con->v2.in_state = IN_S_PREPARE_READ_DATA_CONT;

static void prepare_read_data_cont(struct ceph_connection *con)
	if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
		con->in_data_crc = crc32c(con->in_data_crc,
					  page_address(con->bounce_page),
					  con->v2.in_bvec.bv_len);

		get_bvec_at(&con->v2.in_cursor, &bv);
		memcpy_to_page(bv.bv_page, bv.bv_offset,
			       page_address(con->bounce_page),
			       con->v2.in_bvec.bv_len);
	} else {
		con->in_data_crc = ceph_crc32c_page(con->in_data_crc,
						    con->v2.in_bvec.bv_page,
						    con->v2.in_bvec.bv_offset,
						    con->v2.in_bvec.bv_len);
	}

	ceph_msg_data_advance(&con->v2.in_cursor, con->v2.in_bvec.bv_len);
	if (con->v2.in_cursor.total_resid) {
		get_bvec_at(&con->v2.in_cursor, &bv);
		if (ceph_test_opt(from_msgr(con->msgr), RXBOUNCE)) {
			bv.bv_page = con->bounce_page;

		set_in_bvec(con, &bv);
		WARN_ON(con->v2.in_state != IN_S_PREPARE_READ_DATA_CONT);

	/*
	 * We've read all data. Prepare to read epilogue.
	 */
	reset_in_kvecs(con);
	add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
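
/*
 * With RXBOUNCE each data chunk is received into the connection's
 * private bounce page, crc'ed there and only then copied into the real
 * destination page, so the crc is always computed on a stable copy of
 * the data.
 */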
static int prepare_read_tail_plain(struct ceph_connection *con)
	struct ceph_msg *msg = con->in_msg;

	if (!front_len(msg) && !middle_len(msg)) {
		WARN_ON(!data_len(msg));
		return prepare_read_data(con);
	}

	reset_in_kvecs(con);
	if (front_len(msg)) {
		add_in_kvec(con, msg->front.iov_base, front_len(msg));
		WARN_ON(msg->front.iov_len != front_len(msg));
	}
	if (middle_len(msg)) {
		add_in_kvec(con, msg->middle->vec.iov_base, middle_len(msg));
		WARN_ON(msg->middle->vec.iov_len != middle_len(msg));
	}

	if (data_len(msg)) {
		con->v2.in_state = IN_S_PREPARE_READ_DATA;
	} else {
		add_in_kvec(con, con->v2.in_buf, CEPH_EPILOGUE_PLAIN_LEN);
		con->v2.in_state = IN_S_HANDLE_EPILOGUE;
	}

static void prepare_read_enc_page(struct ceph_connection *con)
	dout("%s con %p i %d resid %d\n", __func__, con, con->v2.in_enc_i,
	     con->v2.in_enc_resid);
	WARN_ON(!con->v2.in_enc_resid);

	bvec_set_page(&bv, con->v2.in_enc_pages[con->v2.in_enc_i],
		      min(con->v2.in_enc_resid, (int)PAGE_SIZE), 0);

	set_in_bvec(con, &bv);
	con->v2.in_enc_resid -= bv.bv_len;

	if (con->v2.in_enc_resid) {
		con->v2.in_state = IN_S_PREPARE_READ_ENC_PAGE;

	/*
	 * We are set to read the last piece of ciphertext (ending
	 * with epilogue) + auth tag.
	 */
	WARN_ON(con->v2.in_enc_i != con->v2.in_enc_page_cnt);
	con->v2.in_state = IN_S_HANDLE_EPILOGUE;
static int prepare_read_tail_secure(struct ceph_connection *con)
	struct page **enc_pages;

	tail_len = tail_onwire_len(con->in_msg, true);

	enc_page_cnt = calc_pages_for(0, tail_len);
	enc_pages = ceph_alloc_page_vector(enc_page_cnt, GFP_NOIO);
	if (IS_ERR(enc_pages))
		return PTR_ERR(enc_pages);

	WARN_ON(con->v2.in_enc_pages || con->v2.in_enc_page_cnt);
	con->v2.in_enc_pages = enc_pages;
	con->v2.in_enc_page_cnt = enc_page_cnt;
	con->v2.in_enc_resid = tail_len;
	con->v2.in_enc_i = 0;

	prepare_read_enc_page(con);

static void __finish_skip(struct ceph_connection *con)
	prepare_read_preamble(con);

static void prepare_skip_message(struct ceph_connection *con)
	struct ceph_frame_desc *desc = &con->v2.in_desc;

	dout("%s con %p %d+%d+%d\n", __func__, con, desc->fd_lens[1],
	     desc->fd_lens[2], desc->fd_lens[3]);

	tail_len = __tail_onwire_len(desc->fd_lens[1], desc->fd_lens[2],
				     desc->fd_lens[3], con_secure(con));

	set_in_skip(con, tail_len);
	con->v2.in_state = IN_S_FINISH_SKIP;
static int process_banner_prefix(struct ceph_connection *con)
	WARN_ON(con->v2.in_kvecs[0].iov_len != CEPH_BANNER_V2_PREFIX_LEN);

	p = con->v2.in_kvecs[0].iov_base;
	if (memcmp(p, CEPH_BANNER_V2, CEPH_BANNER_V2_LEN)) {
		if (!memcmp(p, CEPH_BANNER, CEPH_BANNER_LEN))
			con->error_msg = "server is speaking msgr1 protocol";
		else
			con->error_msg = "protocol error, bad banner";

	p += CEPH_BANNER_V2_LEN;
	payload_len = ceph_decode_16(&p);
	dout("%s con %p payload_len %d\n", __func__, con, payload_len);

	return prepare_read_banner_payload(con, payload_len);

static int process_banner_payload(struct ceph_connection *con)
	void *end = con->v2.in_kvecs[0].iov_base + con->v2.in_kvecs[0].iov_len;
	u64 feat = CEPH_MSGR2_SUPPORTED_FEATURES;
	u64 req_feat = CEPH_MSGR2_REQUIRED_FEATURES;
	u64 server_feat, server_req_feat;

	p = con->v2.in_kvecs[0].iov_base;
	ceph_decode_64_safe(&p, end, server_feat, bad);
	ceph_decode_64_safe(&p, end, server_req_feat, bad);

	dout("%s con %p server_feat 0x%llx server_req_feat 0x%llx\n",
	     __func__, con, server_feat, server_req_feat);

	if (req_feat & ~server_feat) {
		pr_err("msgr2 feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
		       server_feat, req_feat & ~server_feat);
		con->error_msg = "missing required protocol features";

	if (server_req_feat & ~feat) {
		pr_err("msgr2 feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
		       feat, server_req_feat & ~feat);
		con->error_msg = "missing required protocol features";

	/* no reset_out_kvecs() as our banner may still be pending */
	ret = prepare_hello(con);
		pr_err("prepare_hello failed: %d\n", ret);

	con->state = CEPH_CON_S_V2_HELLO;
	prepare_read_preamble(con);

bad:
	pr_err("failed to decode banner payload\n");
static int process_hello(struct ceph_connection *con, void *p, void *end)
	struct ceph_entity_addr *my_addr = &con->msgr->inst.addr;
	struct ceph_entity_addr addr_for_me;

	if (con->state != CEPH_CON_S_V2_HELLO) {
		con->error_msg = "protocol error, unexpected hello";

	ceph_decode_8_safe(&p, end, entity_type, bad);
	ret = ceph_decode_entity_addr(&p, end, &addr_for_me);
		pr_err("failed to decode addr_for_me: %d\n", ret);

	dout("%s con %p entity_type %d addr_for_me %s\n", __func__, con,
	     entity_type, ceph_pr_addr(&addr_for_me));

	if (entity_type != con->peer_name.type) {
		pr_err("bad peer type, want %d, got %d\n",
		       con->peer_name.type, entity_type);
		con->error_msg = "wrong peer at address";

	/*
	 * Set our address to the address our first peer (i.e. monitor)
	 * sees that we are connecting from. If we are behind some sort
	 * of NAT and want to be identified by some private (not NATed)
	 * address, ip option should be used.
	 */
	if (ceph_addr_is_blank(my_addr)) {
		memcpy(&my_addr->in_addr, &addr_for_me.in_addr,
		       sizeof(my_addr->in_addr));
		ceph_addr_set_port(my_addr, 0);
		dout("%s con %p set my addr %s, as seen by peer %s\n",
		     __func__, con, ceph_pr_addr(my_addr),
		     ceph_pr_addr(&con->peer_addr));
	} else {
		dout("%s con %p my addr already set %s\n",
		     __func__, con, ceph_pr_addr(my_addr));
	}

	WARN_ON(ceph_addr_is_blank(my_addr) || ceph_addr_port(my_addr));
	WARN_ON(my_addr->type != CEPH_ENTITY_ADDR_TYPE_ANY);
	WARN_ON(!my_addr->nonce);

	/* no reset_out_kvecs() as our hello may still be pending */
	ret = prepare_auth_request(con);
		pr_err("prepare_auth_request failed: %d\n", ret);

	con->state = CEPH_CON_S_V2_AUTH;

bad:
	pr_err("failed to decode hello\n");
static int process_auth_bad_method(struct ceph_connection *con,
				   void *p, void *end)
	int allowed_protos[8], allowed_modes[8];
	int allowed_proto_cnt, allowed_mode_cnt;
	int used_proto, result;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_bad_method";

	ceph_decode_32_safe(&p, end, used_proto, bad);
	ceph_decode_32_safe(&p, end, result, bad);
	dout("%s con %p used_proto %d result %d\n", __func__, con, used_proto,

	ceph_decode_32_safe(&p, end, allowed_proto_cnt, bad);
	if (allowed_proto_cnt > ARRAY_SIZE(allowed_protos)) {
		pr_err("allowed_protos too big %d\n", allowed_proto_cnt);

	for (i = 0; i < allowed_proto_cnt; i++) {
		ceph_decode_32_safe(&p, end, allowed_protos[i], bad);
		dout("%s con %p allowed_protos[%d] %d\n", __func__, con,
		     i, allowed_protos[i]);
	}

	ceph_decode_32_safe(&p, end, allowed_mode_cnt, bad);
	if (allowed_mode_cnt > ARRAY_SIZE(allowed_modes)) {
		pr_err("allowed_modes too big %d\n", allowed_mode_cnt);

	for (i = 0; i < allowed_mode_cnt; i++) {
		ceph_decode_32_safe(&p, end, allowed_modes[i], bad);
		dout("%s con %p allowed_modes[%d] %d\n", __func__, con,
		     i, allowed_modes[i]);
	}

	mutex_unlock(&con->mutex);
	ret = con->ops->handle_auth_bad_method(con, used_proto, result,
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,

	dout("%s con %p handle_auth_bad_method ret %d\n", __func__, con, ret);

bad:
	pr_err("failed to decode auth_bad_method\n");

static int process_auth_reply_more(struct ceph_connection *con,
				   void *p, void *end)
	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_reply_more";

	ceph_decode_32_safe(&p, end, payload_len, bad);
	ceph_decode_need(&p, end, payload_len, bad);

	dout("%s con %p payload_len %d\n", __func__, con, payload_len);

	reset_out_kvecs(con);
	ret = prepare_auth_request_more(con, p, payload_len);
		pr_err("prepare_auth_request_more failed: %d\n", ret);

bad:
	pr_err("failed to decode auth_reply_more\n");
/*
 * Align session_key and con_secret to avoid GFP_ATOMIC allocation
 * inside crypto_shash_setkey() and crypto_aead_setkey() called from
 * setup_crypto(). __aligned(16) isn't guaranteed to work for stack
 * objects, so do it by hand.
 */
static int process_auth_done(struct ceph_connection *con, void *p, void *end)
	u8 session_key_buf[CEPH_KEY_LEN + 16];
	u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
	u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
	u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
	int session_key_len, con_secret_len;

	if (con->state != CEPH_CON_S_V2_AUTH) {
		con->error_msg = "protocol error, unexpected auth_done";

	ceph_decode_64_safe(&p, end, global_id, bad);
	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
	ceph_decode_32_safe(&p, end, payload_len, bad);

	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
	     __func__, con, global_id, con->v2.con_mode, payload_len);

	mutex_unlock(&con->mutex);
	session_key_len = 0;
	ret = con->ops->handle_auth_done(con, global_id, p, payload_len,
					 session_key, &session_key_len,
					 con_secret, &con_secret_len);
	mutex_lock(&con->mutex);
	if (con->state != CEPH_CON_S_V2_AUTH) {
		dout("%s con %p state changed to %d\n", __func__, con,

	dout("%s con %p handle_auth_done ret %d\n", __func__, con, ret);

	ret = setup_crypto(con, session_key, session_key_len, con_secret,

	reset_out_kvecs(con);
	ret = prepare_auth_signature(con);
		pr_err("prepare_auth_signature failed: %d\n", ret);

	con->state = CEPH_CON_S_V2_AUTH_SIGNATURE;

	memzero_explicit(session_key_buf, sizeof(session_key_buf));
	memzero_explicit(con_secret_buf, sizeof(con_secret_buf));

bad:
	pr_err("failed to decode auth_done\n");
static int process_auth_signature(struct ceph_connection *con,
				  void *p, void *end)
	u8 hmac[SHA256_DIGEST_SIZE];

	if (con->state != CEPH_CON_S_V2_AUTH_SIGNATURE) {
		con->error_msg = "protocol error, unexpected auth_signature";

	ret = hmac_sha256(con, con->v2.out_sign_kvecs,
			  con->v2.out_sign_kvec_cnt, hmac);

	ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
	if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {
		con->error_msg = "integrity error, bad auth signature";

	dout("%s con %p auth signature ok\n", __func__, con);

	/* no reset_out_kvecs() as our auth_signature may still be pending */
	if (!con->v2.server_cookie) {
		ret = prepare_client_ident(con);
			pr_err("prepare_client_ident failed: %d\n", ret);

		con->state = CEPH_CON_S_V2_SESSION_CONNECT;
	} else {
		ret = prepare_session_reconnect(con);
			pr_err("prepare_session_reconnect failed: %d\n", ret);

		con->state = CEPH_CON_S_V2_SESSION_RECONNECT;
	}

bad:
	pr_err("failed to decode auth_signature\n");
2271 static int process_server_ident(struct ceph_connection *con,
2274 struct ceph_client *client = from_msgr(con->msgr);
2275 u64 features, required_features;
2276 struct ceph_entity_addr addr;
2283 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2284 con->error_msg = "protocol error, unexpected server_ident";
2288 ret = ceph_decode_entity_addrvec(&p, end, true, &addr);
2290 pr_err("failed to decode server addrs: %d\n", ret);
2294 ceph_decode_64_safe(&p, end, global_id, bad);
2295 ceph_decode_64_safe(&p, end, global_seq, bad);
2296 ceph_decode_64_safe(&p, end, features, bad);
2297 ceph_decode_64_safe(&p, end, required_features, bad);
2298 ceph_decode_64_safe(&p, end, flags, bad);
2299 ceph_decode_64_safe(&p, end, cookie, bad);
2301 dout("%s con %p addr %s/%u global_id %llu global_seq %llu features 0x%llx required_features 0x%llx flags 0x%llx cookie 0x%llx\n",
2302 __func__, con, ceph_pr_addr(&addr), le32_to_cpu(addr.nonce),
2303 global_id, global_seq, features, required_features, flags, cookie);
2305 /* is this who we intended to talk to? */
2306 if (memcmp(&addr, &con->peer_addr, sizeof(con->peer_addr))) {
2307 pr_err("bad peer addr/nonce, want %s/%u, got %s/%u\n",
2308 ceph_pr_addr(&con->peer_addr),
2309 le32_to_cpu(con->peer_addr.nonce),
2310 ceph_pr_addr(&addr), le32_to_cpu(addr.nonce));
2311 con->error_msg = "wrong peer at address";
2315 if (client->required_features & ~features) {
2316 pr_err("RADOS feature set mismatch: my required > server's supported 0x%llx, need 0x%llx\n",
2317 features, client->required_features & ~features);
2318 con->error_msg = "missing required protocol features";
2323 * Both name->type and name->num are set in ceph_con_open() but
2324 * name->num may be bogus in the initial monmap. name->type is
2325 * verified in handle_hello().
2327 WARN_ON(!con->peer_name.type);
2328 con->peer_name.num = cpu_to_le64(global_id);
2329 con->v2.peer_global_seq = global_seq;
2330 con->peer_features = features;
2331 WARN_ON(required_features & ~client->supported_features);
2332 con->v2.server_cookie = cookie;
2334 if (flags & CEPH_MSG_CONNECT_LOSSY) {
2335 ceph_con_flag_set(con, CEPH_CON_F_LOSSYTX);
2336 WARN_ON(con->v2.server_cookie);
2338 WARN_ON(!con->v2.server_cookie);
2341 clear_in_sign_kvecs(con);
2342 clear_out_sign_kvecs(con);
2343 free_conn_bufs(con);
2344 con->delay = 0; /* reset backoff memory */
2346 con->state = CEPH_CON_S_OPEN;
2347 con->v2.out_state = OUT_S_GET_NEXT;
2351 pr_err("failed to decode server_ident\n");
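/*
 * server_ident is the server's half of the identity exchange: it names
 * the peer's addrvec, global_id/global_seq, feature bits, connection
 * flags and session cookie.  We insist that the address matches who we
 * dialed and that the server offers every feature we require, and (per
 * the WARN_ONs) a lossy connection should never come with a cookie while
 * a resumable one always does.  Only then does the connection go
 * CEPH_CON_S_OPEN and start working through the out queue.
 */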
2355 static int process_ident_missing_features(struct ceph_connection *con,
2358 struct ceph_client *client = from_msgr(con->msgr);
2359 u64 missing_features;
2361 if (con->state != CEPH_CON_S_V2_SESSION_CONNECT) {
2362 con->error_msg = "protocol error, unexpected ident_missing_features";
2366 ceph_decode_64_safe(&p, end, missing_features, bad);
2367 pr_err("RADOS feature set mismatch: server's required > my supported 0x%llx, missing 0x%llx\n",
2368 client->supported_features, missing_features);
2369 con->error_msg = "missing required protocol features";
2373 pr_err("failed to decode ident_missing_features\n");
2377 static int process_session_reconnect_ok(struct ceph_connection *con,
2382 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2383 con->error_msg = "protocol error, unexpected session_reconnect_ok";
2387 ceph_decode_64_safe(&p, end, seq, bad);
2389 dout("%s con %p seq %llu\n", __func__, con, seq);
2390 ceph_con_discard_requeued(con, seq);
2392 clear_in_sign_kvecs(con);
2393 clear_out_sign_kvecs(con);
2394 free_conn_bufs(con);
2395 con->delay = 0; /* reset backoff memory */
2397 con->state = CEPH_CON_S_OPEN;
2398 con->v2.out_state = OUT_S_GET_NEXT;
2402 pr_err("failed to decode session_reconnect_ok\n");
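/*
 * session_reconnect_ok acknowledges our reconnect attempt: `seq` tells us
 * how far the server got, so everything up to it is dropped from the
 * requeued list before the connection resumes in CEPH_CON_S_OPEN.
 */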
2406 static int process_session_retry(struct ceph_connection *con,
2412 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2413 con->error_msg = "protocol error, unexpected session_retry";
2417 ceph_decode_64_safe(&p, end, connect_seq, bad);
2419 dout("%s con %p connect_seq %llu\n", __func__, con, connect_seq);
2420 WARN_ON(connect_seq <= con->v2.connect_seq);
2421 con->v2.connect_seq = connect_seq + 1;
2423 free_conn_bufs(con);
2425 reset_out_kvecs(con);
2426 ret = prepare_session_reconnect(con);
2428 pr_err("prepare_session_reconnect (cseq) failed: %d\n", ret);
2435 pr_err("failed to decode session_retry\n");
2439 static int process_session_retry_global(struct ceph_connection *con,
2445 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2446 con->error_msg = "protocol error, unexpected session_retry_global";
2450 ceph_decode_64_safe(&p, end, global_seq, bad);
2452 dout("%s con %p global_seq %llu\n", __func__, con, global_seq);
2453 WARN_ON(global_seq <= con->v2.global_seq);
2454 con->v2.global_seq = ceph_get_global_seq(con->msgr, global_seq);
2456 free_conn_bufs(con);
2458 reset_out_kvecs(con);
2459 ret = prepare_session_reconnect(con);
2461 pr_err("prepare_session_reconnect (gseq) failed: %d\n", ret);
2468 pr_err("failed to decode session_retry_global\n");
2472 static int process_session_reset(struct ceph_connection *con,
2478 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
2479 con->error_msg = "protocol error, unexpected session_reset";
2483 ceph_decode_8_safe(&p, end, full, bad);
2485 con->error_msg = "protocol error, bad session_reset";
2489 pr_info("%s%lld %s session reset\n", ENTITY_NAME(con->peer_name),
2490 ceph_pr_addr(&con->peer_addr));
2491 ceph_con_reset_session(con);
2493 mutex_unlock(&con->mutex);
2494 if (con->ops->peer_reset)
2495 con->ops->peer_reset(con);
2496 mutex_lock(&con->mutex);
2497 if (con->state != CEPH_CON_S_V2_SESSION_RECONNECT) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
2503 free_conn_bufs(con);
2505 reset_out_kvecs(con);
2506 ret = prepare_client_ident(con);
2508 pr_err("prepare_client_ident (rst) failed: %d\n", ret);
2512 con->state = CEPH_CON_S_V2_SESSION_CONNECT;
2516 pr_err("failed to decode session_reset\n");
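/*
 * session_reset means the server no longer has our session (a "full"
 * reset).  ceph_con_reset_session() throws away the v2 cookies and
 * sequence numbers, the peer_reset callback lets the higher layer
 * resubmit its state, and the handshake starts over with a fresh
 * client_ident.  As with auth, the callback runs without con->mutex, so
 * con->state is re-checked afterwards.
 */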
2520 static int process_keepalive2_ack(struct ceph_connection *con,
2523 if (con->state != CEPH_CON_S_OPEN) {
2524 con->error_msg = "protocol error, unexpected keepalive2_ack";
2528 ceph_decode_need(&p, end, sizeof(struct ceph_timespec), bad);
2529 ceph_decode_timespec64(&con->last_keepalive_ack, p);
2531 dout("%s con %p timestamp %lld.%09ld\n", __func__, con,
2532 con->last_keepalive_ack.tv_sec, con->last_keepalive_ack.tv_nsec);
2537 pr_err("failed to decode keepalive2_ack\n");
2541 static int process_ack(struct ceph_connection *con, void *p, void *end)
2545 if (con->state != CEPH_CON_S_OPEN) {
2546 con->error_msg = "protocol error, unexpected ack";
2550 ceph_decode_64_safe(&p, end, seq, bad);
2552 dout("%s con %p seq %llu\n", __func__, con, seq);
2553 ceph_con_discard_sent(con, seq);
2557 pr_err("failed to decode ack\n");
2561 static int process_control(struct ceph_connection *con, void *p, void *end)
2563 int tag = con->v2.in_desc.fd_tag;
2566 dout("%s con %p tag %d len %d\n", __func__, con, tag, (int)(end - p));
2569 case FRAME_TAG_HELLO:
2570 ret = process_hello(con, p, end);
2572 case FRAME_TAG_AUTH_BAD_METHOD:
2573 ret = process_auth_bad_method(con, p, end);
2575 case FRAME_TAG_AUTH_REPLY_MORE:
2576 ret = process_auth_reply_more(con, p, end);
2578 case FRAME_TAG_AUTH_DONE:
2579 ret = process_auth_done(con, p, end);
2581 case FRAME_TAG_AUTH_SIGNATURE:
2582 ret = process_auth_signature(con, p, end);
2584 case FRAME_TAG_SERVER_IDENT:
2585 ret = process_server_ident(con, p, end);
2587 case FRAME_TAG_IDENT_MISSING_FEATURES:
2588 ret = process_ident_missing_features(con, p, end);
2590 case FRAME_TAG_SESSION_RECONNECT_OK:
2591 ret = process_session_reconnect_ok(con, p, end);
2593 case FRAME_TAG_SESSION_RETRY:
2594 ret = process_session_retry(con, p, end);
2596 case FRAME_TAG_SESSION_RETRY_GLOBAL:
2597 ret = process_session_retry_global(con, p, end);
2599 case FRAME_TAG_SESSION_RESET:
2600 ret = process_session_reset(con, p, end);
2602 case FRAME_TAG_KEEPALIVE2_ACK:
2603 ret = process_keepalive2_ack(con, p, end);
2606 ret = process_ack(con, p, end);
2609 pr_err("bad tag %d\n", tag);
2610 con->error_msg = "protocol error, bad tag";
2614 dout("%s con %p error %d\n", __func__, con, ret);
2618 prepare_read_preamble(con);
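/*
 * process_control() is the dispatcher for all non-MESSAGE frames: each
 * FRAME_TAG_* maps to one of the process_*() helpers above and an
 * unrecognized tag is a protocol error.  On success we fall through to
 * prepare_read_preamble() so that the next frame's preamble is read next.
 */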
/*
 * Returns:
 *   1 - con->in_msg set, read message
 *   0 - skip message (no in_msg)
 */
2628 static int process_message_header(struct ceph_connection *con,
2631 struct ceph_frame_desc *desc = &con->v2.in_desc;
2632 struct ceph_msg_header2 *hdr2 = p;
2633 struct ceph_msg_header hdr;
2639 seq = le64_to_cpu(hdr2->seq);
2640 if ((s64)seq - (s64)con->in_seq < 1) {
2641 pr_info("%s%lld %s skipping old message: seq %llu, expected %llu\n",
2642 ENTITY_NAME(con->peer_name),
2643 ceph_pr_addr(&con->peer_addr),
2644 seq, con->in_seq + 1);
2647 if ((s64)seq - (s64)con->in_seq > 1) {
2648 pr_err("bad seq %llu, expected %llu\n", seq, con->in_seq + 1);
2649 con->error_msg = "bad message sequence # for incoming message";
2653 ceph_con_discard_sent(con, le64_to_cpu(hdr2->ack_seq));
2655 fill_header(&hdr, hdr2, desc->fd_lens[1], desc->fd_lens[2],
2656 desc->fd_lens[3], &con->peer_name);
2657 ret = ceph_con_in_msg_alloc(con, &hdr, &skip);
2661 WARN_ON(!con->in_msg ^ skip);
2665 WARN_ON(!con->in_msg);
2666 WARN_ON(con->in_msg->con != con);
2670 static int process_message(struct ceph_connection *con)
2672 ceph_con_process_message(con);
2675 * We could have been closed by ceph_con_close() because
2676 * ceph_con_process_message() temporarily drops con->mutex.
2678 if (con->state != CEPH_CON_S_OPEN) {
		dout("%s con %p state changed to %d\n", __func__, con,
		     con->state);
2684 prepare_read_preamble(con);
2688 static int __handle_control(struct ceph_connection *con, void *p)
2690 void *end = p + con->v2.in_desc.fd_lens[0];
2691 struct ceph_msg *msg;
2694 if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
2695 return process_control(con, p, end);
2697 ret = process_message_header(con, p, end);
2701 prepare_skip_message(con);
2705 msg = con->in_msg; /* set in process_message_header() */
2706 if (front_len(msg)) {
2707 WARN_ON(front_len(msg) > msg->front_alloc_len);
2708 msg->front.iov_len = front_len(msg);
2710 msg->front.iov_len = 0;
2712 if (middle_len(msg)) {
2713 WARN_ON(middle_len(msg) > msg->middle->alloc_len);
2714 msg->middle->vec.iov_len = middle_len(msg);
2715 } else if (msg->middle) {
2716 msg->middle->vec.iov_len = 0;
2719 if (!front_len(msg) && !middle_len(msg) && !data_len(msg))
2720 return process_message(con);
2722 if (con_secure(con))
2723 return prepare_read_tail_secure(con);
2725 return prepare_read_tail_plain(con);
2728 static int handle_preamble(struct ceph_connection *con)
2730 struct ceph_frame_desc *desc = &con->v2.in_desc;
2733 if (con_secure(con)) {
2734 ret = decrypt_preamble(con);
2736 if (ret == -EBADMSG)
2737 con->error_msg = "integrity error, bad preamble auth tag";
2742 ret = decode_preamble(con->v2.in_buf, desc);
2744 if (ret == -EBADMSG)
2745 con->error_msg = "integrity error, bad crc";
2747 con->error_msg = "protocol error, bad preamble";
2751 dout("%s con %p tag %d seg_cnt %d %d+%d+%d+%d\n", __func__,
2752 con, desc->fd_tag, desc->fd_seg_cnt, desc->fd_lens[0],
2753 desc->fd_lens[1], desc->fd_lens[2], desc->fd_lens[3]);
2755 if (!con_secure(con))
2756 return prepare_read_control(con);
2758 if (desc->fd_lens[0] > CEPH_PREAMBLE_INLINE_LEN)
2759 return prepare_read_control_remainder(con);
2761 return __handle_control(con, CTRL_BODY(con->v2.in_buf));
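/*
 * The preamble names the frame: roughly a tag, a segment count and up to
 * four {length, alignment} descriptors, protected by a crc32c in plain
 * mode or by its own auth tag in secure mode (where it is decrypted in
 * place first).  In secure mode a small control payload may already sit
 * inline in the decrypted preamble block and can be handled immediately;
 * otherwise we go on to read the control segment (or its remainder).
 */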
2764 static int handle_control(struct ceph_connection *con)
2766 int ctrl_len = con->v2.in_desc.fd_lens[0];
2770 WARN_ON(con_secure(con));
2772 ret = verify_control_crc(con);
2774 con->error_msg = "integrity error, bad crc";
2778 if (con->state == CEPH_CON_S_V2_AUTH) {
2779 buf = alloc_conn_buf(con, ctrl_len);
2783 memcpy(buf, con->v2.in_kvecs[0].iov_base, ctrl_len);
2784 return __handle_control(con, buf);
2787 return __handle_control(con, con->v2.in_kvecs[0].iov_base);
2790 static int handle_control_remainder(struct ceph_connection *con)
2794 WARN_ON(!con_secure(con));
2796 ret = decrypt_control_remainder(con);
2798 if (ret == -EBADMSG)
2799 con->error_msg = "integrity error, bad control remainder auth tag";
2803 return __handle_control(con, con->v2.in_kvecs[0].iov_base -
2804 CEPH_PREAMBLE_INLINE_LEN);
2807 static int handle_epilogue(struct ceph_connection *con)
2809 u32 front_crc, middle_crc, data_crc;
2812 if (con_secure(con)) {
2813 ret = decrypt_tail(con);
2815 if (ret == -EBADMSG)
2816 con->error_msg = "integrity error, bad epilogue auth tag";
2820 /* just late_status */
2821 ret = decode_epilogue(con->v2.in_buf, NULL, NULL, NULL);
2823 con->error_msg = "protocol error, bad epilogue";
2827 ret = decode_epilogue(con->v2.in_buf, &front_crc,
2828 &middle_crc, &data_crc);
2830 con->error_msg = "protocol error, bad epilogue";
		ret = verify_epilogue_crcs(con, front_crc, middle_crc,
					   data_crc);
2837 con->error_msg = "integrity error, bad crc";
2842 return process_message(con);
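/*
 * The epilogue closes a MESSAGE frame.  In secure mode it is part of the
 * final ciphertext, so decrypt_tail() both authenticates the tail and
 * exposes the late_status byte; in plain mode it additionally carries the
 * per-segment crc32c values, which are checked against what was computed
 * while reading front/middle/data.  Only then is the message handed to
 * process_message().
 */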
2845 static void finish_skip(struct ceph_connection *con)
2847 dout("%s con %p\n", __func__, con);
2849 if (con_secure(con))
2850 gcm_inc_nonce(&con->v2.in_gcm_nonce);
2855 static int populate_in_iter(struct ceph_connection *con)
	dout("%s con %p state %d in_state %d\n", __func__, con, con->state,
	     con->v2.in_state);
2861 WARN_ON(iov_iter_count(&con->v2.in_iter));
2863 if (con->state == CEPH_CON_S_V2_BANNER_PREFIX) {
2864 ret = process_banner_prefix(con);
2865 } else if (con->state == CEPH_CON_S_V2_BANNER_PAYLOAD) {
2866 ret = process_banner_payload(con);
2867 } else if ((con->state >= CEPH_CON_S_V2_HELLO &&
2868 con->state <= CEPH_CON_S_V2_SESSION_RECONNECT) ||
2869 con->state == CEPH_CON_S_OPEN) {
2870 switch (con->v2.in_state) {
2871 case IN_S_HANDLE_PREAMBLE:
2872 ret = handle_preamble(con);
2874 case IN_S_HANDLE_CONTROL:
2875 ret = handle_control(con);
2877 case IN_S_HANDLE_CONTROL_REMAINDER:
2878 ret = handle_control_remainder(con);
2880 case IN_S_PREPARE_READ_DATA:
2881 ret = prepare_read_data(con);
2883 case IN_S_PREPARE_READ_DATA_CONT:
2884 prepare_read_data_cont(con);
2887 case IN_S_PREPARE_READ_ENC_PAGE:
2888 prepare_read_enc_page(con);
2891 case IN_S_HANDLE_EPILOGUE:
2892 ret = handle_epilogue(con);
2894 case IN_S_FINISH_SKIP:
2899 WARN(1, "bad in_state %d", con->v2.in_state);
2903 WARN(1, "bad state %d", con->state);
2907 dout("%s con %p error %d\n", __func__, con, ret);
2911 if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
2913 dout("%s con %p populated %zu\n", __func__, con,
2914 iov_iter_count(&con->v2.in_iter));
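/*
 * populate_in_iter() is the heart of the receive state machine: based on
 * con->state (banner, hello..reconnect, open) and con->v2.in_state
 * (IN_S_*), the matching handler consumes what was just read and sets up
 * in_iter for the next chunk.  Returning success with an empty iterator
 * would mean the state machine forgot to queue anything, hence the
 * WARN_ON on the success path.
 */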
2918 int ceph_con_v2_try_read(struct ceph_connection *con)
2922 dout("%s con %p state %d need %zu\n", __func__, con, con->state,
2923 iov_iter_count(&con->v2.in_iter));
2925 if (con->state == CEPH_CON_S_PREOPEN)
2929 * We should always have something pending here. If not,
2930 * avoid calling populate_in_iter() as if we read something
2931 * (ceph_tcp_recv() would immediately return 1).
2933 if (WARN_ON(!iov_iter_count(&con->v2.in_iter)))
2937 ret = ceph_tcp_recv(con);
2941 ret = populate_in_iter(con);
2943 if (ret && ret != -EAGAIN && !con->error_msg)
2944 con->error_msg = "read processing error";
2950 static void queue_data(struct ceph_connection *con)
2954 con->v2.out_epil.data_crc = -1;
2955 ceph_msg_data_cursor_init(&con->v2.out_cursor, con->out_msg,
2956 data_len(con->out_msg));
2958 get_bvec_at(&con->v2.out_cursor, &bv);
2959 set_out_bvec(con, &bv, true);
2960 con->v2.out_state = OUT_S_QUEUE_DATA_CONT;
2963 static void queue_data_cont(struct ceph_connection *con)
2967 con->v2.out_epil.data_crc = ceph_crc32c_page(
2968 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
2969 con->v2.out_bvec.bv_offset, con->v2.out_bvec.bv_len);
2971 ceph_msg_data_advance(&con->v2.out_cursor, con->v2.out_bvec.bv_len);
2972 if (con->v2.out_cursor.total_resid) {
2973 get_bvec_at(&con->v2.out_cursor, &bv);
2974 set_out_bvec(con, &bv, true);
2975 WARN_ON(con->v2.out_state != OUT_S_QUEUE_DATA_CONT);
	/*
	 * We've written all data. Queue epilogue. Once it's written,
	 * we are done.
	 */
2983 reset_out_kvecs(con);
2984 prepare_epilogue_plain(con, false);
2985 con->v2.out_state = OUT_S_FINISH_MESSAGE;
2988 static void queue_enc_page(struct ceph_connection *con)
2992 dout("%s con %p i %d resid %d\n", __func__, con, con->v2.out_enc_i,
2993 con->v2.out_enc_resid);
2994 WARN_ON(!con->v2.out_enc_resid);
2996 bvec_set_page(&bv, con->v2.out_enc_pages[con->v2.out_enc_i],
2997 min(con->v2.out_enc_resid, (int)PAGE_SIZE), 0);
2999 set_out_bvec(con, &bv, false);
3000 con->v2.out_enc_i++;
3001 con->v2.out_enc_resid -= bv.bv_len;
3003 if (con->v2.out_enc_resid) {
3004 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE);
3009 * We've queued the last piece of ciphertext (ending with
3010 * epilogue) + auth tag. Once it's written, we are done.
3012 WARN_ON(con->v2.out_enc_i != con->v2.out_enc_page_cnt);
3013 con->v2.out_state = OUT_S_FINISH_MESSAGE;
3016 static void queue_zeros(struct ceph_connection *con)
3018 dout("%s con %p out_zero %d\n", __func__, con, con->v2.out_zero);
3020 if (con->v2.out_zero) {
3021 set_out_bvec_zero(con);
3022 con->v2.out_zero -= con->v2.out_bvec.bv_len;
3023 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3028 * We've zero-filled everything up to epilogue. Queue epilogue
3029 * with late_status set to ABORTED and crcs adjusted for zeros.
3030 * Once it's written, we are done patching up for the revoke.
3032 reset_out_kvecs(con);
3033 prepare_epilogue_plain(con, true);
3034 con->v2.out_state = OUT_S_FINISH_MESSAGE;
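/*
 * queue_zeros() is used when a partially sent message has been revoked:
 * the bytes already on the wire cannot be taken back, so the rest of the
 * frame is filled with zeros and finished with a plain epilogue whose
 * late_status marks the message as ABORTED, letting the peer discard it
 * instead of tripping over a short frame.
 */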
3037 static void finish_message(struct ceph_connection *con)
3039 dout("%s con %p msg %p\n", __func__, con, con->out_msg);
	/* we end up here in both plain and secure modes */
3042 if (con->v2.out_enc_pages) {
3043 WARN_ON(!con->v2.out_enc_page_cnt);
3044 ceph_release_page_vector(con->v2.out_enc_pages,
3045 con->v2.out_enc_page_cnt);
3046 con->v2.out_enc_pages = NULL;
3047 con->v2.out_enc_page_cnt = 0;
3049 /* message may have been revoked */
3051 ceph_msg_put(con->out_msg);
3052 con->out_msg = NULL;
3055 con->v2.out_state = OUT_S_GET_NEXT;
3058 static int populate_out_iter(struct ceph_connection *con)
	dout("%s con %p state %d out_state %d\n", __func__, con, con->state,
	     con->v2.out_state);
3064 WARN_ON(iov_iter_count(&con->v2.out_iter));
3066 if (con->state != CEPH_CON_S_OPEN) {
3067 WARN_ON(con->state < CEPH_CON_S_V2_BANNER_PREFIX ||
3068 con->state > CEPH_CON_S_V2_SESSION_RECONNECT);
3069 goto nothing_pending;
3072 switch (con->v2.out_state) {
3073 case OUT_S_QUEUE_DATA:
3074 WARN_ON(!con->out_msg);
3077 case OUT_S_QUEUE_DATA_CONT:
3078 WARN_ON(!con->out_msg);
3079 queue_data_cont(con);
3081 case OUT_S_QUEUE_ENC_PAGE:
3082 queue_enc_page(con);
3084 case OUT_S_QUEUE_ZEROS:
3085 WARN_ON(con->out_msg); /* revoked */
3088 case OUT_S_FINISH_MESSAGE:
3089 finish_message(con);
3091 case OUT_S_GET_NEXT:
3094 WARN(1, "bad out_state %d", con->v2.out_state);
3098 WARN_ON(con->v2.out_state != OUT_S_GET_NEXT);
3099 if (ceph_con_flag_test_and_clear(con, CEPH_CON_F_KEEPALIVE_PENDING)) {
3100 ret = prepare_keepalive2(con);
3102 pr_err("prepare_keepalive2 failed: %d\n", ret);
3105 } else if (!list_empty(&con->out_queue)) {
3106 ceph_con_get_out_msg(con);
3107 ret = prepare_message(con);
3109 pr_err("prepare_message failed: %d\n", ret);
3112 } else if (con->in_seq > con->in_seq_acked) {
3113 ret = prepare_ack(con);
3115 pr_err("prepare_ack failed: %d\n", ret);
3119 goto nothing_pending;
3123 if (WARN_ON(!iov_iter_count(&con->v2.out_iter)))
3125 dout("%s con %p populated %zu\n", __func__, con,
3126 iov_iter_count(&con->v2.out_iter));
3130 WARN_ON(iov_iter_count(&con->v2.out_iter));
3131 dout("%s con %p nothing pending\n", __func__, con);
3132 ceph_con_flag_clear(con, CEPH_CON_F_WRITE_PENDING);
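/*
 * When the current out_state winds down to OUT_S_GET_NEXT, the next thing
 * to transmit is picked in priority order: a pending keepalive2, then the
 * next queued message, then a bare ack if we have received messages that
 * were not yet acknowledged.  If none apply, CEPH_CON_F_WRITE_PENDING is
 * cleared and nothing-pending is reported.
 */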
3136 int ceph_con_v2_try_write(struct ceph_connection *con)
3140 dout("%s con %p state %d have %zu\n", __func__, con, con->state,
3141 iov_iter_count(&con->v2.out_iter));
3143 /* open the socket first? */
3144 if (con->state == CEPH_CON_S_PREOPEN) {
3145 WARN_ON(con->peer_addr.type != CEPH_ENTITY_ADDR_TYPE_MSGR2);
3148 * Always bump global_seq. Bump connect_seq only if
3149 * there is a session (i.e. we are reconnecting and will
3150 * send session_reconnect instead of client_ident).
3152 con->v2.global_seq = ceph_get_global_seq(con->msgr, 0);
3153 if (con->v2.server_cookie)
3154 con->v2.connect_seq++;
3156 ret = prepare_read_banner_prefix(con);
3158 pr_err("prepare_read_banner_prefix failed: %d\n", ret);
3159 con->error_msg = "connect error";
3163 reset_out_kvecs(con);
3164 ret = prepare_banner(con);
3166 pr_err("prepare_banner failed: %d\n", ret);
3167 con->error_msg = "connect error";
3171 ret = ceph_tcp_connect(con);
3173 pr_err("ceph_tcp_connect failed: %d\n", ret);
3174 con->error_msg = "connect error";
3179 if (!iov_iter_count(&con->v2.out_iter)) {
3180 ret = populate_out_iter(con);
3182 if (ret && ret != -EAGAIN && !con->error_msg)
3183 con->error_msg = "write processing error";
3188 tcp_sock_set_cork(con->sock->sk, true);
3190 ret = ceph_tcp_send(con);
3194 ret = populate_out_iter(con);
3196 if (ret && ret != -EAGAIN && !con->error_msg)
3197 con->error_msg = "write processing error";
3202 tcp_sock_set_cork(con->sock->sk, false);
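/*
 * The write path alternates populate_out_iter() and ceph_tcp_send() with
 * the socket corked, so the small kvecs (preamble, control, epilogue) and
 * the data pages of one or more frames are coalesced into fewer TCP
 * segments; the cork is released once there is nothing pending or the
 * socket stops accepting data.
 */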
3206 static u32 crc32c_zeros(u32 crc, int zero_len)
3211 len = min(zero_len, (int)PAGE_SIZE);
3212 crc = crc32c(crc, page_address(ceph_zero_page), len);
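/*
 * crc32c_zeros() extends a running crc32c over zero_len zero bytes
 * without allocating anything: the preallocated ceph_zero_page is fed to
 * crc32c() at most PAGE_SIZE at a time until zero_len is consumed.  It
 * backs the prepare_zero_*() helpers below, which must produce CRCs that
 * match the zero-filled bytes queue_zeros() will put on the wire.
 */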
3219 static void prepare_zero_front(struct ceph_connection *con, int resid)
3223 WARN_ON(!resid || resid > front_len(con->out_msg));
3224 sent = front_len(con->out_msg) - resid;
3225 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3228 con->v2.out_epil.front_crc =
3229 crc32c(-1, con->out_msg->front.iov_base, sent);
3230 con->v2.out_epil.front_crc =
3231 crc32c_zeros(con->v2.out_epil.front_crc, resid);
3233 con->v2.out_epil.front_crc = crc32c_zeros(-1, resid);
3236 con->v2.out_iter.count -= resid;
3237 out_zero_add(con, resid);
3240 static void prepare_zero_middle(struct ceph_connection *con, int resid)
3244 WARN_ON(!resid || resid > middle_len(con->out_msg));
3245 sent = middle_len(con->out_msg) - resid;
3246 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3249 con->v2.out_epil.middle_crc =
3250 crc32c(-1, con->out_msg->middle->vec.iov_base, sent);
3251 con->v2.out_epil.middle_crc =
3252 crc32c_zeros(con->v2.out_epil.middle_crc, resid);
3254 con->v2.out_epil.middle_crc = crc32c_zeros(-1, resid);
3257 con->v2.out_iter.count -= resid;
3258 out_zero_add(con, resid);
3261 static void prepare_zero_data(struct ceph_connection *con)
3263 dout("%s con %p\n", __func__, con);
3264 con->v2.out_epil.data_crc = crc32c_zeros(-1, data_len(con->out_msg));
3265 out_zero_add(con, data_len(con->out_msg));
3268 static void revoke_at_queue_data(struct ceph_connection *con)
3273 WARN_ON(!data_len(con->out_msg));
3274 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3275 resid = iov_iter_count(&con->v2.out_iter);
3277 boundary = front_len(con->out_msg) + middle_len(con->out_msg);
3278 if (resid > boundary) {
3280 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3281 dout("%s con %p was sending head\n", __func__, con);
3282 if (front_len(con->out_msg))
3283 prepare_zero_front(con, front_len(con->out_msg));
3284 if (middle_len(con->out_msg))
3285 prepare_zero_middle(con, middle_len(con->out_msg));
3286 prepare_zero_data(con);
3287 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3288 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3292 boundary = middle_len(con->out_msg);
3293 if (resid > boundary) {
3295 dout("%s con %p was sending front\n", __func__, con);
3296 prepare_zero_front(con, resid);
3297 if (middle_len(con->out_msg))
3298 prepare_zero_middle(con, middle_len(con->out_msg));
3299 prepare_zero_data(con);
3305 dout("%s con %p was sending middle\n", __func__, con);
3306 prepare_zero_middle(con, resid);
3307 prepare_zero_data(con);
3311 static void revoke_at_queue_data_cont(struct ceph_connection *con)
3313 int sent, resid; /* current piece of data */
3315 WARN_ON(!data_len(con->out_msg));
3316 WARN_ON(!iov_iter_is_bvec(&con->v2.out_iter));
3317 resid = iov_iter_count(&con->v2.out_iter);
3318 WARN_ON(!resid || resid > con->v2.out_bvec.bv_len);
3319 sent = con->v2.out_bvec.bv_len - resid;
3320 dout("%s con %p sent %d resid %d\n", __func__, con, sent, resid);
3323 con->v2.out_epil.data_crc = ceph_crc32c_page(
3324 con->v2.out_epil.data_crc, con->v2.out_bvec.bv_page,
3325 con->v2.out_bvec.bv_offset, sent);
3326 ceph_msg_data_advance(&con->v2.out_cursor, sent);
3328 WARN_ON(resid > con->v2.out_cursor.total_resid);
3329 con->v2.out_epil.data_crc = crc32c_zeros(con->v2.out_epil.data_crc,
3330 con->v2.out_cursor.total_resid);
3332 con->v2.out_iter.count -= resid;
3333 out_zero_add(con, con->v2.out_cursor.total_resid);
3337 static void revoke_at_finish_message(struct ceph_connection *con)
3342 WARN_ON(!iov_iter_is_kvec(&con->v2.out_iter));
3343 resid = iov_iter_count(&con->v2.out_iter);
3345 if (!front_len(con->out_msg) && !middle_len(con->out_msg) &&
3346 !data_len(con->out_msg)) {
3347 WARN_ON(!resid || resid > MESSAGE_HEAD_PLAIN_LEN);
3348 dout("%s con %p was sending head (empty message) - noop\n",
3353 boundary = front_len(con->out_msg) + middle_len(con->out_msg) +
3354 CEPH_EPILOGUE_PLAIN_LEN;
3355 if (resid > boundary) {
3357 WARN_ON(resid > MESSAGE_HEAD_PLAIN_LEN);
3358 dout("%s con %p was sending head\n", __func__, con);
3359 if (front_len(con->out_msg))
3360 prepare_zero_front(con, front_len(con->out_msg));
3361 if (middle_len(con->out_msg))
3362 prepare_zero_middle(con, middle_len(con->out_msg));
3363 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3364 WARN_ON(iov_iter_count(&con->v2.out_iter) != resid);
3365 con->v2.out_state = OUT_S_QUEUE_ZEROS;
3369 boundary = middle_len(con->out_msg) + CEPH_EPILOGUE_PLAIN_LEN;
3370 if (resid > boundary) {
3372 dout("%s con %p was sending front\n", __func__, con);
3373 prepare_zero_front(con, resid);
3374 if (middle_len(con->out_msg))
3375 prepare_zero_middle(con, middle_len(con->out_msg));
3376 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3381 boundary = CEPH_EPILOGUE_PLAIN_LEN;
3382 if (resid > boundary) {
3384 dout("%s con %p was sending middle\n", __func__, con);
3385 prepare_zero_middle(con, resid);
3386 con->v2.out_iter.count -= CEPH_EPILOGUE_PLAIN_LEN;
3392 dout("%s con %p was sending epilogue - noop\n", __func__, con);
3395 void ceph_con_v2_revoke(struct ceph_connection *con)
3397 WARN_ON(con->v2.out_zero);
3399 if (con_secure(con)) {
3400 WARN_ON(con->v2.out_state != OUT_S_QUEUE_ENC_PAGE &&
3401 con->v2.out_state != OUT_S_FINISH_MESSAGE);
3402 dout("%s con %p secure - noop\n", __func__, con);
3406 switch (con->v2.out_state) {
3407 case OUT_S_QUEUE_DATA:
3408 revoke_at_queue_data(con);
3410 case OUT_S_QUEUE_DATA_CONT:
3411 revoke_at_queue_data_cont(con);
3413 case OUT_S_FINISH_MESSAGE:
3414 revoke_at_finish_message(con);
3417 WARN(1, "bad out_state %d", con->v2.out_state);
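/*
 * Outgoing revoke: in secure mode the frame was encrypted as a whole, so
 * once its ciphertext pages are queued there is nothing left to patch and
 * the frame goes out as-is.  In plain mode, what remains of the frame is
 * rewritten in place depending on how far we got (head, data or
 * epilogue), zero-filling and fixing up CRCs via the revoke_at_*()
 * helpers above.
 */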
3422 static void revoke_at_prepare_read_data(struct ceph_connection *con)
3427 WARN_ON(con_secure(con));
3428 WARN_ON(!data_len(con->in_msg));
3429 WARN_ON(!iov_iter_is_kvec(&con->v2.in_iter));
3430 resid = iov_iter_count(&con->v2.in_iter);
3433 remaining = data_len(con->in_msg) + CEPH_EPILOGUE_PLAIN_LEN;
	dout("%s con %p resid %d remaining %d\n", __func__, con, resid,
	     remaining);
3436 con->v2.in_iter.count -= resid;
3437 set_in_skip(con, resid + remaining);
3438 con->v2.in_state = IN_S_FINISH_SKIP;
3441 static void revoke_at_prepare_read_data_cont(struct ceph_connection *con)
3443 int recved, resid; /* current piece of data */
3446 WARN_ON(con_secure(con));
3447 WARN_ON(!data_len(con->in_msg));
3448 WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3449 resid = iov_iter_count(&con->v2.in_iter);
3450 WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3451 recved = con->v2.in_bvec.bv_len - resid;
3452 dout("%s con %p recved %d resid %d\n", __func__, con, recved, resid);
3455 ceph_msg_data_advance(&con->v2.in_cursor, recved);
3456 WARN_ON(resid > con->v2.in_cursor.total_resid);
3458 remaining = CEPH_EPILOGUE_PLAIN_LEN;
3459 dout("%s con %p total_resid %zu remaining %d\n", __func__, con,
3460 con->v2.in_cursor.total_resid, remaining);
3461 con->v2.in_iter.count -= resid;
3462 set_in_skip(con, con->v2.in_cursor.total_resid + remaining);
3463 con->v2.in_state = IN_S_FINISH_SKIP;
3466 static void revoke_at_prepare_read_enc_page(struct ceph_connection *con)
3468 int resid; /* current enc page (not necessarily data) */
3470 WARN_ON(!con_secure(con));
3471 WARN_ON(!iov_iter_is_bvec(&con->v2.in_iter));
3472 resid = iov_iter_count(&con->v2.in_iter);
3473 WARN_ON(!resid || resid > con->v2.in_bvec.bv_len);
3475 dout("%s con %p resid %d enc_resid %d\n", __func__, con, resid,
3476 con->v2.in_enc_resid);
3477 con->v2.in_iter.count -= resid;
3478 set_in_skip(con, resid + con->v2.in_enc_resid);
3479 con->v2.in_state = IN_S_FINISH_SKIP;
3482 static void revoke_at_handle_epilogue(struct ceph_connection *con)
3486 resid = iov_iter_count(&con->v2.in_iter);
3489 dout("%s con %p resid %d\n", __func__, con, resid);
3490 con->v2.in_iter.count -= resid;
3491 set_in_skip(con, resid);
3492 con->v2.in_state = IN_S_FINISH_SKIP;
3495 void ceph_con_v2_revoke_incoming(struct ceph_connection *con)
3497 switch (con->v2.in_state) {
3498 case IN_S_PREPARE_READ_DATA:
3499 revoke_at_prepare_read_data(con);
3501 case IN_S_PREPARE_READ_DATA_CONT:
3502 revoke_at_prepare_read_data_cont(con);
3504 case IN_S_PREPARE_READ_ENC_PAGE:
3505 revoke_at_prepare_read_enc_page(con);
3507 case IN_S_HANDLE_EPILOGUE:
3508 revoke_at_handle_epilogue(con);
3511 WARN(1, "bad in_state %d", con->v2.in_state);
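/*
 * Incoming revoke: the message we were reading into has been taken away,
 * but its bytes are still arriving.  Each revoke_at_*() helper above
 * computes how much of the frame is left (remaining data, encrypted
 * pages, or just the epilogue) and switches to IN_S_FINISH_SKIP, where
 * the remainder is read and discarded to keep the stream in sync.
 */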
3516 bool ceph_con_v2_opened(struct ceph_connection *con)
3518 return con->v2.peer_global_seq;
3521 void ceph_con_v2_reset_session(struct ceph_connection *con)
3523 con->v2.client_cookie = 0;
3524 con->v2.server_cookie = 0;
3525 con->v2.global_seq = 0;
3526 con->v2.connect_seq = 0;
3527 con->v2.peer_global_seq = 0;
3530 void ceph_con_v2_reset_protocol(struct ceph_connection *con)
3532 iov_iter_truncate(&con->v2.in_iter, 0);
3533 iov_iter_truncate(&con->v2.out_iter, 0);
3534 con->v2.out_zero = 0;
3536 clear_in_sign_kvecs(con);
3537 clear_out_sign_kvecs(con);
3538 free_conn_bufs(con);
3540 if (con->v2.in_enc_pages) {
3541 WARN_ON(!con->v2.in_enc_page_cnt);
3542 ceph_release_page_vector(con->v2.in_enc_pages,
3543 con->v2.in_enc_page_cnt);
3544 con->v2.in_enc_pages = NULL;
3545 con->v2.in_enc_page_cnt = 0;
3547 if (con->v2.out_enc_pages) {
3548 WARN_ON(!con->v2.out_enc_page_cnt);
3549 ceph_release_page_vector(con->v2.out_enc_pages,
3550 con->v2.out_enc_page_cnt);
3551 con->v2.out_enc_pages = NULL;
3552 con->v2.out_enc_page_cnt = 0;
3555 con->v2.con_mode = CEPH_CON_MODE_UNKNOWN;
3556 memzero_explicit(&con->v2.in_gcm_nonce, CEPH_GCM_IV_LEN);
3557 memzero_explicit(&con->v2.out_gcm_nonce, CEPH_GCM_IV_LEN);
3559 if (con->v2.hmac_tfm) {
3560 crypto_free_shash(con->v2.hmac_tfm);
3561 con->v2.hmac_tfm = NULL;
3563 if (con->v2.gcm_req) {
3564 aead_request_free(con->v2.gcm_req);
3565 con->v2.gcm_req = NULL;
3567 if (con->v2.gcm_tfm) {
3568 crypto_free_aead(con->v2.gcm_tfm);
3569 con->v2.gcm_tfm = NULL;