net/smc/smc.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  Definitions for the SMC module (socket related)
 *
 *  Copyright IBM Corp. 2016
 *
 *  Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
#ifndef __SMC_H
#define __SMC_H

#include <linux/socket.h>
#include <linux/types.h>
#include <linux/compiler.h> /* __aligned */
#include <net/genetlink.h>
#include <net/sock.h>

#include "smc_ib.h"

#define SMC_V1          1               /* SMC version V1 */
#define SMC_V2          2               /* SMC version V2 */
#define SMC_RELEASE     0

#define SMCPROTO_SMC            0       /* SMC protocol, IPv4 */
#define SMCPROTO_SMC6           1       /* SMC protocol, IPv6 */

#define SMC_MAX_ISM_DEVS        8       /* max # of proposed non-native ISM
                                         * devices
                                         */
#define SMC_AUTOCORKING_DEFAULT_SIZE    0x10000 /* 64K by default */

extern struct proto smc_proto;
extern struct proto smc_proto6;

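/* ATOMIC64_INIT is only defined when the kernel provides atomic64_t;
 * KERNEL_HAS_ATOMIC64 selects the atomic cursor representation in
 * union smc_host_cursor below, otherwise cursors are a plain u64
 * protected by conn->acurs_lock.
 */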
#ifdef ATOMIC64_INIT
#define KERNEL_HAS_ATOMIC64
#endif

enum smc_state {                /* possible states of an SMC socket */
        SMC_ACTIVE      = 1,
        SMC_INIT        = 2,
        SMC_CLOSED      = 7,
        SMC_LISTEN      = 10,
        /* normal close */
        SMC_PEERCLOSEWAIT1      = 20,
        SMC_PEERCLOSEWAIT2      = 21,
        SMC_APPFINCLOSEWAIT     = 24,
        SMC_APPCLOSEWAIT1       = 22,
        SMC_APPCLOSEWAIT2       = 23,
        SMC_PEERFINCLOSEWAIT    = 25,
        /* abnormal close */
        SMC_PEERABORTWAIT       = 26,
        SMC_PROCESSABORT        = 27,
};

struct smc_link_group;

struct smc_wr_rx_hdr {  /* common prefix part of LLC and CDC to demultiplex */
        union {
                u8 type;
#if defined(__BIG_ENDIAN_BITFIELD)
                struct {
                        u8 llc_version:4,
                           llc_type:4;
                };
#elif defined(__LITTLE_ENDIAN_BITFIELD)
                struct {
                        u8 llc_type:4,
                           llc_version:4;
                };
#endif
        };
} __aligned(1);

struct smc_cdc_conn_state_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
        u8      peer_done_writing : 1;  /* Sending done indicator */
        u8      peer_conn_closed : 1;   /* Peer connection closed indicator */
        u8      peer_conn_abort : 1;    /* Abnormal close indicator */
        u8      reserved : 5;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
        u8      reserved : 5;
        u8      peer_conn_abort : 1;
        u8      peer_conn_closed : 1;
        u8      peer_done_writing : 1;
#endif
};

struct smc_cdc_producer_flags {
#if defined(__BIG_ENDIAN_BITFIELD)
        u8      write_blocked : 1;      /* Writing Blocked, no rx buf space */
        u8      urg_data_pending : 1;   /* Urgent Data Pending */
        u8      urg_data_present : 1;   /* Urgent Data Present */
        u8      cons_curs_upd_req : 1;  /* cursor update requested */
        u8      failover_validation : 1;/* message replay due to failover */
        u8      reserved : 3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
        u8      reserved : 3;
        u8      failover_validation : 1;
        u8      cons_curs_upd_req : 1;
        u8      urg_data_present : 1;
        u8      urg_data_pending : 1;
        u8      write_blocked : 1;
#endif
};

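/* A cursor is overlaid with one 64-bit word so that wrap and count can
 * be snapshotted or updated in a single atomic operation where
 * KERNEL_HAS_ATOMIC64 is set (cf. smc_curs_copy() in smc_cdc.h).
 */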
/* in host byte order */
union smc_host_cursor { /* SMC cursor - an offset in an RMBE */
        struct {
                u16     reserved;
                u16     wrap;           /* window wrap sequence number */
                u32     count;          /* cursor (= offset) part */
        };
#ifdef KERNEL_HAS_ATOMIC64
        atomic64_t              acurs;  /* for atomic processing */
#else
        u64                     acurs;  /* for atomic processing */
#endif
} __aligned(8);

/* in host byte order, except for flag bitfields in network byte order */
struct smc_host_cdc_msg {               /* Connection Data Control message */
        struct smc_wr_rx_hdr            common; /* .type = 0xFE */
        u8                              len;    /* length = 44 */
        u16                             seqno;  /* connection seq # */
        u32                             token;  /* alert_token */
        union smc_host_cursor           prod;           /* producer cursor */
        union smc_host_cursor           cons;           /* consumer cursor,
                                                         * piggy backed "ack"
                                                         */
        struct smc_cdc_producer_flags   prod_flags;     /* conn. tx/rx status */
        struct smc_cdc_conn_state_flags conn_state_flags; /* peer conn. status*/
        u8                              reserved[18];
} __aligned(8);

enum smc_urg_state {
        SMC_URG_VALID   = 1,                    /* data present */
        SMC_URG_NOTYET  = 2,                    /* data pending */
        SMC_URG_READ    = 3,                    /* data was already read */
};

struct smc_mark_woken {
        bool woken;
        void *key;
        wait_queue_entry_t wait_entry;
};

struct smc_connection {
        struct rb_node          alert_node;
        struct smc_link_group   *lgr;           /* link group of connection */
        struct smc_link         *lnk;           /* assigned SMC-R link */
        u32                     alert_token_local; /* unique conn. id */
        u8                      peer_rmbe_idx;  /* from tcp handshake */
        int                     peer_rmbe_size; /* size of peer rx buffer */
        atomic_t                peer_rmbe_space;/* remaining free bytes in peer
                                                 * rmbe
                                                 */
        int                     rtoken_idx;     /* idx to peer RMB rkey/addr */

        struct smc_buf_desc     *sndbuf_desc;   /* send buffer descriptor */
        struct smc_buf_desc     *rmb_desc;      /* RMBE descriptor */
        int                     rmbe_size_short;/* compressed notation */
        int                     rmbe_update_limit;
                                                /* lower limit for consumer
                                                 * cursor update
                                                 */

        struct smc_host_cdc_msg local_tx_ctrl;  /* host byte order staging
                                                 * buffer for CDC msg send
                                                 * .prod cf. TCP snd_nxt
                                                 * .cons cf. TCP sends ack
                                                 */
        union smc_host_cursor   local_tx_ctrl_fin;
                                                /* prod crsr - confirmed by peer
                                                 */
        union smc_host_cursor   tx_curs_prep;   /* tx - prepared data
                                                 * snd_max..wmem_alloc
                                                 */
        union smc_host_cursor   tx_curs_sent;   /* tx - sent data
                                                 * snd_nxt ?
                                                 */
        union smc_host_cursor   tx_curs_fin;    /* tx - confirmed by peer
                                                 * snd-wnd-begin ?
                                                 */
        atomic_t                sndbuf_space;   /* remaining space in sndbuf */
        u16                     tx_cdc_seq;     /* sequence # for CDC send */
        u16                     tx_cdc_seq_fin; /* sequence # - tx completed */
        spinlock_t              send_lock;      /* protect wr_sends */
        atomic_t                cdc_pend_tx_wr; /* number of pending tx CDC wqe
                                                 * - inc when post wqe,
                                                 * - dec on polled tx cqe
                                                 */
        wait_queue_head_t       cdc_pend_tx_wq; /* wakeup on no cdc_pend_tx_wr*/
        atomic_t                tx_pushing;     /* nr_threads trying tx push */
        struct delayed_work     tx_work;        /* retry of smc_cdc_msg_send */
        u32                     tx_off;         /* base offset in peer rmb */

        struct smc_host_cdc_msg local_rx_ctrl;  /* filled during event_handl.
                                                 * .prod cf. TCP rcv_nxt
                                                 * .cons cf. TCP snd_una
                                                 */
        union smc_host_cursor   rx_curs_confirmed; /* confirmed to peer
                                                    * source of snd_una ?
                                                    */
        union smc_host_cursor   urg_curs;       /* points at urgent byte */
        enum smc_urg_state      urg_state;
        bool                    urg_tx_pend;    /* urgent data staged */
        bool                    urg_rx_skip_pend;
                                                /* indicate urgent oob data
                                                 * read, but previous regular
                                                 * data still pending
                                                 */
        char                    urg_rx_byte;    /* urgent byte */
        bool                    tx_in_release_sock;
                                                /* flush pending tx data in
                                                 * sock release_cb()
                                                 */
        atomic_t                bytes_to_rcv;   /* arrived data,
                                                 * not yet received
                                                 */
        atomic_t                splice_pending; /* number of spliced bytes
                                                 * pending processing
                                                 */
#ifndef KERNEL_HAS_ATOMIC64
        spinlock_t              acurs_lock;     /* protect cursors */
#endif
        struct work_struct      close_work;     /* peer sent some closing */
        struct work_struct      abort_work;     /* abort the connection */
        struct tasklet_struct   rx_tsklet;      /* Receiver tasklet for SMC-D */
        u8                      rx_off;         /* receive offset:
                                                 * 0 for SMC-R, 32 for SMC-D
                                                 */
        u64                     peer_token;     /* SMC-D token of peer */
        u8                      killed : 1;     /* abnormal termination */
        u8                      freed : 1;      /* normal termination */
        u8                      out_of_sync : 1; /* out of sync with peer */
};

struct smc_sock {                               /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
        void                    (*clcsk_state_change)(struct sock *sk);
                                                /* original state_change fct. */
        void                    (*clcsk_data_ready)(struct sock *sk);
                                                /* original data_ready fct. */
        void                    (*clcsk_write_space)(struct sock *sk);
                                                /* original write_space fct. */
        void                    (*clcsk_error_report)(struct sock *sk);
                                                /* original error_report fct. */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
        struct work_struct      connect_work;   /* handle non-blocking connect*/
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
        struct list_head        accept_q;       /* sockets to be accepted */
        spinlock_t              accept_q_lock;  /* protects accept_q */
        bool                    limit_smc_hs;   /* put constraint on handshake */
        bool                    use_fallback;   /* fallback to tcp */
        int                     fallback_rsn;   /* reason for fallback */
        u32                     peer_diagnosis; /* decline reason from peer */
        atomic_t                queued_smc_hs;  /* queued smc handshakes */
        struct inet_connection_sock_af_ops              af_ops;
        const struct inet_connection_sock_af_ops        *ori_af_ops;
                                                /* original af ops */
        int                     sockopt_defer_accept;
                                                /* sockopt TCP_DEFER_ACCEPT
                                                 * value
                                                 */
        u8                      wait_close_tx_prepared : 1;
                                                /* shutdown wr or close
                                                 * started, waiting for unsent
                                                 * data to be sent
                                                 */
        u8                      connect_nonblock : 1;
                                                /* non-blocking connect in
                                                 * flight
                                                 */
        struct mutex            clcsock_release_lock;
                                                /* protects clcsock of a listen
                                                 * socket
                                                 */
};

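/* a struct sock pointer to an SMC socket can simply be cast: the sk
 * member is the first member of struct smc_sock
 */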
static inline struct smc_sock *smc_sk(const struct sock *sk)
{
        return (struct smc_sock *)sk;
}

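/* mark all original clcsock callbacks as "not saved yet", see
 * smc_clcsock_replace_cb() below
 */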
static inline void smc_init_saved_callbacks(struct smc_sock *smc)
{
        smc->clcsk_state_change = NULL;
        smc->clcsk_data_ready   = NULL;
        smc->clcsk_write_space  = NULL;
        smc->clcsk_error_report = NULL;
}

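/* the internal TCP socket keeps a tagged pointer to its parent smc_sock
 * in sk_user_data; mask off the SK_USER_DATA_NOCOPY flag bit to recover
 * the pointer
 */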
static inline struct smc_sock *smc_clcsock_user_data(const struct sock *clcsk)
{
        return (struct smc_sock *)
               ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY);
}

/* save target_cb in saved_cb, and replace target_cb with new_cb */
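/* typical use, e.g. to divert data_ready events of the internal TCP
 * socket to SMC (sketch, cf. af_smc.c):
 *      smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
 *                             smc_clcsock_data_ready,
 *                             &smc->clcsk_data_ready);
 */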
static inline void smc_clcsock_replace_cb(void (**target_cb)(struct sock *),
                                          void (*new_cb)(struct sock *),
                                          void (**saved_cb)(struct sock *))
{
        /* only save once */
        if (!*saved_cb)
                *saved_cb = *target_cb;
        *target_cb = new_cb;
}

/* restore target_cb to saved_cb, and reset saved_cb to NULL */
static inline void smc_clcsock_restore_cb(void (**target_cb)(struct sock *),
                                          void (**saved_cb)(struct sock *))
{
        if (!*saved_cb)
                return;
        *target_cb = *saved_cb;
        *saved_cb = NULL;
}

extern struct workqueue_struct  *smc_hs_wq;     /* wq for handshake work */
extern struct workqueue_struct  *smc_close_wq;  /* wq for close work */

#define SMC_SYSTEMID_LEN                8

extern u8       local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */

#define ntohll(x) be64_to_cpu(x)
#define htonll(x) cpu_to_be64(x)

/* convert a u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
        __be32 t;

        t = cpu_to_be32(host);
        memcpy(net, ((u8 *)&t) + 1, 3);
}

/* convert a received 3 byte field into host byte order */
static inline u32 ntoh24(u8 *net)
{
        __be32 t = 0;

        memcpy(((u8 *)&t) + 1, net, 3);
        return be32_to_cpu(t);
}

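/* example round trip of a 3 byte field (big endian on the wire):
 *      u8 wire[3];
 *      hton24(wire, 0x123456);         wire[] = { 0x12, 0x34, 0x56 }
 *      ntoh24(wire);                   returns 0x123456
 */
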
#ifdef CONFIG_XFRM
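/* an IPsec policy on the internal TCP socket rules out SMC; callers use
 * this check to fall back to plain TCP (cf. SMC_CLC_DECL_IPSEC)
 */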
static inline bool using_ipsec(struct smc_sock *smc)
{
        return (smc->clcsock->sk->sk_policy[0] ||
                smc->clcsock->sk->sk_policy[1]) ? true : false;
}
#else
static inline bool using_ipsec(struct smc_sock *smc)
{
        return false;
}
#endif

struct smc_gidlist;

struct sock *smc_accept_dequeue(struct sock *parent, struct socket *new_sock);
void smc_close_non_accepted(struct sock *sk);
void smc_fill_gid_list(struct smc_link_group *lgr,
                       struct smc_gidlist *gidlist,
                       struct smc_ib_device *known_dev, u8 *known_gid);

/* smc handshake limitation interface for netlink */
int smc_nl_dump_hs_limitation(struct sk_buff *skb, struct netlink_callback *cb);
int smc_nl_enable_hs_limitation(struct sk_buff *skb, struct genl_info *info);
int smc_nl_disable_hs_limitation(struct sk_buff *skb, struct genl_info *info);

#endif  /* __SMC_H */