net/mlx5e: Honor user choice of IPsec replay window size
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */

#include "mlx5_core.h"
#include "en.h"
#include "ipsec.h"
#include "lib/crypto.h"

enum {
        MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
        MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET,
};

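/* Report which IPsec offload modes this device supports. The returned
 * bitmask combines MLX5_IPSEC_CAP_* flags (crypto, packet offload,
 * tunnel mode, ESP-in-UDP, RoCE, priority and ESN support), each gated
 * on the corresponding FW/HW capability bits queried below.
 */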
u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
        u32 caps = 0;

        if (!MLX5_CAP_GEN(mdev, ipsec_offload))
                return 0;

        if (!MLX5_CAP_GEN(mdev, log_max_dek))
                return 0;

        if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
            MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
                return 0;

        if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
            !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
                return 0;

        if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
            !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
                return 0;

        if (MLX5_CAP_IPSEC(mdev, ipsec_crypto_offload) &&
            MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
                caps |= MLX5_IPSEC_CAP_CRYPTO;

        if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_add_esp_trasport) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                              reformat_del_esp_trasport) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
                        caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;

                if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
                     MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
                    MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
                        caps |= MLX5_IPSEC_CAP_PRIO;

                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_l2_to_l3_esp_tunnel) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                              reformat_l3_esp_tunnel_to_l2))
                        caps |= MLX5_IPSEC_CAP_TUNNEL;

                if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
                                              reformat_add_esp_transport_over_udp) &&
                    MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
                                              reformat_del_esp_transport_over_udp))
                        caps |= MLX5_IPSEC_CAP_ESPINUDP;
        }

        if (mlx5_get_roce_state(mdev) &&
            MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
            MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
                caps |= MLX5_IPSEC_CAP_ROCE;

        if (!caps)
                return 0;

        if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
                caps |= MLX5_IPSEC_CAP_ESN;

        /* We can accommodate up to 2^24 different IPsec objects
         * because we use up to 24 bits of flow table metadata
         * to hold the IPsec object's unique handle.
         */
        WARN_ON_ONCE(MLX5_CAP_IPSEC(mdev, log_max_ipsec_offload) > 24);
        return caps;
}
EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);

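/* Program the IPsec ASO context for packet offload. For inbound SAs,
 * window_sz carries the replay window size chosen by the user instead
 * of a hardcoded value; attrs->replay_esn.replay_window is expected to
 * arrive here already encoded as a device enum, not as a bit count.
 *
 * A minimal sketch of the caller-side encoding (the actual switch lives
 * outside this file when the xfrm attributes are built; shown here for
 * illustration only, assuming the MLX5_IPSEC_ASO_REPLAY_WIN_* enum):
 *
 *	switch (x->replay_esn->replay_window) {
 *	case 32:
 *		attrs->replay_esn.replay_window =
 *			MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
 *		break;
 *	case 64:
 *		attrs->replay_esn.replay_window =
 *			MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
 *		break;
 *	...
 *	}
 */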
static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
                                     struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        void *aso_ctx;

        aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
        if (attrs->replay_esn.trigger) {
                MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);

                if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
                        MLX5_SET(ipsec_aso, aso_ctx, window_sz,
                                 attrs->replay_esn.replay_window);
                        MLX5_SET(ipsec_aso, aso_ctx, mode,
                                 MLX5_IPSEC_ASO_REPLAY_PROTECTION);
                }
                MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
                         attrs->replay_esn.esn);
        }

        /* ASO context */
        MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
        MLX5_SET(ipsec_obj, obj, full_offload, 1);
        MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
        /* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that is used
         * in flow steering to perform matching against. Please be
         * aware that this register was chosen arbitrarily and can't
         * be used in other places as long as IPsec packet offload
         * is active.
         */
        MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
        if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
                MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);

        if (attrs->lft.hard_packet_limit != XFRM_INF) {
                MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
                         attrs->lft.hard_packet_limit);
                MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
        }

        if (attrs->lft.soft_packet_limit != XFRM_INF) {
                MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
                         attrs->lft.soft_packet_limit);

                MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
        }
}

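/* Create the IPSEC general object in FW for this SA. The object carries
 * the GCM salt and implicit IV, the ESN state when enabled, and a
 * reference (dekn) to the DEK created in mlx5_ipsec_create_sa_ctx(); on
 * success the FW-assigned object id is stored in sa_entry->ipsec_obj_id.
 */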
static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
        void *obj, *salt_p, *salt_iv_p;
        struct mlx5e_hw_objs *res;
        int err;

        obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);

        /* salt and seq_iv */
        salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
        memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));

        MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
        salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
        memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
        /* esn */
        if (attrs->replay_esn.trigger) {
                MLX5_SET(ipsec_obj, obj, esn_en, 1);
                MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
                MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
        }

        MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);

        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
                 MLX5_GENERAL_OBJECT_TYPES_IPSEC);

        res = &mdev->mlx5e_res.hw_objs;
        if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
                mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);

        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (!err)
                sa_entry->ipsec_obj_id =
                        MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);

        return err;
}

static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];

        MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
                 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
                 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);

        mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

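/* Build the HW context for an SA: first create a DEK from the AES-GCM
 * key material, then create the IPSEC object that references it. On
 * failure the DEK is released so no dangling crypto objects remain.
 */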
int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        int err;

        /* key */
        err = mlx5_create_encryption_key(mdev, aes_gcm->aes_key,
                                         aes_gcm->key_len / BITS_PER_BYTE,
                                         MLX5_ACCEL_OBJ_IPSEC_KEY,
                                         &sa_entry->enc_key_id);
        if (err) {
                mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
                return err;
        }

        err = mlx5_create_ipsec_obj(sa_entry);
        if (err) {
                mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
                goto err_enc_key;
        }

        return 0;

err_enc_key:
        mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
        return err;
}

void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);

        mlx5_destroy_ipsec_obj(sa_entry);
        mlx5_destroy_encryption_key(mdev, sa_entry->enc_key_id);
}

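/* Update the ESN state (MSB and overlap bit) of an existing IPSEC
 * object. The object is queried first because FW advertises which
 * fields may be changed through modify_field_select; if the ESN fields
 * are not modifiable, the update is rejected with -EOPNOTSUPP.
 */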
static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
                                 const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
        u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
        u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
        u64 modify_field_select = 0;
        u64 general_obj_types;
        void *obj;
        int err;

        general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
        if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
                return -EINVAL;

        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (err) {
                mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
                              sa_entry->ipsec_obj_id, err);
                return err;
        }

        obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
        modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);

        /* esn */
        if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
            !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
                return -EOPNOTSUPP;

        obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
        MLX5_SET64(ipsec_obj, obj, modify_field_select,
                   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
                           MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
        MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
        MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);

        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);

        return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
}

void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
                                const struct mlx5_accel_esp_xfrm_attrs *attrs)
{
        int err;

        err = mlx5_modify_ipsec_obj(sa_entry, attrs);
        if (err)
                return;

        memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
}

static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
                                   struct mlx5_wqe_aso_ctrl_seg *data)
{
        data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
        data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
                                      MLX5_ASO_ALWAYS_TRUE << 4;

        mlx5e_ipsec_aso_query(sa_entry, data);
}

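/* Handle an armed ESN event: decide whether the sequence number space
 * wrapped (mode_param dropped below the mid-window scope, so esn_msb is
 * incremented) or only entered the overlap region, push the new ESN
 * state to FW, and finally write BIT(54) back through the ASO, which
 * re-arms the ESN event for the next wrap.
 */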
static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
                                         u32 mode_param)
{
        struct mlx5_accel_esp_xfrm_attrs attrs = {};
        struct mlx5_wqe_aso_ctrl_seg data = {};

        if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
                sa_entry->esn_state.esn_msb++;
                sa_entry->esn_state.overlap = 0;
        } else {
                sa_entry->esn_state.overlap = 1;
        }

        mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);

        /* It is safe to execute the modify below unlocked since the only
         * flows that could affect this HW object are create, destroy and
         * this work.
         *
         * The creation flow can't co-exist with this modify work, the
         * destruction flow would cancel this work, and this work is a
         * single entity that can't conflict with itself.
         */
        spin_unlock_bh(&sa_entry->x->lock);
        mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
        spin_lock_bh(&sa_entry->x->lock);

        data.data_offset_condition_operand =
                MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
        data.bitwise_data = cpu_to_be64(BIT_ULL(54));
        data.data_mask = data.bitwise_data;

        mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_hard(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_wqe_aso_ctrl_seg data = {};

        data.data_offset_condition_operand =
                MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
        data.bitwise_data = cpu_to_be64(BIT_ULL(57) + BIT_ULL(31));
        data.data_mask = data.bitwise_data;
        mlx5e_ipsec_aso_update(sa_entry, &data);
}

static void mlx5e_ipsec_aso_update_soft(struct mlx5e_ipsec_sa_entry *sa_entry,
                                        u32 val)
{
        struct mlx5_wqe_aso_ctrl_seg data = {};

        data.data_offset_condition_operand =
                MLX5_IPSEC_ASO_REMOVE_FLOW_SOFT_LFT_OFFSET;
        data.bitwise_data = cpu_to_be64(val);
        data.data_mask = cpu_to_be64(U32_MAX);
        mlx5e_ipsec_aso_update(sa_entry, &data);
}

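/* Process a lifetime event reported through the ASO context. The HW
 * counters are narrower than the 64-bit xfrm packet lifetimes, so the
 * limits are consumed in rounds (numb_rounds_soft/numb_rounds_hard):
 * each hard event re-arms the hard counter for another round, the soft
 * event is re-armed only on the round where the user's soft limit
 * actually falls, and xfrm_state_check_expire() notifies the stack.
 */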
static void mlx5e_ipsec_handle_limits(struct mlx5e_ipsec_sa_entry *sa_entry)
{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5e_ipsec_aso *aso = ipsec->aso;
        bool soft_arm, hard_arm;
        u64 hard_cnt;

        lockdep_assert_held(&sa_entry->x->lock);

        soft_arm = !MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm);
        hard_arm = !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm);
        if (!soft_arm && !hard_arm)
                /* It is not a lifetime event */
                return;

        hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
        if (!hard_cnt || hard_arm) {
                /* It is possible to see the packet counter equal to zero
                 * without the hard limit event armed. Such a situation can
                 * occur if the packet count decreased while we handled the
                 * soft limit event.
                 *
                 * However, it would be a HW/FW bug if the hard limit event
                 * is raised and the packet counter is not zero.
                 */
                WARN_ON_ONCE(hard_arm && hard_cnt);

                /* Notify about hard limit */
                xfrm_state_check_expire(sa_entry->x);
                return;
        }

        /* We are in a soft limit event. */
        if (!sa_entry->limits.soft_limit_hit &&
            sa_entry->limits.round == attrs->lft.numb_rounds_soft) {
                sa_entry->limits.soft_limit_hit = true;
                /* Notify about soft limit */
                xfrm_state_check_expire(sa_entry->x);

                if (sa_entry->limits.round == attrs->lft.numb_rounds_hard)
                        goto hard;

                if (attrs->lft.soft_packet_limit > BIT_ULL(31)) {
                        /* We cannot avoid a soft_value that might have the high
                         * bit set. For instance soft_value=2^31+1 cannot be
                         * adjusted to the low bit clear version of soft_value=1
                         * because it is too close to 0.
                         *
                         * Thus we have this corner case where we can hit the
                         * soft_limit with the high bit set, but cannot adjust
                         * the counter. Thus we set a temporary interrupt_value
                         * at least 2^30 away from here and do the adjustment
                         * then.
                         */
                        mlx5e_ipsec_aso_update_soft(sa_entry,
                                                    BIT_ULL(31) - BIT_ULL(30));
                        sa_entry->limits.fix_limit = true;
                        return;
                }

                sa_entry->limits.fix_limit = true;
        }

hard:
        if (sa_entry->limits.round == attrs->lft.numb_rounds_hard) {
                mlx5e_ipsec_aso_update_soft(sa_entry, 0);
                attrs->lft.soft_packet_limit = XFRM_INF;
                return;
        }

        mlx5e_ipsec_aso_update_hard(sa_entry);
        sa_entry->limits.round++;
        if (sa_entry->limits.round == attrs->lft.numb_rounds_soft)
                mlx5e_ipsec_aso_update_soft(sa_entry,
                                            attrs->lft.soft_packet_limit);
        if (sa_entry->limits.fix_limit) {
                sa_entry->limits.fix_limit = false;
                mlx5e_ipsec_aso_update_soft(sa_entry, BIT_ULL(31) - 1);
        }
}

static void mlx5e_ipsec_handle_event(struct work_struct *_work)
{
        struct mlx5e_ipsec_work *work =
                container_of(_work, struct mlx5e_ipsec_work, work);
        struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
        struct mlx5_accel_esp_xfrm_attrs *attrs;
        struct mlx5e_ipsec_aso *aso;
        int ret;

        aso = sa_entry->ipsec->aso;
        attrs = &sa_entry->attrs;

        spin_lock_bh(&sa_entry->x->lock);
        ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
        if (ret)
                goto unlock;

        if (attrs->replay_esn.trigger &&
            !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
                u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);

                mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
        }

        if (attrs->lft.soft_packet_limit != XFRM_INF)
                mlx5e_ipsec_handle_limits(sa_entry);

unlock:
        spin_unlock_bh(&sa_entry->x->lock);
        kfree(work);
}

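/* Notifier callback for OBJECT_CHANGE EQEs. Runs in atomic (notifier)
 * context, so it only resolves the SA from the object id and defers the
 * actual ASO query and state handling to the ipsec workqueue.
 */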
static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
                             void *data)
{
        struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
        struct mlx5e_ipsec_sa_entry *sa_entry;
        struct mlx5_eqe_obj_change *object;
        struct mlx5e_ipsec_work *work;
        struct mlx5_eqe *eqe = data;
        u16 type;

        if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
                return NOTIFY_DONE;

        object = &eqe->data.obj_change;
        type = be16_to_cpu(object->obj_type);

        if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
                return NOTIFY_DONE;

        sa_entry = xa_load(&ipsec->sadb, be32_to_cpu(object->obj_id));
        if (!sa_entry)
                return NOTIFY_DONE;

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
        work->data = sa_entry;

        queue_work(ipsec->wq, &work->work);
        return NOTIFY_OK;
}

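/* Set up the ASO infrastructure used to query and modify IPsec HW
 * state: a DMA-mapped scratch buffer the device writes the ASO context
 * into, an ASO SQ, and the notifier registration for OBJECT_CHANGE
 * events.
 */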
int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_aso *aso;
        struct mlx5e_hw_objs *res;
        struct device *pdev;
        int err;

        aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
        if (!aso)
                return -ENOMEM;

        res = &mdev->mlx5e_res.hw_objs;

        pdev = mlx5_core_dma_dev(mdev);
        aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
                                       DMA_BIDIRECTIONAL);
        err = dma_mapping_error(pdev, aso->dma_addr);
        if (err)
                goto err_dma;

        aso->aso = mlx5_aso_create(mdev, res->pdn);
        if (IS_ERR(aso->aso)) {
                err = PTR_ERR(aso->aso);
                goto err_aso_create;
        }

        spin_lock_init(&aso->lock);
        ipsec->nb.notifier_call = mlx5e_ipsec_event;
        mlx5_notifier_register(mdev, &ipsec->nb);

        ipsec->aso = aso;
        return 0;

err_aso_create:
        dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
                         DMA_BIDIRECTIONAL);
err_dma:
        kfree(aso);
        return err;
}

void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
{
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5e_ipsec_aso *aso;
        struct device *pdev;

        aso = ipsec->aso;
        pdev = mlx5_core_dma_dev(mdev);

        mlx5_notifier_unregister(mdev, &ipsec->nb);
        mlx5_aso_destroy(aso->aso);
        dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
                         DMA_BIDIRECTIONAL);
        kfree(aso);
}

static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
                                 struct mlx5_wqe_aso_ctrl_seg *data)
{
        if (!data)
                return;

        ctrl->data_mask_mode = data->data_mask_mode;
        ctrl->condition_1_0_operand = data->condition_1_0_operand;
        ctrl->condition_1_0_offset = data->condition_1_0_offset;
        ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
        ctrl->condition_0_data = data->condition_0_data;
        ctrl->condition_0_mask = data->condition_0_mask;
        ctrl->condition_1_data = data->condition_1_data;
        ctrl->condition_1_mask = data->condition_1_mask;
        ctrl->bitwise_data = data->bitwise_data;
        ctrl->data_mask = data->data_mask;
}

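/* Post a single ASO WQE for this SA and busy-poll its completion for up
 * to 10ms. With data == NULL this is a pure read of the HW ASO context
 * into aso->ctx; when data is supplied, its control fields are copied
 * into the WQE so the same post also modifies HW state.
 */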
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
                          struct mlx5_wqe_aso_ctrl_seg *data)
{
        struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5e_ipsec_aso *aso = ipsec->aso;
        struct mlx5_core_dev *mdev = ipsec->mdev;
        struct mlx5_wqe_aso_ctrl_seg *ctrl;
        struct mlx5e_hw_objs *res;
        struct mlx5_aso_wqe *wqe;
        unsigned long expires;
        u8 ds_cnt;
        int ret;

        lockdep_assert_held(&sa_entry->x->lock);
        res = &mdev->mlx5e_res.hw_objs;

        spin_lock_bh(&aso->lock);
        memset(aso->ctx, 0, sizeof(aso->ctx));
        wqe = mlx5_aso_get_wqe(aso->aso);
        ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
        mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
                           MLX5_ACCESS_ASO_OPC_MOD_IPSEC);

        ctrl = &wqe->aso_ctrl;
        ctrl->va_l =
                cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
        ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
        ctrl->l_key = cpu_to_be32(res->mkey);
        mlx5e_ipsec_aso_copy(ctrl, data);

        mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
        expires = jiffies + msecs_to_jiffies(10);
        do {
                ret = mlx5_aso_poll_cq(aso->aso, false);
                if (ret)
                        /* We are in atomic context */
                        udelay(10);
        } while (ret && time_is_after_jiffies(expires));
        spin_unlock_bh(&aso->lock);
        return ret;
}