Merge tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git...
authorJakub Kicinski <kuba@kernel.org>
Sat, 10 Dec 2022 04:06:34 +0000 (20:06 -0800)
committerJakub Kicinski <kuba@kernel.org>
Sat, 10 Dec 2022 04:06:35 +0000 (20:06 -0800)
Steffen Klassert says:

====================
ipsec-next 2022-12-09

1) Add xfrm packet offload core API.
   From Leon Romanovsky.

2) Add xfrm packet offload support for mlx5.
   From Leon Romanovsky and Raed Salem.

3) Fix a typo in an error message.
   From Colin Ian King.

* tag 'ipsec-next-2022-12-09' of git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next: (38 commits)
  xfrm: Fix spelling mistake "oflload" -> "offload"
  net/mlx5e: Open mlx5 driver to accept IPsec packet offload
  net/mlx5e: Handle ESN update events
  net/mlx5e: Handle hardware IPsec limits events
  net/mlx5e: Update IPsec soft and hard limits
  net/mlx5e: Store all XFRM SAs in Xarray
  net/mlx5e: Provide intermediate pointer to access IPsec struct
  net/mlx5e: Skip IPsec encryption for TX path without matching policy
  net/mlx5e: Add statistics for Rx/Tx IPsec offloaded flows
  net/mlx5e: Improve IPsec flow steering autogroup
  net/mlx5e: Configure IPsec packet offload flow steering
  net/mlx5e: Use same coding pattern for Rx and Tx flows
  net/mlx5e: Add XFRM policy offload logic
  net/mlx5e: Create IPsec policy offload tables
  net/mlx5e: Generalize creation of default IPsec miss group and rule
  net/mlx5e: Group IPsec miss handles into separate struct
  net/mlx5e: Make clear what IPsec rx_err does
  net/mlx5e: Flatten the IPsec RX add rule path
  net/mlx5e: Refactor FTE setup code to be more clear
  net/mlx5e: Move IPsec flow table creation to separate function
  ...
====================

Link: https://lore.kernel.org/r/20221209093310.4018731-1-steffen.klassert@secunet.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
30 files changed:
Documentation/networking/xfrm_device.rst
drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
drivers/net/ethernet/intel/ixgbevf/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.h
drivers/net/ethernet/netronome/nfp/crypto/ipsec.c
drivers/net/netdevsim/ipsec.c
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/net/xfrm.h
include/uapi/linux/xfrm.h
net/xfrm/xfrm_device.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c

index 01391df..c43ace7 100644 (file)
@@ -5,6 +5,7 @@ XFRM device - offloading the IPsec computations
 ===============================================
 
 Shannon Nelson <shannon.nelson@oracle.com>
+Leon Romanovsky <leonro@nvidia.com>
 
 
 Overview
@@ -18,10 +19,21 @@ can radically increase throughput and decrease CPU utilization.  The XFRM
 Device interface allows NIC drivers to offer to the stack access to the
 hardware offload.
 
+Right now, there are two types of hardware offload that kernel supports.
+ * IPsec crypto offload:
+   * NIC performs encrypt/decrypt
+   * Kernel does everything else
+ * IPsec packet offload:
+   * NIC performs encrypt/decrypt
+   * NIC does encapsulation
+   * Kernel and NIC have SA and policy in-sync
+   * NIC handles the SA and policies states
+   * The Kernel talks to the keymanager
+
 Userland access to the offload is typically through a system such as
 libreswan or KAME/raccoon, but the iproute2 'ip xfrm' command set can
 be handy when experimenting.  An example command might look something
-like this::
+like this for crypto offload:
 
   ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
      reqid 0x07 replay-window 32 \
@@ -29,6 +41,17 @@ like this::
      sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
      offload dev eth4 dir in
 
+and for packet offload
+
+  ip x s add proto esp dst 14.0.0.70 src 14.0.0.52 spi 0x07 mode transport \
+     reqid 0x07 replay-window 32 \
+     aead 'rfc4106(gcm(aes))' 0x44434241343332312423222114131211f4f3f2f1 128 \
+     sel src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp \
+     offload packet dev eth4 dir in
+
+  ip x p add src 14.0.0.70 dst 14.0.0.52 offload packet dev eth4 dir in
+  tmpl src 14.0.0.70 dst 14.0.0.52 proto esp reqid 10000 mode transport
+
 Yes, that's ugly, but that's what shell scripts and/or libreswan are for.
 
 
@@ -40,17 +63,24 @@ Callbacks to implement
 
   /* from include/linux/netdevice.h */
   struct xfrmdev_ops {
+        /* Crypto and Packet offload callbacks */
        int     (*xdo_dev_state_add) (struct xfrm_state *x);
        void    (*xdo_dev_state_delete) (struct xfrm_state *x);
        void    (*xdo_dev_state_free) (struct xfrm_state *x);
        bool    (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                       struct xfrm_state *x);
        void    (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+
+        /* Solely packet offload callbacks */
+       void    (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+       int     (*xdo_dev_policy_add) (struct xfrm_policy *x);
+       void    (*xdo_dev_policy_delete) (struct xfrm_policy *x);
+       void    (*xdo_dev_policy_free) (struct xfrm_policy *x);
   };
 
-The NIC driver offering ipsec offload will need to implement these
-callbacks to make the offload available to the network stack's
-XFRM subsystem.  Additionally, the feature bits NETIF_F_HW_ESP and
+The NIC driver offering ipsec offload will need to implement callbacks
+relevant to supported offload to make the offload available to the network
+stack's XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
 
 
@@ -79,7 +109,8 @@ and an indication of whether it is for Rx or Tx.  The driver should
 
                ===========   ===================================
                0             success
-               -EOPNETSUPP   offload not supported, try SW IPsec
+               -EOPNETSUPP   offload not supported, try SW IPsec,
+                              not applicable for packet offload mode
                other         fail the request
                ===========   ===================================
 
@@ -96,6 +127,7 @@ will serviceable.  This can check the packet information to be sure the
 offload can be supported (e.g. IPv4 or IPv6, no IPv4 options, etc) and
 return true of false to signify its support.
 
+Crypto offload mode:
 When ready to send, the driver needs to inspect the Tx packet for the
 offload information, including the opaque context, and set up the packet
 send accordingly::
@@ -139,13 +171,25 @@ the stack in xfrm_input().
 In ESN mode, xdo_dev_state_advance_esn() is called from xfrm_replay_advance_esn().
 Driver will check packet seq number and update HW ESN state machine if needed.
 
+Packet offload mode:
+HW adds and deletes XFRM headers. So in RX path, XFRM stack is bypassed if HW
+reported success. In TX path, the packet lefts kernel without extra header
+and not encrypted, the HW is responsible to perform it.
+
 When the SA is removed by the user, the driver's xdo_dev_state_delete()
-is asked to disable the offload.  Later, xdo_dev_state_free() is called
-from a garbage collection routine after all reference counts to the state
+and xdo_dev_policy_delete() are asked to disable the offload.  Later,
+xdo_dev_state_free() and xdo_dev_policy_free() are called from a garbage
+collection routine after all reference counts to the state and policy
 have been removed and any remaining resources can be cleared for the
 offload state.  How these are used by the driver will depend on specific
 hardware needs.
 
 As a netdev is set to DOWN the XFRM stack's netdev listener will call
-xdo_dev_state_delete() and xdo_dev_state_free() on any remaining offloaded
-states.
+xdo_dev_state_delete(), xdo_dev_policy_delete(), xdo_dev_state_free() and
+xdo_dev_policy_free() on any remaining offloaded states.
+
+Outcome of HW handling packets, the XFRM core can't count hard, soft limits.
+The HW/driver are responsible to perform it and provide accurate data when
+xdo_dev_state_update_curlft() is called. In case of one of these limits
+occuried, the driver needs to call to xfrm_state_check_expire() to make sure
+that XFRM performs rekeying sequence.
index 5855905..ca21794 100644 (file)
@@ -283,6 +283,10 @@ static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
                pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }
+       if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+               pr_debug("Unsupported xfrm offload\n");
+               return -EINVAL;
+       }
 
        sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
        if (!sa_entry) {
index 774de63..53a969e 100644 (file)
@@ -585,6 +585,11 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs)
                return -EINVAL;
        }
 
+       if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+               netdev_err(dev, "Unsupported ipsec offload type\n");
+               return -EINVAL;
+       }
+
        if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
                struct rx_sa rsa;
 
index 9984ebc..c1cf540 100644 (file)
@@ -280,6 +280,11 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
                return -EINVAL;
        }
 
+       if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+               netdev_err(dev, "Unsupported ipsec offload type\n");
+               return -EINVAL;
+       }
+
        if (xs->xso.dir == XFRM_DEV_OFFLOAD_IN) {
                struct rx_sa rsa;
 
index 65790ff..2d77fb8 100644 (file)
@@ -1245,4 +1245,5 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_t
 int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi);
 int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats);
 #endif
+int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
 #endif /* __MLX5_EN_H__ */
index bf2741e..379c6dc 100644 (file)
@@ -84,7 +84,8 @@ enum {
        MLX5E_ARFS_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
 #endif
 #ifdef CONFIG_MLX5_EN_IPSEC
-       MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+       MLX5E_ACCEL_FS_POL_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+       MLX5E_ACCEL_FS_ESP_FT_LEVEL,
        MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
 #endif
 };
index 9c1c24d..78af8a3 100644 (file)
@@ -162,7 +162,6 @@ mlx5e_tc_meter_modify(struct mlx5_core_dev *mdev,
                           MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER);
 
        aso_ctrl = &aso_wqe->aso_ctrl;
-       memset(aso_ctrl, 0, sizeof(*aso_ctrl));
        aso_ctrl->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE << 6;
        aso_ctrl->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE |
                                          MLX5_ASO_ALWAYS_TRUE << 4;
index 1b03ab0..bb90239 100644 (file)
@@ -45,55 +45,9 @@ static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(struct xfrm_state *x)
        return (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
 }
 
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
-                                             unsigned int handle)
+static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(struct xfrm_policy *x)
 {
-       struct mlx5e_ipsec_sa_entry *sa_entry;
-       struct xfrm_state *ret = NULL;
-
-       rcu_read_lock();
-       hash_for_each_possible_rcu(ipsec->sadb_rx, sa_entry, hlist, handle)
-               if (sa_entry->handle == handle) {
-                       ret = sa_entry->x;
-                       xfrm_state_hold(ret);
-                       break;
-               }
-       rcu_read_unlock();
-
-       return ret;
-}
-
-static int mlx5e_ipsec_sadb_rx_add(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-       unsigned int handle = sa_entry->ipsec_obj_id;
-       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-       struct mlx5e_ipsec_sa_entry *_sa_entry;
-       unsigned long flags;
-
-       rcu_read_lock();
-       hash_for_each_possible_rcu(ipsec->sadb_rx, _sa_entry, hlist, handle)
-               if (_sa_entry->handle == handle) {
-                       rcu_read_unlock();
-                       return  -EEXIST;
-               }
-       rcu_read_unlock();
-
-       spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-       sa_entry->handle = handle;
-       hash_add_rcu(ipsec->sadb_rx, &sa_entry->hlist, sa_entry->handle);
-       spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
-
-       return 0;
-}
-
-static void mlx5e_ipsec_sadb_rx_del(struct mlx5e_ipsec_sa_entry *sa_entry)
-{
-       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ipsec->sadb_rx_lock, flags);
-       hash_del_rcu(&sa_entry->hlist);
-       spin_unlock_irqrestore(&ipsec->sadb_rx_lock, flags);
+       return (struct mlx5e_ipsec_pol_entry *)x->xdo.offload_handle;
 }
 
 static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
@@ -129,9 +83,33 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry)
        return false;
 }
 
-static void
-mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
-                                  struct mlx5_accel_esp_xfrm_attrs *attrs)
+static void mlx5e_ipsec_init_limits(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                   struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+       struct xfrm_state *x = sa_entry->x;
+
+       attrs->hard_packet_limit = x->lft.hard_packet_limit;
+       if (x->lft.soft_packet_limit == XFRM_INF)
+               return;
+
+       /* Hardware decrements hard_packet_limit counter through
+        * the operation. While fires an event when soft_packet_limit
+        * is reached. It emans that we need substitute the numbers
+        * in order to properly count soft limit.
+        *
+        * As an example:
+        * XFRM user sets soft limit is 2 and hard limit is 9 and
+        * expects to see soft event after 2 packets and hard event
+        * after 9 packets. In our case, the hard limit will be set
+        * to 9 and soft limit is comparator to 7 so user gets the
+        * soft event after 2 packeta
+        */
+       attrs->soft_packet_limit =
+               x->lft.hard_packet_limit - x->lft.soft_packet_limit;
+}
+
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                       struct mlx5_accel_esp_xfrm_attrs *attrs)
 {
        struct xfrm_state *x = sa_entry->x;
        struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
@@ -157,33 +135,31 @@ mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
        memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
               sizeof(aes_gcm->salt));
 
+       attrs->authsize = crypto_aead_authsize(aead) / 4; /* in dwords */
+
        /* iv len */
        aes_gcm->icv_len = x->aead->alg_icv_len;
 
        /* esn */
        if (sa_entry->esn_state.trigger) {
-               attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED;
+               attrs->esn_trigger = true;
                attrs->esn = sa_entry->esn_state.esn;
-               if (sa_entry->esn_state.overlap)
-                       attrs->flags |= MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP;
+               attrs->esn_overlap = sa_entry->esn_state.overlap;
+               attrs->replay_window = x->replay_esn->replay_window;
        }
 
-       /* action */
-       attrs->action = (x->xso.dir == XFRM_DEV_OFFLOAD_OUT) ?
-                               MLX5_ACCEL_ESP_ACTION_ENCRYPT :
-                                     MLX5_ACCEL_ESP_ACTION_DECRYPT;
-       /* flags */
-       attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
-                       MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
-                       MLX5_ACCEL_ESP_FLAGS_TUNNEL;
-
+       attrs->dir = x->xso.dir;
        /* spi */
        attrs->spi = be32_to_cpu(x->id.spi);
 
        /* source , destination ips */
        memcpy(&attrs->saddr, x->props.saddr.a6, sizeof(attrs->saddr));
        memcpy(&attrs->daddr, x->id.daddr.a6, sizeof(attrs->daddr));
-       attrs->is_ipv6 = (x->props.family != AF_INET);
+       attrs->family = x->props.family;
+       attrs->type = x->xso.type;
+       attrs->reqid = x->props.reqid;
+
+       mlx5e_ipsec_init_limits(sa_entry, attrs);
 }
 
 static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
@@ -215,11 +191,6 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
                netdev_info(netdev, "Only IPv4/6 xfrm states may be offloaded\n");
                return -EINVAL;
        }
-       if (x->props.mode != XFRM_MODE_TRANSPORT &&
-           x->props.mode != XFRM_MODE_TUNNEL) {
-               dev_info(&netdev->dev, "Only transport and tunnel xfrm states may be offloaded\n");
-               return -EINVAL;
-       }
        if (x->id.proto != IPPROTO_ESP) {
                netdev_info(netdev, "Only ESP xfrm state may be offloaded\n");
                return -EINVAL;
@@ -253,6 +224,67 @@ static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
                netdev_info(netdev, "Cannot offload xfrm states with geniv other than seqiv\n");
                return -EINVAL;
        }
+       switch (x->xso.type) {
+       case XFRM_DEV_OFFLOAD_CRYPTO:
+               if (!(mlx5_ipsec_device_caps(priv->mdev) &
+                     MLX5_IPSEC_CAP_CRYPTO)) {
+                       netdev_info(netdev, "Crypto offload is not supported\n");
+                       return -EINVAL;
+               }
+
+               if (x->props.mode != XFRM_MODE_TRANSPORT &&
+                   x->props.mode != XFRM_MODE_TUNNEL) {
+                       netdev_info(netdev, "Only transport and tunnel xfrm states may be offloaded\n");
+                       return -EINVAL;
+               }
+               break;
+       case XFRM_DEV_OFFLOAD_PACKET:
+               if (!(mlx5_ipsec_device_caps(priv->mdev) &
+                     MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+                       netdev_info(netdev, "Packet offload is not supported\n");
+                       return -EINVAL;
+               }
+
+               if (x->props.mode != XFRM_MODE_TRANSPORT) {
+                       netdev_info(netdev, "Only transport xfrm states may be offloaded in packet mode\n");
+                       return -EINVAL;
+               }
+
+               if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+                   x->replay_esn->replay_window != 64 &&
+                   x->replay_esn->replay_window != 128 &&
+                   x->replay_esn->replay_window != 256) {
+                       netdev_info(netdev,
+                                   "Unsupported replay window size %u\n",
+                                   x->replay_esn->replay_window);
+                       return -EINVAL;
+               }
+
+               if (!x->props.reqid) {
+                       netdev_info(netdev, "Cannot offload without reqid\n");
+                       return -EINVAL;
+               }
+
+               if (x->lft.hard_byte_limit != XFRM_INF ||
+                   x->lft.soft_byte_limit != XFRM_INF) {
+                       netdev_info(netdev,
+                                   "Device doesn't support limits in bytes\n");
+                       return -EINVAL;
+               }
+
+               if (x->lft.soft_packet_limit >= x->lft.hard_packet_limit &&
+                   x->lft.hard_packet_limit != XFRM_INF) {
+                       /* XFRM stack doesn't prevent such configuration :(. */
+                       netdev_info(netdev,
+                                   "Hard packet limit must be greater than soft one\n");
+                       return -EINVAL;
+               }
+               break;
+       default:
+               netdev_info(netdev, "Unsupported xfrm offload type %d\n",
+                           x->xso.type);
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -270,6 +302,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
 {
        struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
        struct net_device *netdev = x->xso.real_dev;
+       struct mlx5e_ipsec *ipsec;
        struct mlx5e_priv *priv;
        int err;
 
@@ -277,6 +310,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
        if (!priv->ipsec)
                return -EOPNOTSUPP;
 
+       ipsec = priv->ipsec;
        err = mlx5e_xfrm_validate_state(x);
        if (err)
                return err;
@@ -288,7 +322,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
        }
 
        sa_entry->x = x;
-       sa_entry->ipsec = priv->ipsec;
+       sa_entry->ipsec = ipsec;
 
        /* check esn */
        mlx5e_ipsec_update_esn_state(sa_entry);
@@ -299,25 +333,29 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
        if (err)
                goto err_xfrm;
 
-       err = mlx5e_accel_ipsec_fs_add_rule(priv, sa_entry);
+       err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
        if (err)
                goto err_hw_ctx;
 
-       if (x->xso.dir == XFRM_DEV_OFFLOAD_IN) {
-               err = mlx5e_ipsec_sadb_rx_add(sa_entry);
-               if (err)
-                       goto err_add_rule;
-       } else {
+       /* We use *_bh() variant because xfrm_timer_handler(), which runs
+        * in softirq context, can reach our state delete logic and we need
+        * xa_erase_bh() there.
+        */
+       err = xa_insert_bh(&ipsec->sadb, sa_entry->ipsec_obj_id, sa_entry,
+                          GFP_KERNEL);
+       if (err)
+               goto err_add_rule;
+
+       if (x->xso.dir == XFRM_DEV_OFFLOAD_OUT)
                sa_entry->set_iv_op = (x->props.flags & XFRM_STATE_ESN) ?
                                mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
-       }
 
        INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
        x->xso.offload_handle = (unsigned long)sa_entry;
-       goto out;
+       return 0;
 
 err_add_rule:
-       mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+       mlx5e_accel_ipsec_fs_del_rule(sa_entry);
 err_hw_ctx:
        mlx5_ipsec_free_sa_ctx(sa_entry);
 err_xfrm:
@@ -329,18 +367,19 @@ out:
 static void mlx5e_xfrm_del_state(struct xfrm_state *x)
 {
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5e_ipsec_sa_entry *old;
 
-       if (x->xso.dir == XFRM_DEV_OFFLOAD_IN)
-               mlx5e_ipsec_sadb_rx_del(sa_entry);
+       old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
+       WARN_ON(old != sa_entry);
 }
 
 static void mlx5e_xfrm_free_state(struct xfrm_state *x)
 {
        struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
-       struct mlx5e_priv *priv = netdev_priv(x->xso.dev);
 
        cancel_work_sync(&sa_entry->modify_work.work);
-       mlx5e_accel_ipsec_fs_del_rule(priv, sa_entry);
+       mlx5e_accel_ipsec_fs_del_rule(sa_entry);
        mlx5_ipsec_free_sa_ctx(sa_entry);
        kfree(sa_entry);
 }
@@ -359,23 +398,33 @@ void mlx5e_ipsec_init(struct mlx5e_priv *priv)
        if (!ipsec)
                return;
 
-       hash_init(ipsec->sadb_rx);
-       spin_lock_init(&ipsec->sadb_rx_lock);
+       xa_init_flags(&ipsec->sadb, XA_FLAGS_ALLOC);
        ipsec->mdev = priv->mdev;
        ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0,
                                            priv->netdev->name);
        if (!ipsec->wq)
                goto err_wq;
 
+       if (mlx5_ipsec_device_caps(priv->mdev) &
+           MLX5_IPSEC_CAP_PACKET_OFFLOAD) {
+               ret = mlx5e_ipsec_aso_init(ipsec);
+               if (ret)
+                       goto err_aso;
+       }
+
        ret = mlx5e_accel_ipsec_fs_init(ipsec);
        if (ret)
                goto err_fs_init;
 
+       ipsec->fs = priv->fs;
        priv->ipsec = ipsec;
        netdev_dbg(priv->netdev, "IPSec attached to netdevice\n");
        return;
 
 err_fs_init:
+       if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+               mlx5e_ipsec_aso_cleanup(ipsec);
+err_aso:
        destroy_workqueue(ipsec->wq);
 err_wq:
        kfree(ipsec);
@@ -391,6 +440,8 @@ void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
                return;
 
        mlx5e_accel_ipsec_fs_cleanup(ipsec);
+       if (mlx5_ipsec_device_caps(priv->mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+               mlx5e_ipsec_aso_cleanup(ipsec);
        destroy_workqueue(ipsec->wq);
        kfree(ipsec);
        priv->ipsec = NULL;
@@ -426,6 +477,122 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
        queue_work(sa_entry->ipsec->wq, &modify_work->work);
 }
 
+static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
+{
+       struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
+       int err;
+
+       lockdep_assert_held(&x->lock);
+
+       if (sa_entry->attrs.soft_packet_limit == XFRM_INF)
+               /* Limits are not configured, as soft limit
+                * must be lowever than hard limit.
+                */
+               return;
+
+       err = mlx5e_ipsec_aso_query(sa_entry, NULL);
+       if (err)
+               return;
+
+       mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
+}
+
+static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x)
+{
+       struct net_device *netdev = x->xdo.real_dev;
+
+       if (x->type != XFRM_POLICY_TYPE_MAIN) {
+               netdev_info(netdev, "Cannot offload non-main policy types\n");
+               return -EINVAL;
+       }
+
+       /* Please pay attention that we support only one template */
+       if (x->xfrm_nr > 1) {
+               netdev_info(netdev, "Cannot offload more than one template\n");
+               return -EINVAL;
+       }
+
+       if (x->xdo.dir != XFRM_DEV_OFFLOAD_IN &&
+           x->xdo.dir != XFRM_DEV_OFFLOAD_OUT) {
+               netdev_info(netdev, "Cannot offload forward policy\n");
+               return -EINVAL;
+       }
+
+       if (!x->xfrm_vec[0].reqid) {
+               netdev_info(netdev, "Cannot offload policy without reqid\n");
+               return -EINVAL;
+       }
+
+       if (x->xdo.type != XFRM_DEV_OFFLOAD_PACKET) {
+               netdev_info(netdev, "Unsupported xfrm offload type\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void
+mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
+                                 struct mlx5_accel_pol_xfrm_attrs *attrs)
+{
+       struct xfrm_policy *x = pol_entry->x;
+       struct xfrm_selector *sel;
+
+       sel = &x->selector;
+       memset(attrs, 0, sizeof(*attrs));
+
+       memcpy(&attrs->saddr, sel->saddr.a6, sizeof(attrs->saddr));
+       memcpy(&attrs->daddr, sel->daddr.a6, sizeof(attrs->daddr));
+       attrs->family = sel->family;
+       attrs->dir = x->xdo.dir;
+       attrs->action = x->action;
+       attrs->type = XFRM_DEV_OFFLOAD_PACKET;
+       attrs->reqid = x->xfrm_vec[0].reqid;
+}
+
+static int mlx5e_xfrm_add_policy(struct xfrm_policy *x)
+{
+       struct net_device *netdev = x->xdo.real_dev;
+       struct mlx5e_ipsec_pol_entry *pol_entry;
+       struct mlx5e_priv *priv;
+       int err;
+
+       priv = netdev_priv(netdev);
+       if (!priv->ipsec)
+               return -EOPNOTSUPP;
+
+       err = mlx5e_xfrm_validate_policy(x);
+       if (err)
+               return err;
+
+       pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
+       if (!pol_entry)
+               return -ENOMEM;
+
+       pol_entry->x = x;
+       pol_entry->ipsec = priv->ipsec;
+
+       mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs);
+       err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
+       if (err)
+               goto err_fs;
+
+       x->xdo.offload_handle = (unsigned long)pol_entry;
+       return 0;
+
+err_fs:
+       kfree(pol_entry);
+       return err;
+}
+
+static void mlx5e_xfrm_free_policy(struct xfrm_policy *x)
+{
+       struct mlx5e_ipsec_pol_entry *pol_entry = to_ipsec_pol_entry(x);
+
+       mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+       kfree(pol_entry);
+}
+
 static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_state_add      = mlx5e_xfrm_add_state,
        .xdo_dev_state_delete   = mlx5e_xfrm_del_state,
@@ -434,6 +601,18 @@ static const struct xfrmdev_ops mlx5e_ipsec_xfrmdev_ops = {
        .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
 };
 
+static const struct xfrmdev_ops mlx5e_ipsec_packet_xfrmdev_ops = {
+       .xdo_dev_state_add      = mlx5e_xfrm_add_state,
+       .xdo_dev_state_delete   = mlx5e_xfrm_del_state,
+       .xdo_dev_state_free     = mlx5e_xfrm_free_state,
+       .xdo_dev_offload_ok     = mlx5e_ipsec_offload_ok,
+       .xdo_dev_state_advance_esn = mlx5e_xfrm_advance_esn_state,
+
+       .xdo_dev_state_update_curlft = mlx5e_xfrm_update_curlft,
+       .xdo_dev_policy_add = mlx5e_xfrm_add_policy,
+       .xdo_dev_policy_free = mlx5e_xfrm_free_policy,
+};
+
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
@@ -443,7 +622,12 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
                return;
 
        mlx5_core_info(mdev, "mlx5e: IPSec ESP acceleration enabled\n");
-       netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
+       if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+               netdev->xfrmdev_ops = &mlx5e_ipsec_packet_xfrmdev_ops;
+       else
+               netdev->xfrmdev_ops = &mlx5e_ipsec_xfrmdev_ops;
+
        netdev->features |= NETIF_F_HW_ESP;
        netdev->hw_enc_features |= NETIF_F_HW_ESP;
 
index 4c47347..a92e19c 100644 (file)
 #ifndef __MLX5E_IPSEC_H__
 #define __MLX5E_IPSEC_H__
 
-#ifdef CONFIG_MLX5_EN_IPSEC
-
 #include <linux/mlx5/device.h>
 #include <net/xfrm.h>
 #include <linux/idr.h>
+#include "lib/aso.h"
 
 #define MLX5E_IPSEC_SADB_RX_BITS 10
 #define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
 
-enum mlx5_accel_esp_flags {
-       MLX5_ACCEL_ESP_FLAGS_TUNNEL            = 0,    /* Default */
-       MLX5_ACCEL_ESP_FLAGS_TRANSPORT         = 1UL << 0,
-       MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED     = 1UL << 1,
-       MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP = 1UL << 2,
-};
-
-enum mlx5_accel_esp_action {
-       MLX5_ACCEL_ESP_ACTION_DECRYPT,
-       MLX5_ACCEL_ESP_ACTION_ENCRYPT,
-};
-
 struct aes_gcm_keymat {
        u64   seq_iv;
 
@@ -66,7 +53,6 @@ struct aes_gcm_keymat {
 };
 
 struct mlx5_accel_esp_xfrm_attrs {
-       enum mlx5_accel_esp_action action;
        u32   esn;
        u32   spi;
        u32   flags;
@@ -82,16 +68,37 @@ struct mlx5_accel_esp_xfrm_attrs {
                __be32 a6[4];
        } daddr;
 
-       u8 is_ipv6;
+       u8 dir : 2;
+       u8 esn_overlap : 1;
+       u8 esn_trigger : 1;
+       u8 type : 2;
+       u8 family;
+       u32 replay_window;
+       u32 authsize;
+       u32 reqid;
+       u64 hard_packet_limit;
+       u64 soft_packet_limit;
 };
 
 enum mlx5_ipsec_cap {
        MLX5_IPSEC_CAP_CRYPTO           = 1 << 0,
        MLX5_IPSEC_CAP_ESN              = 1 << 1,
+       MLX5_IPSEC_CAP_PACKET_OFFLOAD   = 1 << 2,
 };
 
 struct mlx5e_priv;
 
+struct mlx5e_ipsec_hw_stats {
+       u64 ipsec_rx_pkts;
+       u64 ipsec_rx_bytes;
+       u64 ipsec_rx_drop_pkts;
+       u64 ipsec_rx_drop_bytes;
+       u64 ipsec_tx_pkts;
+       u64 ipsec_tx_bytes;
+       u64 ipsec_tx_drop_pkts;
+       u64 ipsec_tx_drop_bytes;
+};
+
 struct mlx5e_ipsec_sw_stats {
        atomic64_t ipsec_rx_drop_sp_alloc;
        atomic64_t ipsec_rx_drop_sadb_miss;
@@ -102,17 +109,38 @@ struct mlx5e_ipsec_sw_stats {
        atomic64_t ipsec_tx_drop_trailer;
 };
 
-struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_rx;
 struct mlx5e_ipsec_tx;
 
+struct mlx5e_ipsec_work {
+       struct work_struct work;
+       struct mlx5e_ipsec *ipsec;
+       u32 id;
+};
+
+struct mlx5e_ipsec_aso {
+       u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+       dma_addr_t dma_addr;
+       struct mlx5_aso *aso;
+       /* IPsec ASO caches data on every query call,
+        * so in nested calls, we can use this boolean to save
+        * recursive calls to mlx5e_ipsec_aso_query()
+        */
+       u8 use_cache : 1;
+};
+
 struct mlx5e_ipsec {
        struct mlx5_core_dev *mdev;
-       DECLARE_HASHTABLE(sadb_rx, MLX5E_IPSEC_SADB_RX_BITS);
-       spinlock_t sadb_rx_lock; /* Protects sadb_rx */
+       struct xarray sadb;
        struct mlx5e_ipsec_sw_stats sw_stats;
+       struct mlx5e_ipsec_hw_stats hw_stats;
        struct workqueue_struct *wq;
-       struct mlx5e_accel_fs_esp *rx_fs;
-       struct mlx5e_ipsec_tx *tx_fs;
+       struct mlx5e_flow_steering *fs;
+       struct mlx5e_ipsec_rx *rx_ipv4;
+       struct mlx5e_ipsec_rx *rx_ipv6;
+       struct mlx5e_ipsec_tx *tx;
+       struct mlx5e_ipsec_aso *aso;
+       struct notifier_block nb;
 };
 
 struct mlx5e_ipsec_esn_state {
@@ -123,7 +151,8 @@ struct mlx5e_ipsec_esn_state {
 
 struct mlx5e_ipsec_rule {
        struct mlx5_flow_handle *rule;
-       struct mlx5_modify_hdr *set_modify_hdr;
+       struct mlx5_modify_hdr *modify_hdr;
+       struct mlx5_pkt_reformat *pkt_reformat;
 };
 
 struct mlx5e_ipsec_modify_state_work {
@@ -132,9 +161,7 @@ struct mlx5e_ipsec_modify_state_work {
 };
 
 struct mlx5e_ipsec_sa_entry {
-       struct hlist_node hlist; /* Item in SADB_RX hashtable */
        struct mlx5e_ipsec_esn_state esn_state;
-       unsigned int handle; /* Handle in SADB_RX */
        struct xfrm_state *x;
        struct mlx5e_ipsec *ipsec;
        struct mlx5_accel_esp_xfrm_attrs attrs;
@@ -146,19 +173,43 @@ struct mlx5e_ipsec_sa_entry {
        struct mlx5e_ipsec_modify_state_work modify_work;
 };
 
+struct mlx5_accel_pol_xfrm_attrs {
+       union {
+               __be32 a4;
+               __be32 a6[4];
+       } saddr;
+
+       union {
+               __be32 a4;
+               __be32 a6[4];
+       } daddr;
+
+       u8 family;
+       u8 action;
+       u8 type : 2;
+       u8 dir : 2;
+       u32 reqid;
+};
+
+struct mlx5e_ipsec_pol_entry {
+       struct xfrm_policy *x;
+       struct mlx5e_ipsec *ipsec;
+       struct mlx5e_ipsec_rule ipsec_rule;
+       struct mlx5_accel_pol_xfrm_attrs attrs;
+};
+
+#ifdef CONFIG_MLX5_EN_IPSEC
+
 void mlx5e_ipsec_init(struct mlx5e_priv *priv);
 void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
 void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv);
 
-struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *dev,
-                                             unsigned int handle);
-
 void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
-                                 struct mlx5e_ipsec_sa_entry *sa_entry);
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
-                                  struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
 
 int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
 void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
@@ -168,11 +219,30 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
 void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
                                const struct mlx5_accel_esp_xfrm_attrs *attrs);
 
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+                         struct mlx5_wqe_aso_ctrl_seg *data);
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                  u64 *packets);
+
+void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
+                                    void *ipsec_stats);
+
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                       struct mlx5_accel_esp_xfrm_attrs *attrs);
 static inline struct mlx5_core_dev *
 mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
        return sa_entry->ipsec->mdev;
 }
+
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+       return pol_entry->ipsec->mdev;
+}
 #else
 static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
 {
index b859e4a..9f19f4b 100644 (file)
@@ -9,53 +9,67 @@
 
 #define NUM_IPSEC_FTE BIT(15)
 
-enum accel_fs_esp_type {
-       ACCEL_FS_ESP4,
-       ACCEL_FS_ESP6,
-       ACCEL_FS_ESP_NUM_TYPES,
+struct mlx5e_ipsec_fc {
+       struct mlx5_fc *cnt;
+       struct mlx5_fc *drop;
 };
 
-struct mlx5e_ipsec_rx_err {
-       struct mlx5_flow_table *ft;
-       struct mlx5_flow_handle *rule;
-       struct mlx5_modify_hdr *copy_modify_hdr;
+struct mlx5e_ipsec_ft {
+       struct mutex mutex; /* Protect changes to this struct */
+       struct mlx5_flow_table *pol;
+       struct mlx5_flow_table *sa;
+       struct mlx5_flow_table *status;
+       u32 refcnt;
 };
 
-struct mlx5e_accel_fs_esp_prot {
-       struct mlx5_flow_table *ft;
-       struct mlx5_flow_group *miss_group;
-       struct mlx5_flow_handle *miss_rule;
-       struct mlx5_flow_destination default_dest;
-       struct mlx5e_ipsec_rx_err rx_err;
-       u32 refcnt;
-       struct mutex prot_mutex; /* protect ESP4/ESP6 protocol */
+struct mlx5e_ipsec_miss {
+       struct mlx5_flow_group *group;
+       struct mlx5_flow_handle *rule;
 };
 
-struct mlx5e_accel_fs_esp {
-       struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
+struct mlx5e_ipsec_rx {
+       struct mlx5e_ipsec_ft ft;
+       struct mlx5e_ipsec_miss pol;
+       struct mlx5e_ipsec_miss sa;
+       struct mlx5e_ipsec_rule status;
+       struct mlx5e_ipsec_fc *fc;
 };
 
 struct mlx5e_ipsec_tx {
+       struct mlx5e_ipsec_ft ft;
+       struct mlx5e_ipsec_miss pol;
        struct mlx5_flow_namespace *ns;
-       struct mlx5_flow_table *ft;
-       struct mutex mutex; /* Protect IPsec TX steering */
-       u32 refcnt;
+       struct mlx5e_ipsec_fc *fc;
 };
 
 /* IPsec RX flow steering */
-static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
+static enum mlx5_traffic_types family2tt(u32 family)
 {
-       if (i == ACCEL_FS_ESP4)
+       if (family == AF_INET)
                return MLX5_TT_IPV4_IPSEC_ESP;
        return MLX5_TT_IPV6_IPSEC_ESP;
 }
 
-static int rx_err_add_rule(struct mlx5e_priv *priv,
-                          struct mlx5e_accel_fs_esp_prot *fs_prot,
-                          struct mlx5e_ipsec_rx_err *rx_err)
+static struct mlx5_flow_table *ipsec_ft_create(struct mlx5_flow_namespace *ns,
+                                              int level, int prio,
+                                              int max_num_groups)
+{
+       struct mlx5_flow_table_attr ft_attr = {};
+
+       ft_attr.autogroup.num_reserved_entries = 1;
+       ft_attr.autogroup.max_num_groups = max_num_groups;
+       ft_attr.max_fte = NUM_IPSEC_FTE;
+       ft_attr.level = level;
+       ft_attr.prio = prio;
+
+       return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+}
+
+static int ipsec_status_rule(struct mlx5_core_dev *mdev,
+                            struct mlx5e_ipsec_rx *rx,
+                            struct mlx5_flow_destination *dest)
 {
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
-       struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_flow_act flow_act = {};
        struct mlx5_modify_hdr *modify_hdr;
        struct mlx5_flow_handle *fte;
@@ -79,26 +93,26 @@ static int rx_err_add_rule(struct mlx5e_priv *priv,
 
        if (IS_ERR(modify_hdr)) {
                err = PTR_ERR(modify_hdr);
-               netdev_err(priv->netdev,
-                          "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+               mlx5_core_err(mdev,
+                             "fail to alloc ipsec copy modify_header_id err=%d\n", err);
                goto out_spec;
        }
 
        /* create fte */
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
-                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
        flow_act.modify_hdr = modify_hdr;
-       fte = mlx5_add_flow_rules(rx_err->ft, spec, &flow_act,
-                                 &fs_prot->default_dest, 1);
+       fte = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
        if (IS_ERR(fte)) {
                err = PTR_ERR(fte);
-               netdev_err(priv->netdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+               mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
                goto out;
        }
 
        kvfree(spec);
-       rx_err->rule = fte;
-       rx_err->copy_modify_hdr = modify_hdr;
+       rx->status.rule = fte;
+       rx->status.modify_hdr = modify_hdr;
        return 0;
 
 out:
@@ -108,13 +122,12 @@ out_spec:
        return err;
 }
 
-static int rx_fs_create(struct mlx5e_priv *priv,
-                       struct mlx5e_accel_fs_esp_prot *fs_prot)
+static int ipsec_miss_create(struct mlx5_core_dev *mdev,
+                            struct mlx5_flow_table *ft,
+                            struct mlx5e_ipsec_miss *miss,
+                            struct mlx5_flow_destination *dest)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       struct mlx5_flow_table *ft = fs_prot->ft;
-       struct mlx5_flow_group *miss_group;
-       struct mlx5_flow_handle *miss_rule;
        MLX5_DECLARE_FLOW_ACT(flow_act);
        struct mlx5_flow_spec *spec;
        u32 *flow_group_in;
@@ -130,450 +143,888 @@ static int rx_fs_create(struct mlx5e_priv *priv,
        /* Create miss_group */
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
-       miss_group = mlx5_create_flow_group(ft, flow_group_in);
-       if (IS_ERR(miss_group)) {
-               err = PTR_ERR(miss_group);
-               netdev_err(priv->netdev, "fail to create ipsec rx miss_group err=%d\n", err);
+       miss->group = mlx5_create_flow_group(ft, flow_group_in);
+       if (IS_ERR(miss->group)) {
+               err = PTR_ERR(miss->group);
+               mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
+                             err);
                goto out;
        }
-       fs_prot->miss_group = miss_group;
 
        /* Create miss rule */
-       miss_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &fs_prot->default_dest, 1);
-       if (IS_ERR(miss_rule)) {
-               mlx5_destroy_flow_group(fs_prot->miss_group);
-               err = PTR_ERR(miss_rule);
-               netdev_err(priv->netdev, "fail to create ipsec rx miss_rule err=%d\n", err);
+       miss->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
+       if (IS_ERR(miss->rule)) {
+               mlx5_destroy_flow_group(miss->group);
+               err = PTR_ERR(miss->rule);
+               mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
+                             err);
                goto out;
        }
-       fs_prot->miss_rule = miss_rule;
 out:
        kvfree(flow_group_in);
        kvfree(spec);
        return err;
 }
 
-static void rx_destroy(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
 {
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
-       struct mlx5e_accel_fs_esp *accel_esp;
-
-       accel_esp = priv->ipsec->rx_fs;
-
-       /* The netdev unreg already happened, so all offloaded rule are already removed */
-       fs_prot = &accel_esp->fs_prot[type];
+       mlx5_del_flow_rules(rx->pol.rule);
+       mlx5_destroy_flow_group(rx->pol.group);
+       mlx5_destroy_flow_table(rx->ft.pol);
 
-       mlx5_del_flow_rules(fs_prot->miss_rule);
-       mlx5_destroy_flow_group(fs_prot->miss_group);
-       mlx5_destroy_flow_table(fs_prot->ft);
+       mlx5_del_flow_rules(rx->sa.rule);
+       mlx5_destroy_flow_group(rx->sa.group);
+       mlx5_destroy_flow_table(rx->ft.sa);
 
-       mlx5_del_flow_rules(fs_prot->rx_err.rule);
-       mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
-       mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+       mlx5_del_flow_rules(rx->status.rule);
+       mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+       mlx5_destroy_flow_table(rx->ft.status);
 }
 
-static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static int rx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+                    struct mlx5e_ipsec_rx *rx, u32 family)
 {
-       struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(priv->fs, false);
-       struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
-       struct mlx5_flow_table_attr ft_attr = {};
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
-       struct mlx5e_accel_fs_esp *accel_esp;
+       struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(ipsec->fs, false);
+       struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+       struct mlx5_flow_destination dest[2];
        struct mlx5_flow_table *ft;
        int err;
 
-       accel_esp = priv->ipsec->rx_fs;
-       fs_prot = &accel_esp->fs_prot[type];
-       fs_prot->default_dest =
-               mlx5_ttc_get_default_dest(ttc, fs_esp2tt(type));
-
-       ft_attr.max_fte = 1;
-       ft_attr.autogroup.max_num_groups = 1;
-       ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
-       ft_attr.prio = MLX5E_NIC_PRIO;
-       ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+       ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+                            MLX5E_NIC_PRIO, 1);
        if (IS_ERR(ft))
                return PTR_ERR(ft);
 
-       fs_prot->rx_err.ft = ft;
-       err = rx_err_add_rule(priv, fs_prot, &fs_prot->rx_err);
+       rx->ft.status = ft;
+
+       dest[0] = mlx5_ttc_get_default_dest(ttc, family2tt(family));
+       dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+       dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+       err = ipsec_status_rule(mdev, rx, dest);
        if (err)
                goto err_add;
 
        /* Create FT */
-       ft_attr.max_fte = NUM_IPSEC_FTE;
-       ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
-       ft_attr.prio = MLX5E_NIC_PRIO;
-       ft_attr.autogroup.num_reserved_entries = 1;
-       ft_attr.autogroup.max_num_groups = 1;
-       ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+       ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_ESP_FT_LEVEL, MLX5E_NIC_PRIO,
+                            2);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
                goto err_fs_ft;
        }
-       fs_prot->ft = ft;
+       rx->ft.sa = ft;
 
-       err = rx_fs_create(priv, fs_prot);
+       err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, dest);
        if (err)
                goto err_fs;
 
+       ft = ipsec_ft_create(ns, MLX5E_ACCEL_FS_POL_FT_LEVEL, MLX5E_NIC_PRIO,
+                            2);
+       if (IS_ERR(ft)) {
+               err = PTR_ERR(ft);
+               goto err_pol_ft;
+       }
+       rx->ft.pol = ft;
+       memset(dest, 0x00, 2 * sizeof(*dest));
+       dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest[0].ft = rx->ft.sa;
+       err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, dest);
+       if (err)
+               goto err_pol_miss;
+
        return 0;
 
+err_pol_miss:
+       mlx5_destroy_flow_table(rx->ft.pol);
+err_pol_ft:
+       mlx5_del_flow_rules(rx->sa.rule);
+       mlx5_destroy_flow_group(rx->sa.group);
 err_fs:
-       mlx5_destroy_flow_table(fs_prot->ft);
+       mlx5_destroy_flow_table(rx->ft.sa);
 err_fs_ft:
-       mlx5_del_flow_rules(fs_prot->rx_err.rule);
-       mlx5_modify_header_dealloc(priv->mdev, fs_prot->rx_err.copy_modify_hdr);
+       mlx5_del_flow_rules(rx->status.rule);
+       mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
 err_add:
-       mlx5_destroy_flow_table(fs_prot->rx_err.ft);
+       mlx5_destroy_flow_table(rx->ft.status);
        return err;
 }
 
-static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static struct mlx5e_ipsec_rx *rx_ft_get(struct mlx5_core_dev *mdev,
+                                       struct mlx5e_ipsec *ipsec, u32 family)
 {
-       struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
        struct mlx5_flow_destination dest = {};
-       struct mlx5e_accel_fs_esp *accel_esp;
+       struct mlx5e_ipsec_rx *rx;
        int err = 0;
 
-       accel_esp = priv->ipsec->rx_fs;
-       fs_prot = &accel_esp->fs_prot[type];
-       mutex_lock(&fs_prot->prot_mutex);
-       if (fs_prot->refcnt)
+       if (family == AF_INET)
+               rx = ipsec->rx_ipv4;
+       else
+               rx = ipsec->rx_ipv6;
+
+       mutex_lock(&rx->ft.mutex);
+       if (rx->ft.refcnt)
                goto skip;
 
        /* create FT */
-       err = rx_create(priv, type);
+       err = rx_create(mdev, ipsec, rx, family);
        if (err)
                goto out;
 
        /* connect */
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-       dest.ft = fs_prot->ft;
-       mlx5_ttc_fwd_dest(ttc, fs_esp2tt(type), &dest);
+       dest.ft = rx->ft.pol;
+       mlx5_ttc_fwd_dest(ttc, family2tt(family), &dest);
 
 skip:
-       fs_prot->refcnt++;
+       rx->ft.refcnt++;
 out:
-       mutex_unlock(&fs_prot->prot_mutex);
-       return err;
+       mutex_unlock(&rx->ft.mutex);
+       if (err)
+               return ERR_PTR(err);
+       return rx;
 }
 
-static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
+static void rx_ft_put(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+                     u32 family)
 {
-       struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(priv->fs, false);
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
-       struct mlx5e_accel_fs_esp *accel_esp;
+       struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(ipsec->fs, false);
+       struct mlx5e_ipsec_rx *rx;
+
+       if (family == AF_INET)
+               rx = ipsec->rx_ipv4;
+       else
+               rx = ipsec->rx_ipv6;
 
-       accel_esp = priv->ipsec->rx_fs;
-       fs_prot = &accel_esp->fs_prot[type];
-       mutex_lock(&fs_prot->prot_mutex);
-       fs_prot->refcnt--;
-       if (fs_prot->refcnt)
+       mutex_lock(&rx->ft.mutex);
+       rx->ft.refcnt--;
+       if (rx->ft.refcnt)
                goto out;
 
        /* disconnect */
-       mlx5_ttc_fwd_default_dest(ttc, fs_esp2tt(type));
+       mlx5_ttc_fwd_default_dest(ttc, family2tt(family));
 
        /* remove FT */
-       rx_destroy(priv, type);
+       rx_destroy(mdev, rx);
 
 out:
-       mutex_unlock(&fs_prot->prot_mutex);
+       mutex_unlock(&rx->ft.mutex);
 }
 
 /* IPsec TX flow steering */
-static int tx_create(struct mlx5e_priv *priv)
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
 {
-       struct mlx5_flow_table_attr ft_attr = {};
-       struct mlx5e_ipsec *ipsec = priv->ipsec;
+       struct mlx5_flow_destination dest = {};
        struct mlx5_flow_table *ft;
        int err;
 
-       ft_attr.max_fte = NUM_IPSEC_FTE;
-       ft_attr.autogroup.max_num_groups = 1;
-       ft = mlx5_create_auto_grouped_flow_table(ipsec->tx_fs->ns, &ft_attr);
+       ft = ipsec_ft_create(tx->ns, 1, 0, 4);
+       if (IS_ERR(ft))
+               return PTR_ERR(ft);
+
+       tx->ft.sa = ft;
+
+       ft = ipsec_ft_create(tx->ns, 0, 0, 2);
        if (IS_ERR(ft)) {
                err = PTR_ERR(ft);
-               netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
-               return err;
+               goto err_pol_ft;
        }
-       ipsec->tx_fs->ft = ft;
+       tx->ft.pol = ft;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest.ft = tx->ft.sa;
+       err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
+       if (err)
+               goto err_pol_miss;
        return 0;
+
+err_pol_miss:
+       mlx5_destroy_flow_table(tx->ft.pol);
+err_pol_ft:
+       mlx5_destroy_flow_table(tx->ft.sa);
+       return err;
 }
 
-static int tx_ft_get(struct mlx5e_priv *priv)
+static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
+                                       struct mlx5e_ipsec *ipsec)
 {
-       struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+       struct mlx5e_ipsec_tx *tx = ipsec->tx;
        int err = 0;
 
-       mutex_lock(&tx_fs->mutex);
-       if (tx_fs->refcnt)
+       mutex_lock(&tx->ft.mutex);
+       if (tx->ft.refcnt)
                goto skip;
 
-       err = tx_create(priv);
+       err = tx_create(mdev, tx);
        if (err)
                goto out;
 skip:
-       tx_fs->refcnt++;
+       tx->ft.refcnt++;
 out:
-       mutex_unlock(&tx_fs->mutex);
-       return err;
+       mutex_unlock(&tx->ft.mutex);
+       if (err)
+               return ERR_PTR(err);
+       return tx;
 }
 
-static void tx_ft_put(struct mlx5e_priv *priv)
+static void tx_ft_put(struct mlx5e_ipsec *ipsec)
 {
-       struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+       struct mlx5e_ipsec_tx *tx = ipsec->tx;
 
-       mutex_lock(&tx_fs->mutex);
-       tx_fs->refcnt--;
-       if (tx_fs->refcnt)
+       mutex_lock(&tx->ft.mutex);
+       tx->ft.refcnt--;
+       if (tx->ft.refcnt)
                goto out;
 
-       mlx5_destroy_flow_table(tx_fs->ft);
+       mlx5_del_flow_rules(tx->pol.rule);
+       mlx5_destroy_flow_group(tx->pol.group);
+       mlx5_destroy_flow_table(tx->ft.pol);
+       mlx5_destroy_flow_table(tx->ft.sa);
 out:
-       mutex_unlock(&tx_fs->mutex);
+       mutex_unlock(&tx->ft.mutex);
 }
 
-static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
-                            u32 ipsec_obj_id,
-                            struct mlx5_flow_spec *spec,
-                            struct mlx5_flow_act *flow_act)
+static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
+                           __be32 *daddr)
 {
-       u8 ip_version = attrs->is_ipv6 ? 6 : 4;
-
-       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS;
+       spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
-       /* ip_version */
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
-       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ip_version);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
+
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+                        outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+}
 
-       /* Non fragmented */
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
-       MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
+                           __be32 *daddr)
+{
+       spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
+
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
+       memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
+       memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                           outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+       memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                           outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+}
+
+static void setup_fte_esp(struct mlx5_flow_spec *spec)
+{
        /* ESP header */
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
        MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+}
 
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi)
+{
        /* SPI number */
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
        MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+       MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+}
+
+static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
+{
+       /* Non fragmented */
+       spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+}
+
+static void setup_fte_reg_a(struct mlx5_flow_spec *spec)
+{
+       /* Add IPsec indicator in metadata_reg_a */
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+       MLX5_SET(fte_match_param, spec->match_criteria,
+                misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
        MLX5_SET(fte_match_param, spec->match_value,
-                misc_parameters.outer_esp_spi, attrs->spi);
-
-       if (ip_version == 4) {
-               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-                                   outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
-                      &attrs->saddr.a4, 4);
-               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-                                   outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
-                      &attrs->daddr.a4, 4);
-               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-                                outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
-               MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
-                                outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-       } else {
-               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-                                   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                      &attrs->saddr.a6, 16);
-               memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
-                                   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                      &attrs->daddr.a6, 16);
-               memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-                                   outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                      0xff, 16);
-               memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
-                                   outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                      0xff, 16);
-       }
+                misc_parameters_2.metadata_reg_a, MLX5_ETH_WQE_FT_META_IPSEC);
+}
 
-       flow_act->crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
-       flow_act->crypto.obj_id = ipsec_obj_id;
-       flow_act->flags |= FLOW_ACT_NO_APPEND;
+static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+{
+       /* Pass policy check before choosing this SA */
+       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+       MLX5_SET(fte_match_param, spec->match_criteria,
+                misc_parameters_2.metadata_reg_c_0, reqid);
+       MLX5_SET(fte_match_param, spec->match_value,
+                misc_parameters_2.metadata_reg_c_0, reqid);
 }
 
-static int rx_add_rule(struct mlx5e_priv *priv,
-                      struct mlx5e_ipsec_sa_entry *sa_entry)
+static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+                              struct mlx5_flow_act *flow_act)
 {
        u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
-       struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+       enum mlx5_flow_namespace_type ns_type;
+       struct mlx5_modify_hdr *modify_hdr;
+
+       MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+       switch (dir) {
+       case XFRM_DEV_OFFLOAD_IN:
+               MLX5_SET(set_action_in, action, field,
+                        MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+               ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+               break;
+       case XFRM_DEV_OFFLOAD_OUT:
+               MLX5_SET(set_action_in, action, field,
+                        MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+               ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       MLX5_SET(set_action_in, action, data, val);
+       MLX5_SET(set_action_in, action, offset, 0);
+       MLX5_SET(set_action_in, action, length, 32);
+
+       modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+       if (IS_ERR(modify_hdr)) {
+               mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
+                             PTR_ERR(modify_hdr));
+               return PTR_ERR(modify_hdr);
+       }
+
+       flow_act->modify_hdr = modify_hdr;
+       flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+       return 0;
+}
+
+static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+                             struct mlx5_accel_esp_xfrm_attrs *attrs,
+                             struct mlx5_flow_act *flow_act)
+{
+       enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+       struct mlx5_pkt_reformat_params reformat_params = {};
+       struct mlx5_pkt_reformat *pkt_reformat;
+       u8 reformatbf[16] = {};
+       __be32 spi;
+
+       if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+               reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+               ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+               goto cmd;
+       }
+
+       if (attrs->family == AF_INET)
+               reformat_params.type =
+                       MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+       else
+               reformat_params.type =
+                       MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+
+       /* convert to network format */
+       spi = htonl(attrs->spi);
+       memcpy(reformatbf, &spi, 4);
+
+       reformat_params.param_0 = attrs->authsize;
+       reformat_params.size = sizeof(reformatbf);
+       reformat_params.data = &reformatbf;
+
+cmd:
+       pkt_reformat =
+               mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
+       if (IS_ERR(pkt_reformat))
+               return PTR_ERR(pkt_reformat);
+
+       flow_act->pkt_reformat = pkt_reformat;
+       flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+       return 0;
+}
+
+static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
-       u32 ipsec_obj_id = sa_entry->ipsec_obj_id;
-       struct mlx5_modify_hdr *modify_hdr = NULL;
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
+       struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
        struct mlx5_flow_destination dest = {};
-       struct mlx5e_accel_fs_esp *accel_esp;
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
-       enum accel_fs_esp_type type;
        struct mlx5_flow_spec *spec;
-       int err = 0;
+       struct mlx5e_ipsec_rx *rx;
+       int err;
 
-       accel_esp = priv->ipsec->rx_fs;
-       type = attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4;
-       fs_prot = &accel_esp->fs_prot[type];
-
-       err = rx_ft_get(priv, type);
-       if (err)
-               return err;
+       rx = rx_ft_get(mdev, ipsec, attrs->family);
+       if (IS_ERR(rx))
+               return PTR_ERR(rx);
 
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
-               goto out_err;
+               goto err_alloc;
        }
 
-       setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+       if (attrs->family == AF_INET)
+               setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+       else
+               setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
 
-       /* Set bit[31] ipsec marker */
-       /* Set bit[23-0] ipsec_obj_id */
-       MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
-       MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
-       MLX5_SET(set_action_in, action, data, (ipsec_obj_id | BIT(31)));
-       MLX5_SET(set_action_in, action, offset, 0);
-       MLX5_SET(set_action_in, action, length, 32);
+       setup_fte_spi(spec, attrs->spi);
+       setup_fte_esp(spec);
+       setup_fte_no_frags(spec);
 
-       modify_hdr = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL,
-                                             1, action);
-       if (IS_ERR(modify_hdr)) {
-               err = PTR_ERR(modify_hdr);
-               netdev_err(priv->netdev,
-                          "fail to alloc ipsec set modify_header_id err=%d\n", err);
-               modify_hdr = NULL;
-               goto out_err;
+       err = setup_modify_header(mdev, sa_entry->ipsec_obj_id | BIT(31),
+                                 XFRM_DEV_OFFLOAD_IN, &flow_act);
+       if (err)
+               goto err_mod_header;
+
+       switch (attrs->type) {
+       case XFRM_DEV_OFFLOAD_PACKET:
+               err = setup_pkt_reformat(mdev, attrs, &flow_act);
+               if (err)
+                       goto err_pkt_reformat;
+               break;
+       default:
+               break;
        }
 
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                         MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
-                         MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+       flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+       flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+       flow_act.flags |= FLOW_ACT_NO_APPEND;
+       flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                          MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-       flow_act.modify_hdr = modify_hdr;
-       dest.ft = fs_prot->rx_err.ft;
-       rule = mlx5_add_flow_rules(fs_prot->ft, spec, &flow_act, &dest, 1);
+       dest.ft = rx->ft.status;
+       rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
-               netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
-                          attrs->action, err);
-               goto out_err;
+               mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
+               goto err_add_flow;
        }
+       kvfree(spec);
 
-       ipsec_rule->rule = rule;
-       ipsec_rule->set_modify_hdr = modify_hdr;
-       goto out;
-
-out_err:
-       if (modify_hdr)
-               mlx5_modify_header_dealloc(priv->mdev, modify_hdr);
-       rx_ft_put(priv, type);
+       sa_entry->ipsec_rule.rule = rule;
+       sa_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+       sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
+       return 0;
 
-out:
+err_add_flow:
+       if (flow_act.pkt_reformat)
+               mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
+       mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
        kvfree(spec);
+err_alloc:
+       rx_ft_put(mdev, ipsec, attrs->family);
        return err;
 }
 
-static int tx_add_rule(struct mlx5e_priv *priv,
-                      struct mlx5e_ipsec_sa_entry *sa_entry)
+static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
+       struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+       struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
+       struct mlx5e_ipsec_tx *tx;
        int err = 0;
 
-       err = tx_ft_get(priv);
-       if (err)
-               return err;
+       tx = tx_ft_get(mdev, ipsec);
+       if (IS_ERR(tx))
+               return PTR_ERR(tx);
 
        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
-               goto out;
+               goto err_alloc;
        }
 
-       setup_fte_common(&sa_entry->attrs, sa_entry->ipsec_obj_id, spec,
-                        &flow_act);
+       if (attrs->family == AF_INET)
+               setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+       else
+               setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+       setup_fte_no_frags(spec);
+
+       switch (attrs->type) {
+       case XFRM_DEV_OFFLOAD_CRYPTO:
+               setup_fte_spi(spec, attrs->spi);
+               setup_fte_esp(spec);
+               setup_fte_reg_a(spec);
+               break;
+       case XFRM_DEV_OFFLOAD_PACKET:
+               setup_fte_reg_c0(spec, attrs->reqid);
+               err = setup_pkt_reformat(mdev, attrs, &flow_act);
+               if (err)
+                       goto err_pkt_reformat;
+               break;
+       default:
+               break;
+       }
 
-       /* Add IPsec indicator in metadata_reg_a */
-       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
-       MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
-                MLX5_ETH_WQE_FT_META_IPSEC);
-       MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
-                MLX5_ETH_WQE_FT_META_IPSEC);
-
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
-                         MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT;
-       rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+       flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+       flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+       flow_act.flags |= FLOW_ACT_NO_APPEND;
+       flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+                          MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
+       dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+       dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+       rule = mlx5_add_flow_rules(tx->ft.sa, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule)) {
                err = PTR_ERR(rule);
-               netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
-                               sa_entry->attrs.action, err);
-               goto out;
+               mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
+               goto err_add_flow;
        }
 
+       kvfree(spec);
        sa_entry->ipsec_rule.rule = rule;
+       sa_entry->ipsec_rule.pkt_reformat = flow_act.pkt_reformat;
+       return 0;
 
-out:
+err_add_flow:
+       if (flow_act.pkt_reformat)
+               mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
        kvfree(spec);
+err_alloc:
+       tx_ft_put(ipsec);
+       return err;
+}
+
+static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+       struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+       struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+       struct mlx5_flow_destination dest[2] = {};
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_spec *spec;
+       struct mlx5e_ipsec_tx *tx;
+       int err, dstn = 0;
+
+       tx = tx_ft_get(mdev, pol_entry->ipsec);
+       if (IS_ERR(tx))
+               return PTR_ERR(tx);
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec) {
+               err = -ENOMEM;
+               goto err_alloc;
+       }
+
+       if (attrs->family == AF_INET)
+               setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+       else
+               setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+       setup_fte_no_frags(spec);
+
+       err = setup_modify_header(mdev, attrs->reqid, XFRM_DEV_OFFLOAD_OUT,
+                                 &flow_act);
        if (err)
-               tx_ft_put(priv);
+               goto err_mod_header;
+
+       switch (attrs->action) {
+       case XFRM_POLICY_ALLOW:
+               flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+               break;
+       case XFRM_POLICY_BLOCK:
+               flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+                                  MLX5_FLOW_CONTEXT_ACTION_COUNT;
+               dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+               dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+               dstn++;
+               break;
+       default:
+               WARN_ON(true);
+               err = -EINVAL;
+               goto err_action;
+       }
+
+       flow_act.flags |= FLOW_ACT_NO_APPEND;
+       dest[dstn].ft = tx->ft.sa;
+       dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dstn++;
+       rule = mlx5_add_flow_rules(tx->ft.pol, spec, &flow_act, dest, dstn);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
+               goto err_action;
+       }
+
+       kvfree(spec);
+       pol_entry->ipsec_rule.rule = rule;
+       pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+       return 0;
+
+err_action:
+       mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
+       kvfree(spec);
+err_alloc:
+       tx_ft_put(pol_entry->ipsec);
        return err;
 }
 
-int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
-                                 struct mlx5e_ipsec_sa_entry *sa_entry)
+static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
-       if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT)
-               return tx_add_rule(priv, sa_entry);
+       struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+       struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+       struct mlx5_flow_destination dest[2];
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_spec *spec;
+       struct mlx5e_ipsec_rx *rx;
+       int err, dstn = 0;
+
+       rx = rx_ft_get(mdev, pol_entry->ipsec, attrs->family);
+       if (IS_ERR(rx))
+               return PTR_ERR(rx);
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec) {
+               err = -ENOMEM;
+               goto err_alloc;
+       }
 
-       return rx_add_rule(priv, sa_entry);
+       if (attrs->family == AF_INET)
+               setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+       else
+               setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+       setup_fte_no_frags(spec);
+
+       switch (attrs->action) {
+       case XFRM_POLICY_ALLOW:
+               flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+               break;
+       case XFRM_POLICY_BLOCK:
+               flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+               dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+               dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+               dstn++;
+               break;
+       default:
+               WARN_ON(true);
+               err = -EINVAL;
+               goto err_action;
+       }
+
+       flow_act.flags |= FLOW_ACT_NO_APPEND;
+       dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+       dest[dstn].ft = rx->ft.sa;
+       dstn++;
+       rule = mlx5_add_flow_rules(rx->ft.pol, spec, &flow_act, dest, dstn);
+       if (IS_ERR(rule)) {
+               err = PTR_ERR(rule);
+               mlx5_core_err(mdev, "Fail to add RX IPsec policy rule err=%d\n", err);
+               goto err_action;
+       }
+
+       kvfree(spec);
+       pol_entry->ipsec_rule.rule = rule;
+       return 0;
+
+err_action:
+       kvfree(spec);
+err_alloc:
+       rx_ft_put(mdev, pol_entry->ipsec, attrs->family);
+       return err;
 }
 
-void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
-                                  struct mlx5e_ipsec_sa_entry *sa_entry)
+static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
+{
+       struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+       mlx5_fc_destroy(mdev, tx->fc->drop);
+       mlx5_fc_destroy(mdev, tx->fc->cnt);
+       kfree(tx->fc);
+       mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+       mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+       kfree(rx_ipv4->fc);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+       struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+       struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5e_ipsec_tx *tx = ipsec->tx;
+       struct mlx5e_ipsec_fc *fc;
+       struct mlx5_fc *counter;
+       int err;
+
+       fc = kzalloc(sizeof(*rx_ipv4->fc), GFP_KERNEL);
+       if (!fc)
+               return -ENOMEM;
+
+       /* Both IPv4 and IPv6 point to same flow counters struct. */
+       rx_ipv4->fc = fc;
+       rx_ipv6->fc = fc;
+       counter = mlx5_fc_create(mdev, false);
+       if (IS_ERR(counter)) {
+               err = PTR_ERR(counter);
+               goto err_rx_cnt;
+       }
+
+       fc->cnt = counter;
+       counter = mlx5_fc_create(mdev, false);
+       if (IS_ERR(counter)) {
+               err = PTR_ERR(counter);
+               goto err_rx_drop;
+       }
+
+       fc->drop = counter;
+       fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
+       if (!fc) {
+               err = -ENOMEM;
+               goto err_tx_fc;
+       }
+
+       tx->fc = fc;
+       counter = mlx5_fc_create(mdev, false);
+       if (IS_ERR(counter)) {
+               err = PTR_ERR(counter);
+               goto err_tx_cnt;
+       }
+
+       fc->cnt = counter;
+       counter = mlx5_fc_create(mdev, false);
+       if (IS_ERR(counter)) {
+               err = PTR_ERR(counter);
+               goto err_tx_drop;
+       }
+
+       fc->drop = counter;
+       return 0;
+
+err_tx_drop:
+       mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_cnt:
+       kfree(tx->fc);
+err_tx_fc:
+       mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+err_rx_drop:
+       mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+err_rx_cnt:
+       kfree(rx_ipv4->fc);
+       return err;
+}
+
+void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void *ipsec_stats)
+{
+       struct mlx5_core_dev *mdev = priv->mdev;
+       struct mlx5e_ipsec *ipsec = priv->ipsec;
+       struct mlx5e_ipsec_hw_stats *stats;
+       struct mlx5e_ipsec_fc *fc;
+
+       stats = (struct mlx5e_ipsec_hw_stats *)ipsec_stats;
+
+       stats->ipsec_rx_pkts = 0;
+       stats->ipsec_rx_bytes = 0;
+       stats->ipsec_rx_drop_pkts = 0;
+       stats->ipsec_rx_drop_bytes = 0;
+       stats->ipsec_tx_pkts = 0;
+       stats->ipsec_tx_bytes = 0;
+       stats->ipsec_tx_drop_pkts = 0;
+       stats->ipsec_tx_drop_bytes = 0;
+
+       fc = ipsec->rx_ipv4->fc;
+       mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_rx_pkts, &stats->ipsec_rx_bytes);
+       mlx5_fc_query(mdev, fc->drop, &stats->ipsec_rx_drop_pkts,
+                     &stats->ipsec_rx_drop_bytes);
+
+       fc = ipsec->tx->fc;
+       mlx5_fc_query(mdev, fc->cnt, &stats->ipsec_tx_pkts, &stats->ipsec_tx_bytes);
+       mlx5_fc_query(mdev, fc->drop, &stats->ipsec_tx_drop_pkts,
+                     &stats->ipsec_tx_drop_bytes);
+}
+
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+       if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+               return tx_add_rule(sa_entry);
+
+       return rx_add_rule(sa_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
        struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
        struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
 
        mlx5_del_flow_rules(ipsec_rule->rule);
 
-       if (sa_entry->attrs.action == MLX5_ACCEL_ESP_ACTION_ENCRYPT) {
-               tx_ft_put(priv);
+       if (ipsec_rule->pkt_reformat)
+               mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
+
+       if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT) {
+               tx_ft_put(sa_entry->ipsec);
                return;
        }
 
-       mlx5_modify_header_dealloc(mdev, ipsec_rule->set_modify_hdr);
-       rx_ft_put(priv,
-                 sa_entry->attrs.is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
+       mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+       rx_ft_put(mdev, sa_entry->ipsec, sa_entry->attrs.family);
 }
 
-void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
 {
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
-       struct mlx5e_accel_fs_esp *accel_esp;
-       enum accel_fs_esp_type i;
+       if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_OUT)
+               return tx_add_policy(pol_entry);
 
-       if (!ipsec->rx_fs)
-               return;
+       return rx_add_policy(pol_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+       struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
+       struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
 
-       mutex_destroy(&ipsec->tx_fs->mutex);
-       WARN_ON(ipsec->tx_fs->refcnt);
-       kfree(ipsec->tx_fs);
+       mlx5_del_flow_rules(ipsec_rule->rule);
 
-       accel_esp = ipsec->rx_fs;
-       for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
-               fs_prot = &accel_esp->fs_prot[i];
-               mutex_destroy(&fs_prot->prot_mutex);
-               WARN_ON(fs_prot->refcnt);
+       if (pol_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
+               rx_ft_put(mdev, pol_entry->ipsec, pol_entry->attrs.family);
+               return;
        }
-       kfree(ipsec->rx_fs);
+
+       mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+       tx_ft_put(pol_entry->ipsec);
+}
+
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
+{
+       if (!ipsec->tx)
+               return;
+
+       ipsec_fs_destroy_counters(ipsec);
+       mutex_destroy(&ipsec->tx->ft.mutex);
+       WARN_ON(ipsec->tx->ft.refcnt);
+       kfree(ipsec->tx);
+
+       mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
+       WARN_ON(ipsec->rx_ipv4->ft.refcnt);
+       kfree(ipsec->rx_ipv4);
+
+       mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
+       WARN_ON(ipsec->rx_ipv6->ft.refcnt);
+       kfree(ipsec->rx_ipv6);
 }
 
 int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
 {
-       struct mlx5e_accel_fs_esp_prot *fs_prot;
-       struct mlx5e_accel_fs_esp *accel_esp;
        struct mlx5_flow_namespace *ns;
-       enum accel_fs_esp_type i;
        int err = -ENOMEM;
 
        ns = mlx5_get_flow_namespace(ipsec->mdev,
@@ -581,26 +1032,34 @@ int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
        if (!ns)
                return -EOPNOTSUPP;
 
-       ipsec->tx_fs = kzalloc(sizeof(*ipsec->tx_fs), GFP_KERNEL);
-       if (!ipsec->tx_fs)
+       ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
+       if (!ipsec->tx)
                return -ENOMEM;
 
-       ipsec->rx_fs = kzalloc(sizeof(*ipsec->rx_fs), GFP_KERNEL);
-       if (!ipsec->rx_fs)
-               goto err_rx;
+       ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
+       if (!ipsec->rx_ipv4)
+               goto err_rx_ipv4;
 
-       mutex_init(&ipsec->tx_fs->mutex);
-       ipsec->tx_fs->ns = ns;
+       ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
+       if (!ipsec->rx_ipv6)
+               goto err_rx_ipv6;
 
-       accel_esp = ipsec->rx_fs;
-       for (i = 0; i < ACCEL_FS_ESP_NUM_TYPES; i++) {
-               fs_prot = &accel_esp->fs_prot[i];
-               mutex_init(&fs_prot->prot_mutex);
-       }
+       err = ipsec_fs_init_counters(ipsec);
+       if (err)
+               goto err_counters;
+
+       mutex_init(&ipsec->tx->ft.mutex);
+       mutex_init(&ipsec->rx_ipv4->ft.mutex);
+       mutex_init(&ipsec->rx_ipv6->ft.mutex);
+       ipsec->tx->ns = ns;
 
        return 0;
 
-err_rx:
-       kfree(ipsec->tx_fs);
+err_counters:
+       kfree(ipsec->rx_ipv6);
+err_rx_ipv6:
+       kfree(ipsec->rx_ipv4);
+err_rx_ipv4:
+       kfree(ipsec->tx);
        return err;
 }
index 792724c..8e36142 100644 (file)
@@ -2,9 +2,14 @@
 /* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
 
 #include "mlx5_core.h"
+#include "en.h"
 #include "ipsec.h"
 #include "lib/mlx5.h"
 
+enum {
+       MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET,
+};
+
 u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 {
        u32 caps = 0;
@@ -31,6 +36,12 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
            MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
                caps |= MLX5_IPSEC_CAP_CRYPTO;
 
+       if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) &&
+           MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) &&
+           MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) &&
+           MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+               caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
        if (!caps)
                return 0;
 
@@ -46,6 +57,52 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
 }
 EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
 
+static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+                                    struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+       void *aso_ctx;
+
+       aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
+       if (attrs->esn_trigger) {
+               MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
+
+               if (attrs->dir == XFRM_DEV_OFFLOAD_IN) {
+                       MLX5_SET(ipsec_aso, aso_ctx, window_sz,
+                                attrs->replay_window / 64);
+                       MLX5_SET(ipsec_aso, aso_ctx, mode,
+                                MLX5_IPSEC_ASO_REPLAY_PROTECTION);
+                       }
+       }
+
+       /* ASO context */
+       MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
+       MLX5_SET(ipsec_obj, obj, full_offload, 1);
+       MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
+       /* MLX5_IPSEC_ASO_REG_C_4_5 is type C register that is used
+        * in flow steering to perform matching against. Please be
+        * aware that this register was chosen arbitrary and can't
+        * be used in other places as long as IPsec packet offload
+        * active.
+        */
+       MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
+       if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+               MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+
+       if (attrs->hard_packet_limit != XFRM_INF) {
+               MLX5_SET(ipsec_aso, aso_ctx, remove_flow_pkt_cnt,
+                        lower_32_bits(attrs->hard_packet_limit));
+               MLX5_SET(ipsec_aso, aso_ctx, hard_lft_arm, 1);
+               MLX5_SET(ipsec_aso, aso_ctx, remove_flow_enable, 1);
+       }
+
+       if (attrs->soft_packet_limit != XFRM_INF) {
+               MLX5_SET(ipsec_aso, aso_ctx, remove_flow_soft_lft,
+                        lower_32_bits(attrs->soft_packet_limit));
+
+               MLX5_SET(ipsec_aso, aso_ctx, soft_lft_arm, 1);
+       }
+}
+
 static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
 {
        struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
@@ -54,6 +111,7 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
        u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
        u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
        void *obj, *salt_p, *salt_iv_p;
+       struct mlx5e_hw_objs *res;
        int err;
 
        obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
@@ -66,11 +124,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
        salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
        memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
        /* esn */
-       if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED) {
+       if (attrs->esn_trigger) {
                MLX5_SET(ipsec_obj, obj, esn_en, 1);
                MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
-               if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
-                       MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+               MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
        }
 
        MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
@@ -81,6 +138,10 @@ static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
        MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
                 MLX5_GENERAL_OBJECT_TYPES_IPSEC);
 
+       res = &mdev->mlx5e_res.hw_objs;
+       if (attrs->type == XFRM_DEV_OFFLOAD_PACKET)
+               mlx5e_ipsec_packet_setup(obj, res->pdn, attrs);
+
        err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
        if (!err)
                sa_entry->ipsec_obj_id =
@@ -152,7 +213,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
        void *obj;
        int err;
 
-       if (!(attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_TRIGGERED))
+       if (!attrs->esn_trigger)
                return 0;
 
        general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
@@ -183,8 +244,7 @@ static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
                   MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
                           MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
        MLX5_SET(ipsec_obj, obj, esn_msb, attrs->esn);
-       if (attrs->flags & MLX5_ACCEL_ESP_FLAGS_ESN_STATE_OVERLAP)
-               MLX5_SET(ipsec_obj, obj, esn_overlap, 1);
+       MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->esn_overlap);
 
        /* general object fields set */
        MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
@@ -203,3 +263,234 @@ void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
 
        memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
 }
+
+static void
+mlx5e_ipsec_aso_update_esn(struct mlx5e_ipsec_sa_entry *sa_entry,
+                          const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+       struct mlx5_wqe_aso_ctrl_seg data = {};
+
+       data.data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
+       data.condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE
+                                                                   << 4;
+       data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+       data.bitwise_data = cpu_to_be64(BIT_ULL(54));
+       data.data_mask = data.bitwise_data;
+
+       mlx5e_ipsec_aso_query(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                        u32 mode_param)
+{
+       struct mlx5_accel_esp_xfrm_attrs attrs = {};
+
+       if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
+               sa_entry->esn_state.esn++;
+               sa_entry->esn_state.overlap = 0;
+       } else {
+               sa_entry->esn_state.overlap = 1;
+       }
+
+       mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
+       mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
+       mlx5e_ipsec_aso_update_esn(sa_entry, &attrs);
+}
+
+static void mlx5e_ipsec_handle_event(struct work_struct *_work)
+{
+       struct mlx5e_ipsec_work *work =
+               container_of(_work, struct mlx5e_ipsec_work, work);
+       struct mlx5_accel_esp_xfrm_attrs *attrs;
+       struct mlx5e_ipsec_sa_entry *sa_entry;
+       struct mlx5e_ipsec_aso *aso;
+       struct mlx5e_ipsec *ipsec;
+       int ret;
+
+       sa_entry = xa_load(&work->ipsec->sadb, work->id);
+       if (!sa_entry)
+               goto out;
+
+       ipsec = sa_entry->ipsec;
+       aso = ipsec->aso;
+       attrs = &sa_entry->attrs;
+
+       spin_lock(&sa_entry->x->lock);
+       ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
+       if (ret)
+               goto unlock;
+
+       aso->use_cache = true;
+       if (attrs->esn_trigger &&
+           !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
+               u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
+
+               mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
+       }
+
+       if (attrs->soft_packet_limit != XFRM_INF)
+               if (!MLX5_GET(ipsec_aso, aso->ctx, soft_lft_arm) ||
+                   !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
+                   !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
+                       xfrm_state_check_expire(sa_entry->x);
+       aso->use_cache = false;
+
+unlock:
+       spin_unlock(&sa_entry->x->lock);
+out:
+       kfree(work);
+}
+
+static int mlx5e_ipsec_event(struct notifier_block *nb, unsigned long event,
+                            void *data)
+{
+       struct mlx5e_ipsec *ipsec = container_of(nb, struct mlx5e_ipsec, nb);
+       struct mlx5_eqe_obj_change *object;
+       struct mlx5e_ipsec_work *work;
+       struct mlx5_eqe *eqe = data;
+       u16 type;
+
+       if (event != MLX5_EVENT_TYPE_OBJECT_CHANGE)
+               return NOTIFY_DONE;
+
+       object = &eqe->data.obj_change;
+       type = be16_to_cpu(object->obj_type);
+
+       if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
+               return NOTIFY_DONE;
+
+       work = kmalloc(sizeof(*work), GFP_ATOMIC);
+       if (!work)
+               return NOTIFY_DONE;
+
+       INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
+       work->ipsec = ipsec;
+       work->id = be32_to_cpu(object->obj_id);
+
+       queue_work(ipsec->wq, &work->work);
+       return NOTIFY_OK;
+}
+
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
+{
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5e_ipsec_aso *aso;
+       struct mlx5e_hw_objs *res;
+       struct device *pdev;
+       int err;
+
+       aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
+       if (!aso)
+               return -ENOMEM;
+
+       res = &mdev->mlx5e_res.hw_objs;
+
+       pdev = mlx5_core_dma_dev(mdev);
+       aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx),
+                                      DMA_BIDIRECTIONAL);
+       err = dma_mapping_error(pdev, aso->dma_addr);
+       if (err)
+               goto err_dma;
+
+       aso->aso = mlx5_aso_create(mdev, res->pdn);
+       if (IS_ERR(aso->aso)) {
+               err = PTR_ERR(aso->aso);
+               goto err_aso_create;
+       }
+
+       ipsec->nb.notifier_call = mlx5e_ipsec_event;
+       mlx5_notifier_register(mdev, &ipsec->nb);
+
+       ipsec->aso = aso;
+       return 0;
+
+err_aso_create:
+       dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+                        DMA_BIDIRECTIONAL);
+err_dma:
+       kfree(aso);
+       return err;
+}
+
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
+{
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5e_ipsec_aso *aso;
+       struct device *pdev;
+
+       aso = ipsec->aso;
+       pdev = mlx5_core_dma_dev(mdev);
+
+       mlx5_notifier_unregister(mdev, &ipsec->nb);
+       mlx5_aso_destroy(aso->aso);
+       dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx),
+                        DMA_BIDIRECTIONAL);
+       kfree(aso);
+}
+
+static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
+                                struct mlx5_wqe_aso_ctrl_seg *data)
+{
+       if (!data)
+               return;
+
+       ctrl->data_mask_mode = data->data_mask_mode;
+       ctrl->condition_1_0_operand = data->condition_1_0_operand;
+       ctrl->condition_1_0_offset = data->condition_1_0_offset;
+       ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
+       ctrl->condition_0_data = data->condition_0_data;
+       ctrl->condition_0_mask = data->condition_0_mask;
+       ctrl->condition_1_data = data->condition_1_data;
+       ctrl->condition_1_mask = data->condition_1_mask;
+       ctrl->bitwise_data = data->bitwise_data;
+       ctrl->data_mask = data->data_mask;
+}
+
+int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+                         struct mlx5_wqe_aso_ctrl_seg *data)
+{
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5e_ipsec_aso *aso = ipsec->aso;
+       struct mlx5_core_dev *mdev = ipsec->mdev;
+       struct mlx5_wqe_aso_ctrl_seg *ctrl;
+       struct mlx5e_hw_objs *res;
+       struct mlx5_aso_wqe *wqe;
+       u8 ds_cnt;
+
+       lockdep_assert_held(&sa_entry->x->lock);
+       if (aso->use_cache)
+               return 0;
+
+       res = &mdev->mlx5e_res.hw_objs;
+
+       memset(aso->ctx, 0, sizeof(aso->ctx));
+       wqe = mlx5_aso_get_wqe(aso->aso);
+       ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+       mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
+                          MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
+
+       ctrl = &wqe->aso_ctrl;
+       ctrl->va_l =
+               cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
+       ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
+       ctrl->l_key = cpu_to_be32(res->mkey);
+       mlx5e_ipsec_aso_copy(ctrl, data);
+
+       mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
+       return mlx5_aso_poll_cq(aso->aso, false);
+}
+
+void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
+                                  u64 *packets)
+{
+       struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+       struct mlx5e_ipsec_aso *aso = ipsec->aso;
+       u64 hard_cnt;
+
+       hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
+       /* HW decreases the limit till it reaches zero to fire an event.
+        * We need to fix the calculations, so the returned count is a total
+        * number of passed packets and not how many are left.
+        */
+       *packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
+}
index 6859f1c..eab5bc7 100644 (file)
@@ -312,27 +312,31 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                                       struct mlx5_cqe64 *cqe)
 {
        u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
-       struct mlx5e_priv *priv;
+       struct mlx5e_priv *priv = netdev_priv(netdev);
+       struct mlx5e_ipsec *ipsec = priv->ipsec;
+       struct mlx5e_ipsec_sa_entry *sa_entry;
        struct xfrm_offload *xo;
-       struct xfrm_state *xs;
        struct sec_path *sp;
        u32  sa_handle;
 
        sa_handle = MLX5_IPSEC_METADATA_HANDLE(ipsec_meta_data);
-       priv = netdev_priv(netdev);
        sp = secpath_set(skb);
        if (unlikely(!sp)) {
-               atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
+               atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sp_alloc);
                return;
        }
 
-       xs = mlx5e_ipsec_sadb_rx_lookup(priv->ipsec, sa_handle);
-       if (unlikely(!xs)) {
-               atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
+       rcu_read_lock();
+       sa_entry = xa_load(&ipsec->sadb, sa_handle);
+       if (unlikely(!sa_entry)) {
+               rcu_read_unlock();
+               atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_sadb_miss);
                return;
        }
+       xfrm_state_hold(sa_entry->x);
+       rcu_read_unlock();
 
-       sp->xvec[sp->len++] = xs;
+       sp->xvec[sp->len++] = sa_entry->x;
        sp->olen++;
 
        xo = xfrm_offload(skb);
@@ -349,6 +353,6 @@ void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
                xo->status = CRYPTO_INVALID_PACKET_SYNTAX;
                break;
        default:
-               atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_syndrome);
+               atomic64_inc(&ipsec->sw_stats.ipsec_rx_drop_syndrome);
        }
 }
index 9de8482..e0e36a0 100644 (file)
 #include "en.h"
 #include "ipsec.h"
 
+static const struct counter_desc mlx5e_ipsec_hw_stats_desc[] = {
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_pkts) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_pkts) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_rx_drop_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_pkts) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_pkts) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_hw_stats, ipsec_tx_drop_bytes) },
+};
+
 static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sp_alloc) },
        { MLX5E_DECLARE_STAT(struct mlx5e_ipsec_sw_stats, ipsec_rx_drop_sadb_miss) },
@@ -50,8 +61,48 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = {
 #define MLX5E_READ_CTR_ATOMIC64(ptr, dsc, i) \
        atomic64_read((atomic64_t *)((char *)(ptr) + (dsc)[i].offset))
 
+#define NUM_IPSEC_HW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_hw_stats_desc)
 #define NUM_IPSEC_SW_COUNTERS ARRAY_SIZE(mlx5e_ipsec_sw_stats_desc)
 
+static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw)
+{
+       if (!priv->ipsec)
+               return 0;
+
+       return NUM_IPSEC_HW_COUNTERS;
+}
+
+static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) {}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
+{
+       unsigned int i;
+
+       if (!priv->ipsec)
+               return idx;
+
+       for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+               strcpy(data + (idx++) * ETH_GSTRING_LEN,
+                      mlx5e_ipsec_hw_stats_desc[i].format);
+
+       return idx;
+}
+
+static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
+{
+       int i;
+
+       if (!priv->ipsec)
+               return idx;
+
+       mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
+       for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
+               data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
+                                                     mlx5e_ipsec_hw_stats_desc, i);
+
+       return idx;
+}
+
 static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
 {
        return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0;
@@ -81,4 +132,5 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
        return idx;
 }
 
+MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
 MLX5E_DEFINE_STATS_GRP(ipsec_sw, 0);
index 70c4ea3..6687b81 100644 (file)
@@ -2480,6 +2480,7 @@ mlx5e_stats_grp_t mlx5e_nic_stats_grps[] = {
        &MLX5E_STATS_GRP(per_prio),
        &MLX5E_STATS_GRP(pme),
 #ifdef CONFIG_MLX5_EN_IPSEC
+       &MLX5E_STATS_GRP(ipsec_hw),
        &MLX5E_STATS_GRP(ipsec_sw),
 #endif
        &MLX5E_STATS_GRP(tls),
index 37df58b..375752d 100644 (file)
@@ -506,6 +506,7 @@ extern MLX5E_DECLARE_STATS_GRP(per_prio);
 extern MLX5E_DECLARE_STATS_GRP(pme);
 extern MLX5E_DECLARE_STATS_GRP(channels);
 extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
+extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
 extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
 extern MLX5E_DECLARE_STATS_GRP(ptp);
 extern MLX5E_DECLARE_STATS_GRP(macsec_hw);
index a0242dc..8f7580f 100644 (file)
@@ -19,6 +19,7 @@
 #include "diag/fw_tracer.h"
 #include "mlx5_irq.h"
 #include "devlink.h"
+#include "en_accel/ipsec.h"
 
 enum {
        MLX5_EQE_OWNER_INIT_VAL = 0x1,
@@ -578,6 +579,10 @@ static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4])
        if (MLX5_CAP_MACSEC(dev, log_max_macsec_offload))
                async_event_mask |= (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
 
+       if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+               async_event_mask |=
+                       (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
        mask[0] = async_event_mask;
 
        if (MLX5_CAP_GEN(dev, event_cap))
index 4dcd26b..5a85d8c 100644 (file)
 #define ETHTOOL_PRIO_NUM_LEVELS 1
 #define ETHTOOL_NUM_PRIOS 11
 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
-/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}} */
-#define KERNEL_NIC_PRIO_NUM_LEVELS 7
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 8
 #define KERNEL_NIC_NUM_PRIOS 1
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
 #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
 
 #define KERNEL_TX_IPSEC_NUM_PRIOS  1
-#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 2
 #define KERNEL_TX_IPSEC_MIN_LEVEL        (KERNEL_TX_IPSEC_NUM_LEVELS)
 
 #define KERNEL_TX_MACSEC_NUM_PRIOS  1
index 0f9e4f0..5a80fb7 100644 (file)
@@ -353,12 +353,15 @@ void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
        cseg->general_id = cpu_to_be32(obj_id);
 }
 
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
 {
+       struct mlx5_aso_wqe *wqe;
        u16 pi;
 
        pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
-       return mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+       wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+       memset(wqe, 0, sizeof(*wqe));
+       return wqe;
 }
 
 void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
index 2d40dcf..afb078b 100644 (file)
@@ -15,6 +15,7 @@
 #define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
 #define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
 
+#define ASO_CTRL_READ_EN BIT(0)
 struct mlx5_wqe_aso_ctrl_seg {
        __be32  va_h;
        __be32  va_l; /* include read_enable */
@@ -71,13 +72,14 @@ enum {
 };
 
 enum {
+       MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
        MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
        MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
 };
 
 struct mlx5_aso;
 
-void *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
 void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
                        struct mlx5_aso_wqe *aso_wqe,
                        u32 obj_id, u32 opc_mode);
index 3728870..4632268 100644 (file)
@@ -302,6 +302,11 @@ static int nfp_net_xfrm_add_state(struct xfrm_state *x)
                return -EINVAL;
        }
 
+       if (x->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+               nn_err(nn, "Unsupported xfrm offload type\n");
+               return -EINVAL;
+       }
+
        cfg->spi = ntohl(x->id.spi);
 
        /* Hash/Authentication */
index 386336a..b93baf5 100644 (file)
@@ -149,6 +149,11 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
                return -EINVAL;
        }
 
+       if (xs->xso.type != XFRM_DEV_OFFLOAD_CRYPTO) {
+               netdev_err(dev, "Unsupported ipsec offload type\n");
+               return -EINVAL;
+       }
+
        /* find the first unused index */
        ret = nsim_ipsec_find_empty_idx(ipsec);
        if (ret < 0) {
index 294cfe1..152d2d7 100644 (file)
@@ -446,7 +446,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
        u8         max_modify_header_actions[0x8];
        u8         max_ft_level[0x8];
 
-       u8         reserved_at_40[0x6];
+       u8         reformat_add_esp_trasport[0x1];
+       u8         reserved_at_41[0x2];
+       u8         reformat_del_esp_trasport[0x1];
+       u8         reserved_at_44[0x2];
        u8         execute_aso[0x1];
        u8         reserved_at_47[0x19];
 
@@ -639,8 +642,10 @@ struct mlx5_ifc_fte_match_set_misc2_bits {
        u8         reserved_at_1a0[0x8];
 
        u8         macsec_syndrome[0x8];
+       u8         ipsec_syndrome[0x8];
+       u8         reserved_at_1b8[0x8];
 
-       u8         reserved_at_1b0[0x50];
+       u8         reserved_at_1c0[0x40];
 };
 
 struct mlx5_ifc_fte_match_set_misc3_bits {
@@ -6452,6 +6457,9 @@ enum mlx5_reformat_ctx_type {
        MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
        MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
        MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+       MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+       MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+       MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
        MLX5_REFORMAT_TYPE_INSERT_HDR = 0xf,
        MLX5_REFORMAT_TYPE_REMOVE_HDR = 0x10,
        MLX5_REFORMAT_TYPE_ADD_MACSEC = 0x11,
@@ -11631,6 +11639,41 @@ enum {
        MLX5_IPSEC_OBJECT_ICV_LEN_16B,
 };
 
+enum {
+       MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+       MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+       MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+       MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+       MLX5_IPSEC_ASO_MODE              = 0x0,
+       MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+       MLX5_IPSEC_ASO_INC_SN            = 0x2,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+       u8         valid[0x1];
+       u8         reserved_at_201[0x1];
+       u8         mode[0x2];
+       u8         window_sz[0x2];
+       u8         soft_lft_arm[0x1];
+       u8         hard_lft_arm[0x1];
+       u8         remove_flow_enable[0x1];
+       u8         esn_event_arm[0x1];
+       u8         reserved_at_20a[0x16];
+
+       u8         remove_flow_pkt_cnt[0x20];
+
+       u8         remove_flow_soft_lft[0x20];
+
+       u8         reserved_at_260[0x80];
+
+       u8         mode_parameter[0x20];
+
+       u8         replay_protection_window[0x100];
+};
+
 struct mlx5_ifc_ipsec_obj_bits {
        u8         modify_field_select[0x40];
        u8         full_offload[0x1];
@@ -11652,7 +11695,11 @@ struct mlx5_ifc_ipsec_obj_bits {
 
        u8         implicit_iv[0x40];
 
-       u8         reserved_at_100[0x700];
+       u8         reserved_at_100[0x8];
+       u8         ipsec_aso_access_pd[0x18];
+       u8         reserved_at_120[0xe0];
+
+       struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
 };
 
 struct mlx5_ifc_create_ipsec_obj_in_bits {
index f78db61..2287cb8 100644 (file)
@@ -1041,6 +1041,10 @@ struct xfrmdev_ops {
        bool    (*xdo_dev_offload_ok) (struct sk_buff *skb,
                                       struct xfrm_state *x);
        void    (*xdo_dev_state_advance_esn) (struct xfrm_state *x);
+       void    (*xdo_dev_state_update_curlft) (struct xfrm_state *x);
+       int     (*xdo_dev_policy_add) (struct xfrm_policy *x);
+       void    (*xdo_dev_policy_delete) (struct xfrm_policy *x);
+       void    (*xdo_dev_policy_free) (struct xfrm_policy *x);
 };
 #endif
 
index e0cc679..5413cdd 100644 (file)
@@ -129,6 +129,13 @@ struct xfrm_state_walk {
 enum {
        XFRM_DEV_OFFLOAD_IN = 1,
        XFRM_DEV_OFFLOAD_OUT,
+       XFRM_DEV_OFFLOAD_FWD,
+};
+
+enum {
+       XFRM_DEV_OFFLOAD_UNSPECIFIED,
+       XFRM_DEV_OFFLOAD_CRYPTO,
+       XFRM_DEV_OFFLOAD_PACKET,
 };
 
 struct xfrm_dev_offload {
@@ -137,6 +144,7 @@ struct xfrm_dev_offload {
        struct net_device       *real_dev;
        unsigned long           offload_handle;
        u8                      dir : 2;
+       u8                      type : 2;
 };
 
 struct xfrm_mode {
@@ -534,6 +542,8 @@ struct xfrm_policy {
        struct xfrm_tmpl        xfrm_vec[XFRM_MAX_DEPTH];
        struct hlist_node       bydst_inexact_list;
        struct rcu_head         rcu;
+
+       struct xfrm_dev_offload xdo;
 };
 
 static inline struct net *xp_net(const struct xfrm_policy *xp)
@@ -1093,6 +1103,29 @@ xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, un
 }
 
 #ifdef CONFIG_XFRM
+static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
+{
+       struct sec_path *sp = skb_sec_path(skb);
+
+       return sp->xvec[sp->len - 1];
+}
+#endif
+
+static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
+{
+#ifdef CONFIG_XFRM
+       struct sec_path *sp = skb_sec_path(skb);
+
+       if (!sp || !sp->olen || sp->len != sp->olen)
+               return NULL;
+
+       return &sp->ovec[sp->olen - 1];
+#else
+       return NULL;
+#endif
+}
+
+#ifdef CONFIG_XFRM
 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
                        unsigned short family);
 
@@ -1123,10 +1156,19 @@ static inline int __xfrm_policy_check2(struct sock *sk, int dir,
 {
        struct net *net = dev_net(skb->dev);
        int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
+       struct xfrm_offload *xo = xfrm_offload(skb);
+       struct xfrm_state *x;
 
        if (sk && sk->sk_policy[XFRM_POLICY_IN])
                return __xfrm_policy_check(sk, ndir, skb, family);
 
+       if (xo) {
+               x = xfrm_input_state(skb);
+               if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+                       return (xo->flags & CRYPTO_DONE) &&
+                              (xo->status & CRYPTO_SUCCESS);
+       }
+
        return __xfrm_check_nopolicy(net, skb, dir) ||
               __xfrm_check_dev_nopolicy(skb, dir, family) ||
               __xfrm_policy_check(sk, ndir, skb, family);
@@ -1529,6 +1571,23 @@ struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                                              unsigned short family);
 int xfrm_state_check_expire(struct xfrm_state *x);
+#ifdef CONFIG_XFRM_OFFLOAD
+static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x)
+{
+       struct xfrm_dev_offload *xdo = &x->xso;
+       struct net_device *dev = xdo->dev;
+
+       if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+               return;
+
+       if (dev && dev->xfrmdev_ops &&
+           dev->xfrmdev_ops->xdo_dev_state_update_curlft)
+               dev->xfrmdev_ops->xdo_dev_state_update_curlft(x);
+
+}
+#else
+static inline void xfrm_dev_state_update_curlft(struct xfrm_state *x) {}
+#endif
 void xfrm_state_insert(struct xfrm_state *x);
 int xfrm_state_add(struct xfrm_state *x);
 int xfrm_state_update(struct xfrm_state *x);
@@ -1578,6 +1637,8 @@ struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
 int xfrm_state_delete(struct xfrm_state *x);
 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
+int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
+                         bool task_valid);
 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
@@ -1860,29 +1921,6 @@ static inline void xfrm_states_delete(struct xfrm_state **states, int n)
 }
 #endif
 
-#ifdef CONFIG_XFRM
-static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
-{
-       struct sec_path *sp = skb_sec_path(skb);
-
-       return sp->xvec[sp->len - 1];
-}
-#endif
-
-static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
-{
-#ifdef CONFIG_XFRM
-       struct sec_path *sp = skb_sec_path(skb);
-
-       if (!sp || !sp->olen || sp->len != sp->olen)
-               return NULL;
-
-       return &sp->ovec[sp->olen - 1];
-#else
-       return NULL;
-#endif
-}
-
 void __init xfrm_dev_init(void);
 
 #ifdef CONFIG_XFRM_OFFLOAD
@@ -1892,6 +1930,9 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                       struct xfrm_user_offload *xuo,
                       struct netlink_ext_ack *extack);
+int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+                       struct xfrm_user_offload *xuo, u8 dir,
+                       struct netlink_ext_ack *extack);
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
 
 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
@@ -1940,6 +1981,28 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
                netdev_put(dev, &xso->dev_tracker);
        }
 }
+
+static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
+{
+       struct xfrm_dev_offload *xdo = &x->xdo;
+       struct net_device *dev = xdo->dev;
+
+       if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
+               dev->xfrmdev_ops->xdo_dev_policy_delete(x);
+}
+
+static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
+{
+       struct xfrm_dev_offload *xdo = &x->xdo;
+       struct net_device *dev = xdo->dev;
+
+       if (dev && dev->xfrmdev_ops) {
+               if (dev->xfrmdev_ops->xdo_dev_policy_free)
+                       dev->xfrmdev_ops->xdo_dev_policy_free(x);
+               xdo->dev = NULL;
+               netdev_put(dev, &xdo->dev_tracker);
+       }
+}
 #else
 static inline void xfrm_dev_resume(struct sk_buff *skb)
 {
@@ -1967,6 +2030,21 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
 {
 }
 
+static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+                                     struct xfrm_user_offload *xuo, u8 dir,
+                                     struct netlink_ext_ack *extack)
+{
+       return 0;
+}
+
+static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
+{
+}
+
+static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
+{
+}
+
 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 {
        return false;
index 4f84ea7..23543c3 100644 (file)
@@ -519,6 +519,12 @@ struct xfrm_user_offload {
  */
 #define XFRM_OFFLOAD_IPV6      1
 #define XFRM_OFFLOAD_INBOUND   2
+/* Two bits above are relevant for state path only, while
+ * offload is used for both policy and state flows.
+ *
+ * In policy offload mode, they are free and can be safely reused.
+ */
+#define XFRM_OFFLOAD_PACKET    4
 
 struct xfrm_userpolicy_default {
 #define XFRM_USERPOLICY_UNSPEC 0
index 21269e8..4aff76c 100644 (file)
@@ -132,6 +132,16 @@ struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t featur
        if (xo->flags & XFRM_GRO || x->xso.dir == XFRM_DEV_OFFLOAD_IN)
                return skb;
 
+       /* The packet was sent to HW IPsec packet offload engine,
+        * but to wrong device. Drop the packet, so it won't skip
+        * XFRM stack.
+        */
+       if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET && x->xso.dev != dev) {
+               kfree_skb(skb);
+               dev_core_stats_tx_dropped_inc(dev);
+               return NULL;
+       }
+
        /* This skb was already validated on the upper/virtual dev */
        if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
                return skb;
@@ -229,6 +239,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        struct xfrm_dev_offload *xso = &x->xso;
        xfrm_address_t *saddr;
        xfrm_address_t *daddr;
+       bool is_packet_offload;
 
        if (!x->type_offload) {
                NL_SET_ERR_MSG(extack, "Type doesn't support offload");
@@ -241,11 +252,13 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                return -EINVAL;
        }
 
-       if (xuo->flags & ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND)) {
+       if (xuo->flags &
+           ~(XFRM_OFFLOAD_IPV6 | XFRM_OFFLOAD_INBOUND | XFRM_OFFLOAD_PACKET)) {
                NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
                return -EINVAL;
        }
 
+       is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET;
        dev = dev_get_by_index(net, xuo->ifindex);
        if (!dev) {
                if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
@@ -260,7 +273,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
                                        x->props.family,
                                        xfrm_smark_get(0, x));
                if (IS_ERR(dst))
-                       return 0;
+                       return (is_packet_offload) ? -EINVAL : 0;
 
                dev = dst->dev;
 
@@ -271,7 +284,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
                xso->dev = NULL;
                dev_put(dev);
-               return 0;
+               return (is_packet_offload) ? -EINVAL : 0;
        }
 
        if (x->props.flags & XFRM_STATE_ESN &&
@@ -291,14 +304,28 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
        else
                xso->dir = XFRM_DEV_OFFLOAD_OUT;
 
+       if (is_packet_offload)
+               xso->type = XFRM_DEV_OFFLOAD_PACKET;
+       else
+               xso->type = XFRM_DEV_OFFLOAD_CRYPTO;
+
        err = dev->xfrmdev_ops->xdo_dev_state_add(x);
        if (err) {
                xso->dev = NULL;
                xso->dir = 0;
                xso->real_dev = NULL;
                netdev_put(dev, &xso->dev_tracker);
-
-               if (err != -EOPNOTSUPP) {
+               xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+
+               /* User explicitly requested packet offload mode and configured
+                * policy in addition to the XFRM state. So be civil to users,
+                * and return an error instead of taking fallback path.
+                *
+                * This WARN_ON() can be seen as a documentation for driver
+                * authors to do not return -EOPNOTSUPP in packet offload mode.
+                */
+               WARN_ON(err == -EOPNOTSUPP && is_packet_offload);
+               if (err != -EOPNOTSUPP || is_packet_offload) {
                        NL_SET_ERR_MSG(extack, "Device failed to offload this state");
                        return err;
                }
@@ -308,6 +335,69 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
 }
 EXPORT_SYMBOL_GPL(xfrm_dev_state_add);
 
+int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
+                       struct xfrm_user_offload *xuo, u8 dir,
+                       struct netlink_ext_ack *extack)
+{
+       struct xfrm_dev_offload *xdo = &xp->xdo;
+       struct net_device *dev;
+       int err;
+
+       if (!xuo->flags || xuo->flags & ~XFRM_OFFLOAD_PACKET) {
+               /* We support only packet offload mode and it means
+                * that user must set XFRM_OFFLOAD_PACKET bit.
+                */
+               NL_SET_ERR_MSG(extack, "Unrecognized flags in offload request");
+               return -EINVAL;
+       }
+
+       dev = dev_get_by_index(net, xuo->ifindex);
+       if (!dev)
+               return -EINVAL;
+
+       if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_policy_add) {
+               xdo->dev = NULL;
+               dev_put(dev);
+               NL_SET_ERR_MSG(extack, "Policy offload is not supported");
+               return -EINVAL;
+       }
+
+       xdo->dev = dev;
+       netdev_tracker_alloc(dev, &xdo->dev_tracker, GFP_ATOMIC);
+       xdo->real_dev = dev;
+       xdo->type = XFRM_DEV_OFFLOAD_PACKET;
+       switch (dir) {
+       case XFRM_POLICY_IN:
+               xdo->dir = XFRM_DEV_OFFLOAD_IN;
+               break;
+       case XFRM_POLICY_OUT:
+               xdo->dir = XFRM_DEV_OFFLOAD_OUT;
+               break;
+       case XFRM_POLICY_FWD:
+               xdo->dir = XFRM_DEV_OFFLOAD_FWD;
+               break;
+       default:
+               xdo->dev = NULL;
+               dev_put(dev);
+               NL_SET_ERR_MSG(extack, "Unrecognized offload direction");
+               return -EINVAL;
+       }
+
+       err = dev->xfrmdev_ops->xdo_dev_policy_add(xp);
+       if (err) {
+               xdo->dev = NULL;
+               xdo->real_dev = NULL;
+               xdo->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+               xdo->dir = 0;
+               netdev_put(dev, &xdo->dev_tracker);
+               NL_SET_ERR_MSG(extack, "Device failed to offload this policy");
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(xfrm_dev_policy_add);
+
 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
 {
        int mtu;
@@ -318,8 +408,9 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
        if (!x->type_offload || x->encap)
                return false;
 
-       if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
-           (!xdst->child->xfrm)) {
+       if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET ||
+           ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
+            !xdst->child->xfrm)) {
                mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
                if (skb->len <= mtu)
                        goto ok;
@@ -410,8 +501,10 @@ static int xfrm_api_check(struct net_device *dev)
 
 static int xfrm_dev_down(struct net_device *dev)
 {
-       if (dev->features & NETIF_F_HW_ESP)
+       if (dev->features & NETIF_F_HW_ESP) {
                xfrm_dev_state_flush(dev_net(dev), dev, true);
+               xfrm_dev_policy_flush(dev_net(dev), dev, true);
+       }
 
        return NOTIFY_DONE;
 }
index 78cb8d0..ff114d6 100644 (file)
@@ -492,7 +492,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
        struct xfrm_state *x = dst->xfrm;
        struct net *net = xs_net(x);
 
-       if (err <= 0)
+       if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
                goto resume;
 
        do {
@@ -717,6 +717,16 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
                break;
        }
 
+       if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
+               if (!xfrm_dev_offload_ok(skb, x)) {
+                       XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+                       kfree_skb(skb);
+                       return -EHOSTUNREACH;
+               }
+
+               return xfrm_output_resume(sk, skb, 0);
+       }
+
        secpath_reset(skb);
 
        if (xfrm_dev_offload_ok(skb, x)) {
index 9b9e276..e9eb82c 100644 (file)
@@ -425,6 +425,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
        if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
                BUG();
 
+       xfrm_dev_policy_free(policy);
        call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
@@ -535,7 +536,7 @@ redo:
                __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
                h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
                                pol->family, nhashmask, dbits, sbits);
-               if (!entry0) {
+               if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
                        hlist_del_rcu(&pol->bydst);
                        hlist_add_head_rcu(&pol->bydst, ndsttable + h);
                        h0 = h;
@@ -866,7 +867,7 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net,
                                break;
                }
 
-               if (newpos)
+               if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
                        hlist_add_behind_rcu(&policy->bydst, newpos);
                else
                        hlist_add_head_rcu(&policy->bydst, &n->hhead);
@@ -1347,7 +1348,7 @@ static void xfrm_hash_rebuild(struct work_struct *work)
                        else
                                break;
                }
-               if (newpos)
+               if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
                        hlist_add_behind_rcu(&policy->bydst, newpos);
                else
                        hlist_add_head_rcu(&policy->bydst, chain);
@@ -1524,7 +1525,7 @@ static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
                        break;
        }
 
-       if (newpos)
+       if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
                hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
        else
                hlist_add_head_rcu(&policy->bydst_inexact_list, chain);
@@ -1561,9 +1562,12 @@ static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
                        break;
        }
 
-       if (newpos)
+       if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
                hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
        else
+               /* Packet offload policies enter to the head
+                * to speed-up lookups.
+                */
                hlist_add_head_rcu(&policy->bydst, chain);
 
        return delpol;
@@ -1769,12 +1773,41 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
        }
        return err;
 }
+
+static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
+                                                    struct net_device *dev,
+                                                    bool task_valid)
+{
+       struct xfrm_policy *pol;
+       int err = 0;
+
+       list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+               if (pol->walk.dead ||
+                   xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
+                   pol->xdo.dev != dev)
+                       continue;
+
+               err = security_xfrm_policy_delete(pol->security);
+               if (err) {
+                       xfrm_audit_policy_delete(pol, 0, task_valid);
+                       return err;
+               }
+       }
+       return err;
+}
 #else
 static inline int
 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
 {
        return 0;
 }
+
+static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
+                                                    struct net_device *dev,
+                                                    bool task_valid)
+{
+       return 0;
+}
 #endif
 
 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
@@ -1814,6 +1847,44 @@ out:
 }
 EXPORT_SYMBOL(xfrm_policy_flush);
 
+int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
+                         bool task_valid)
+{
+       int dir, err = 0, cnt = 0;
+       struct xfrm_policy *pol;
+
+       spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+
+       err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
+       if (err)
+               goto out;
+
+again:
+       list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
+               dir = xfrm_policy_id2dir(pol->index);
+               if (pol->walk.dead ||
+                   dir >= XFRM_POLICY_MAX ||
+                   pol->xdo.dev != dev)
+                       continue;
+
+               __xfrm_policy_unlink(pol, dir);
+               spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+               cnt++;
+               xfrm_audit_policy_delete(pol, 1, task_valid);
+               xfrm_policy_kill(pol);
+               spin_lock_bh(&net->xfrm.xfrm_policy_lock);
+               goto again;
+       }
+       if (cnt)
+               __xfrm_policy_inexact_flush(net);
+       else
+               err = -ESRCH;
+out:
+       spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
+       return err;
+}
+EXPORT_SYMBOL(xfrm_dev_policy_flush);
+
 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
                     int (*func)(struct xfrm_policy *, int, int, void*),
                     void *data)
@@ -2113,6 +2184,9 @@ static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
                        break;
                }
        }
+       if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
+               goto skip_inexact;
+
        bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
        if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
                                                         daddr))
@@ -2245,6 +2319,7 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
        pol = __xfrm_policy_unlink(pol, dir);
        spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
        if (pol) {
+               xfrm_dev_policy_delete(pol);
                xfrm_policy_kill(pol);
                return 0;
        }
index 9ec481f..cc1d0ea 100644 (file)
@@ -84,6 +84,25 @@ static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
        return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
 }
 
+#define XFRM_STATE_INSERT(by, _n, _h, _type)                               \
+       {                                                                  \
+               struct xfrm_state *_x = NULL;                              \
+                                                                          \
+               if (_type != XFRM_DEV_OFFLOAD_PACKET) {                    \
+                       hlist_for_each_entry_rcu(_x, _h, by) {             \
+                               if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
+                                       continue;                          \
+                               break;                                     \
+                       }                                                  \
+               }                                                          \
+                                                                          \
+               if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)        \
+                       /* SAD is empty or consist from HW SAs only */     \
+                       hlist_add_head_rcu(_n, _h);                        \
+               else                                                       \
+                       hlist_add_before_rcu(_n, &_x->by);                 \
+       }
+
 static void xfrm_hash_transfer(struct hlist_head *list,
                               struct hlist_head *ndsttable,
                               struct hlist_head *nsrctable,
@@ -100,23 +119,25 @@ static void xfrm_hash_transfer(struct hlist_head *list,
                h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.reqid, x->props.family,
                                    nhashmask);
-               hlist_add_head_rcu(&x->bydst, ndsttable + h);
+               XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);
 
                h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
                                    x->props.family,
                                    nhashmask);
-               hlist_add_head_rcu(&x->bysrc, nsrctable + h);
+               XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);
 
                if (x->id.spi) {
                        h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
                                            x->id.proto, x->props.family,
                                            nhashmask);
-                       hlist_add_head_rcu(&x->byspi, nspitable + h);
+                       XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
+                                         x->xso.type);
                }
 
                if (x->km.seq) {
                        h = __xfrm_seq_hash(x->km.seq, nhashmask);
-                       hlist_add_head_rcu(&x->byseq, nseqtable + h);
+                       XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
+                                         x->xso.type);
                }
        }
 }
@@ -549,6 +570,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
        int err = 0;
 
        spin_lock(&x->lock);
+       xfrm_dev_state_update_curlft(x);
+
        if (x->km.state == XFRM_STATE_DEAD)
                goto out;
        if (x->km.state == XFRM_STATE_EXPIRED)
@@ -951,6 +974,49 @@ xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
        x->props.family = tmpl->encap_family;
 }
 
+static struct xfrm_state *__xfrm_state_lookup_all(struct net *net, u32 mark,
+                                                 const xfrm_address_t *daddr,
+                                                 __be32 spi, u8 proto,
+                                                 unsigned short family,
+                                                 struct xfrm_dev_offload *xdo)
+{
+       unsigned int h = xfrm_spi_hash(net, daddr, spi, proto, family);
+       struct xfrm_state *x;
+
+       hlist_for_each_entry_rcu(x, net->xfrm.state_byspi + h, byspi) {
+#ifdef CONFIG_XFRM_OFFLOAD
+               if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
+                       if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+                               /* HW states are in the head of list, there is
+                                * no need to iterate further.
+                                */
+                               break;
+
+                       /* Packet offload: both policy and SA should
+                        * have same device.
+                        */
+                       if (xdo->dev != x->xso.dev)
+                               continue;
+               } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+                       /* Skip HW policy for SW lookups */
+                       continue;
+#endif
+               if (x->props.family != family ||
+                   x->id.spi       != spi ||
+                   x->id.proto     != proto ||
+                   !xfrm_addr_equal(&x->id.daddr, daddr, family))
+                       continue;
+
+               if ((mark & x->mark.m) != x->mark.v)
+                       continue;
+               if (!xfrm_state_hold_rcu(x))
+                       continue;
+               return x;
+       }
+
+       return NULL;
+}
+
 static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
                                              const xfrm_address_t *daddr,
                                              __be32 spi, u8 proto,
@@ -1092,6 +1158,23 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
        rcu_read_lock();
        h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
        hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) {
+#ifdef CONFIG_XFRM_OFFLOAD
+               if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+                       if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+                               /* HW states are in the head of list, there is
+                                * no need to iterate further.
+                                */
+                               break;
+
+                       /* Packet offload: both policy and SA should
+                        * have same device.
+                        */
+                       if (pol->xdo.dev != x->xso.dev)
+                               continue;
+               } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+                       /* Skip HW policy for SW lookups */
+                       continue;
+#endif
                if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
@@ -1109,6 +1192,23 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
 
        h_wildcard = xfrm_dst_hash(net, daddr, &saddr_wildcard, tmpl->reqid, encap_family);
        hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h_wildcard, bydst) {
+#ifdef CONFIG_XFRM_OFFLOAD
+               if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+                       if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
+                               /* HW states are in the head of list, there is
+                                * no need to iterate further.
+                                */
+                               break;
+
+                       /* Packet offload: both policy and SA should
+                        * have same device.
+                        */
+                       if (pol->xdo.dev != x->xso.dev)
+                               continue;
+               } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+                       /* Skip HW policy for SW lookups */
+                       continue;
+#endif
                if (x->props.family == encap_family &&
                    x->props.reqid == tmpl->reqid &&
                    (mark & x->mark.m) == x->mark.v &&
@@ -1126,8 +1226,10 @@ found:
        x = best;
        if (!x && !error && !acquire_in_progress) {
                if (tmpl->id.spi &&
-                   (x0 = __xfrm_state_lookup(net, mark, daddr, tmpl->id.spi,
-                                             tmpl->id.proto, encap_family)) != NULL) {
+                   (x0 = __xfrm_state_lookup_all(net, mark, daddr,
+                                                 tmpl->id.spi, tmpl->id.proto,
+                                                 encap_family,
+                                                 &pol->xdo)) != NULL) {
                        to_put = x0;
                        error = -EEXIST;
                        goto out;
@@ -1161,21 +1263,53 @@ found:
                        x = NULL;
                        goto out;
                }
-
+#ifdef CONFIG_XFRM_OFFLOAD
+               if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
+                       struct xfrm_dev_offload *xdo = &pol->xdo;
+                       struct xfrm_dev_offload *xso = &x->xso;
+
+                       xso->type = XFRM_DEV_OFFLOAD_PACKET;
+                       xso->dir = xdo->dir;
+                       xso->dev = xdo->dev;
+                       xso->real_dev = xdo->real_dev;
+                       netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
+                                            GFP_ATOMIC);
+                       error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x);
+                       if (error) {
+                               xso->dir = 0;
+                               netdev_put(xso->dev, &xso->dev_tracker);
+                               xso->dev = NULL;
+                               xso->real_dev = NULL;
+                               xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+                               x->km.state = XFRM_STATE_DEAD;
+                               to_put = x;
+                               x = NULL;
+                               goto out;
+                       }
+               }
+#endif
                if (km_query(x, tmpl, pol) == 0) {
                        spin_lock_bh(&net->xfrm.xfrm_state_lock);
                        x->km.state = XFRM_STATE_ACQ;
                        list_add(&x->km.all, &net->xfrm.state_all);
-                       hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
+                       XFRM_STATE_INSERT(bydst, &x->bydst,
+                                         net->xfrm.state_bydst + h,
+                                         x->xso.type);
                        h = xfrm_src_hash(net, daddr, saddr, encap_family);
-                       hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
+                       XFRM_STATE_INSERT(bysrc, &x->bysrc,
+                                         net->xfrm.state_bysrc + h,
+                                         x->xso.type);
                        if (x->id.spi) {
                                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
-                               hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+                               XFRM_STATE_INSERT(byspi, &x->byspi,
+                                                 net->xfrm.state_byspi + h,
+                                                 x->xso.type);
                        }
                        if (x->km.seq) {
                                h = xfrm_seq_hash(net, x->km.seq);
-                               hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
+                               XFRM_STATE_INSERT(byseq, &x->byseq,
+                                                 net->xfrm.state_byseq + h,
+                                                 x->xso.type);
                        }
                        x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
                        hrtimer_start(&x->mtimer,
@@ -1185,6 +1319,18 @@ found:
                        xfrm_hash_grow_check(net, x->bydst.next != NULL);
                        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                } else {
+#ifdef CONFIG_XFRM_OFFLOAD
+                       struct xfrm_dev_offload *xso = &x->xso;
+
+                       if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
+                               xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+                               xso->dir = 0;
+                               netdev_put(xso->dev, &xso->dev_tracker);
+                               xso->dev = NULL;
+                               xso->real_dev = NULL;
+                               xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+                       }
+#endif
                        x->km.state = XFRM_STATE_DEAD;
                        to_put = x;
                        x = NULL;
@@ -1280,22 +1426,26 @@ static void __xfrm_state_insert(struct xfrm_state *x)
 
        h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
                          x->props.reqid, x->props.family);
-       hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
+       XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
+                         x->xso.type);
 
        h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
-       hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
+       XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
+                         x->xso.type);
 
        if (x->id.spi) {
                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
                                  x->props.family);
 
-               hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+               XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
+                                 x->xso.type);
        }
 
        if (x->km.seq) {
                h = xfrm_seq_hash(net, x->km.seq);
 
-               hlist_add_head_rcu(&x->byseq, net->xfrm.state_byseq + h);
+               XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
+                                 x->xso.type);
        }
 
        hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
@@ -1409,9 +1559,11 @@ static struct xfrm_state *__find_acq_core(struct net *net,
                              ktime_set(net->xfrm.sysctl_acq_expires, 0),
                              HRTIMER_MODE_REL_SOFT);
                list_add(&x->km.all, &net->xfrm.state_all);
-               hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
+               XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
+                                 x->xso.type);
                h = xfrm_src_hash(net, daddr, saddr, family);
-               hlist_add_head_rcu(&x->bysrc, net->xfrm.state_bysrc + h);
+               XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
+                                 x->xso.type);
 
                net->xfrm.state_num++;
 
@@ -1786,6 +1938,8 @@ EXPORT_SYMBOL(xfrm_state_update);
 
 int xfrm_state_check_expire(struct xfrm_state *x)
 {
+       xfrm_dev_state_update_curlft(x);
+
        if (!x->curlft.use_time)
                x->curlft.use_time = ktime_get_real_seconds();
 
@@ -2094,7 +2248,8 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
                spin_lock_bh(&net->xfrm.xfrm_state_lock);
                x->id.spi = newspi;
                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
-               hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
+               XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
+                                 x->xso.type);
                spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 
                err = 0;
index 0eb4696..cf5172d 100644 (file)
@@ -956,6 +956,8 @@ static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
        xuo->ifindex = xso->dev->ifindex;
        if (xso->dir == XFRM_DEV_OFFLOAD_IN)
                xuo->flags = XFRM_OFFLOAD_INBOUND;
+       if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
+               xuo->flags |= XFRM_OFFLOAD_PACKET;
 
        return 0;
 }
@@ -1890,6 +1892,15 @@ static struct xfrm_policy *xfrm_policy_construct(struct net *net,
        if (attrs[XFRMA_IF_ID])
                xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
 
+       /* configure the hardware if offload is requested */
+       if (attrs[XFRMA_OFFLOAD_DEV]) {
+               err = xfrm_dev_policy_add(net, xp,
+                                         nla_data(attrs[XFRMA_OFFLOAD_DEV]),
+                                         p->dir, extack);
+               if (err)
+                       goto error;
+       }
+
        return xp;
  error:
        *errp = err;
@@ -1929,6 +1940,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        xfrm_audit_policy_add(xp, err ? 0 : 1, true);
 
        if (err) {
+               xfrm_dev_policy_delete(xp);
                security_xfrm_policy_free(xp->security);
                kfree(xp);
                return err;
@@ -2041,6 +2053,8 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
                err = xfrm_mark_put(skb, &xp->mark);
        if (!err)
                err = xfrm_if_id_put(skb, xp->if_id);
+       if (!err && xp->xdo.dev)
+               err = copy_user_offload(&xp->xdo, skb);
        if (err) {
                nlmsg_cancel(skb, nlh);
                return err;
@@ -3379,6 +3393,8 @@ static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
                err = xfrm_mark_put(skb, &xp->mark);
        if (!err)
                err = xfrm_if_id_put(skb, xp->if_id);
+       if (!err && xp->xdo.dev)
+               err = copy_user_offload(&xp->xdo, skb);
        if (err) {
                nlmsg_cancel(skb, nlh);
                return err;
@@ -3497,6 +3513,8 @@ static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
                err = xfrm_mark_put(skb, &xp->mark);
        if (!err)
                err = xfrm_if_id_put(skb, xp->if_id);
+       if (!err && xp->xdo.dev)
+               err = copy_user_offload(&xp->xdo, skb);
        if (err) {
                nlmsg_cancel(skb, nlh);
                return err;
@@ -3580,6 +3598,8 @@ static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_e
                err = xfrm_mark_put(skb, &xp->mark);
        if (!err)
                err = xfrm_if_id_put(skb, xp->if_id);
+       if (!err && xp->xdo.dev)
+               err = copy_user_offload(&xp->xdo, skb);
        if (err)
                goto out_free_skb;