Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 29c9542..21499a5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))
 
 static int
-nfp_flower_xmit_flow(struct net_device *netdev,
-                    struct nfp_fl_payload *nfp_flow, u8 mtype)
+nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow,
+                    u8 mtype)
 {
        u32 meta_len, key_len, mask_len, act_len, tot_len;
-       struct nfp_repr *priv = netdev_priv(netdev);
        struct sk_buff *skb;
        unsigned char *msg;
 
@@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;
 
-       skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
+       skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
 
@@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev,
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;
 
-       nfp_ctrl_tx(priv->app->ctrl, skb);
+       nfp_ctrl_tx(app->ctrl, skb);
 
        return 0;
 }
@@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
 
 static int
 nfp_flower_calculate_key_layers(struct nfp_app *app,
+                               struct net_device *netdev,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct tc_cls_flower_offload *flow,
-                               bool egress,
                                enum nfp_flower_tun_type *tun_type)
 {
        struct flow_dissector_key_basic *mask_basic = NULL;
@@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  flow->key);
-               if (!egress)
-                       return -EOPNOTSUPP;
 
                if (mask_enc_ctl->addr_type != 0xffff ||
                    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
@@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                default:
                        return -EOPNOTSUPP;
                }
-       } else if (egress) {
-               /* Reject non tunnel matches offloaded to egress repr. */
-               return -EOPNOTSUPP;
+
+               /* Ensure the ingress netdev matches the expected tun type. */
+               if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type))
+                       return -EOPNOTSUPP;
        }
 
        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -374,7 +372,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 }
 
 static struct nfp_fl_payload *
-nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
+nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer)
 {
        struct nfp_fl_payload *flow_pay;
 
@@ -398,7 +396,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
 
        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->meta.flags = 0;
-       flow_pay->ingress_offload = !egress;
 
        return flow_pay;
 
@@ -416,7 +413,6 @@ err_free_flow:
  * @app:       Pointer to the APP handle
  * @netdev:    netdev structure.
  * @flow:      TC flower classifier offload structure.
- * @egress:    NFP netdev is the egress.
  *
  * Adds a new flow to the repeated hash structure and action payload.
  *
@@ -424,46 +420,35 @@ err_free_flow:
  */
 static int
 nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
-                      struct tc_cls_flower_offload *flow, bool egress)
+                      struct tc_cls_flower_offload *flow)
 {
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
-       struct nfp_port *port = nfp_port_from_netdev(netdev);
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
-       struct net_device *ingr_dev;
+       struct nfp_port *port = NULL;
        int err;
 
-       ingr_dev = egress ? NULL : netdev;
-       flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-                                             NFP_FL_STATS_CTX_DONT_CARE);
-       if (flow_pay) {
-               /* Ignore as duplicate if it has been added by different cb. */
-               if (flow_pay->ingress_offload && egress)
-                       return 0;
-               else
-                       return -EOPNOTSUPP;
-       }
+       if (nfp_netdev_is_nfp_repr(netdev))
+               port = nfp_port_from_netdev(netdev);
 
        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
        if (!key_layer)
                return -ENOMEM;
 
-       err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
+       err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow,
                                              &tun_type);
        if (err)
                goto err_free_key_ls;
 
-       flow_pay = nfp_flower_allocate_new(key_layer, egress);
+       flow_pay = nfp_flower_allocate_new(key_layer);
        if (!flow_pay) {
                err = -ENOMEM;
                goto err_free_key_ls;
        }
 
-       flow_pay->ingress_dev = egress ? NULL : netdev;
-
-       err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
-                                           tun_type);
+       err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev,
+                                           flow_pay, tun_type);
        if (err)
                goto err_destroy_flow;
 
@@ -471,13 +456,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        if (err)
                goto err_destroy_flow;
 
-       err = nfp_compile_flow_metadata(app, flow, flow_pay,
-                                       flow_pay->ingress_dev);
-       if (err)
-               goto err_destroy_flow;
-
-       err = nfp_flower_xmit_flow(netdev, flow_pay,
-                                  NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+       err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev);
        if (err)
                goto err_destroy_flow;
 
@@ -485,15 +464,27 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
        err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node,
                                     nfp_flower_table_params);
        if (err)
-               goto err_destroy_flow;
+               goto err_release_metadata;
 
-       port->tc_offload_cnt++;
+       err = nfp_flower_xmit_flow(app, flow_pay,
+                                  NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
+       if (err)
+               goto err_remove_rhash;
+
+       if (port)
+               port->tc_offload_cnt++;
 
        /* Deallocate flow payload when flower rule has been destroyed. */
        kfree(key_layer);
 
        return 0;
 
+err_remove_rhash:
+       WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table,
+                                           &flow_pay->fl_node,
+                                           nfp_flower_table_params));
+err_release_metadata:
+       nfp_modify_flow_metadata(app, flow_pay);
 err_destroy_flow:
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
@@ -509,7 +500,6 @@ err_free_key_ls:
  * @app:       Pointer to the APP handle
  * @netdev:    netdev structure.
  * @flow:      TC flower classifier offload structure
- * @egress:    Netdev is the egress dev.
  *
  * Removes a flow from the repeated hash structure and clears the
  * action payload.
@@ -518,19 +508,19 @@ err_free_key_ls:
  */
 static int
 nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
-                      struct tc_cls_flower_offload *flow, bool egress)
+                      struct tc_cls_flower_offload *flow)
 {
-       struct nfp_port *port = nfp_port_from_netdev(netdev);
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *nfp_flow;
-       struct net_device *ingr_dev;
+       struct nfp_port *port = NULL;
        int err;
 
-       ingr_dev = egress ? NULL : netdev;
-       nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-                                             NFP_FL_STATS_CTX_DONT_CARE);
+       if (nfp_netdev_is_nfp_repr(netdev))
+               port = nfp_port_from_netdev(netdev);
+
+       nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow)
-               return egress ? 0 : -ENOENT;
+               return -ENOENT;
 
        err = nfp_modify_flow_metadata(app, nfp_flow);
        if (err)
@@ -539,13 +529,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
        if (nfp_flow->nfp_tun_ipv4_addr)
                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);
 
-       err = nfp_flower_xmit_flow(netdev, nfp_flow,
+       err = nfp_flower_xmit_flow(app, nfp_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
        if (err)
                goto err_free_flow;
 
 err_free_flow:
-       port->tc_offload_cnt--;
+       if (port)
+               port->tc_offload_cnt--;
        kfree(nfp_flow->action_data);
        kfree(nfp_flow->mask_data);
        kfree(nfp_flow->unmasked_data);
@@ -561,7 +552,6 @@ err_free_flow:
  * @app:       Pointer to the APP handle
  * @netdev:    Netdev structure.
  * @flow:      TC flower classifier offload structure
- * @egress:    Netdev is the egress dev.
  *
  * Populates a flow statistics structure which corresponds to a
  * specific flow.
@@ -570,22 +560,16 @@ err_free_flow:
  */
 static int
 nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
-                    struct tc_cls_flower_offload *flow, bool egress)
+                    struct tc_cls_flower_offload *flow)
 {
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *nfp_flow;
-       struct net_device *ingr_dev;
        u32 ctx_id;
 
-       ingr_dev = egress ? NULL : netdev;
-       nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
-                                             NFP_FL_STATS_CTX_DONT_CARE);
+       nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev);
        if (!nfp_flow)
                return -EINVAL;
 
-       if (nfp_flow->ingress_offload && egress)
-               return 0;
-
        ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id);
 
        spin_lock_bh(&priv->stats_lock);
@@ -602,35 +586,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
 
 static int
 nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
-                       struct tc_cls_flower_offload *flower, bool egress)
+                       struct tc_cls_flower_offload *flower)
 {
        if (!eth_proto_is_802_3(flower->common.protocol))
                return -EOPNOTSUPP;
 
        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
-               return nfp_flower_add_offload(app, netdev, flower, egress);
+               return nfp_flower_add_offload(app, netdev, flower);
        case TC_CLSFLOWER_DESTROY:
-               return nfp_flower_del_offload(app, netdev, flower, egress);
+               return nfp_flower_del_offload(app, netdev, flower);
        case TC_CLSFLOWER_STATS:
-               return nfp_flower_get_stats(app, netdev, flower, egress);
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
-                                 void *cb_priv)
-{
-       struct nfp_repr *repr = cb_priv;
-
-       if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
-               return -EOPNOTSUPP;
-
-       switch (type) {
-       case TC_SETUP_CLSFLOWER:
-               return nfp_flower_repr_offload(repr->app, repr->netdev,
-                                              type_data, true);
+               return nfp_flower_get_stats(app, netdev, flower);
        default:
                return -EOPNOTSUPP;
        }
@@ -647,7 +614,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
-                                              type_data, false);
+                                              type_data);
        default:
                return -EOPNOTSUPP;
        }
@@ -686,3 +653,129 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
                return -EOPNOTSUPP;
        }
 }
+
+struct nfp_flower_indr_block_cb_priv {
+       struct net_device *netdev;
+       struct nfp_app *app;
+       struct list_head list;
+};
+
+static struct nfp_flower_indr_block_cb_priv *
+nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app,
+                                    struct net_device *netdev)
+{
+       struct nfp_flower_indr_block_cb_priv *cb_priv;
+       struct nfp_flower_priv *priv = app->priv;
+
+       /* All callback list access should be protected by RTNL. */
+       ASSERT_RTNL();
+
+       list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list)
+               if (cb_priv->netdev == netdev)
+                       return cb_priv;
+
+       return NULL;
+}
+
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+                                         void *type_data, void *cb_priv)
+{
+       struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
+       struct tc_cls_flower_offload *flower = type_data;
+
+       if (flower->common.chain_index)
+               return -EOPNOTSUPP;
+
+       switch (type) {
+       case TC_SETUP_CLSFLOWER:
+               return nfp_flower_repr_offload(priv->app, priv->netdev,
+                                              type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int
+nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app,
+                              struct tc_block_offload *f)
+{
+       struct nfp_flower_indr_block_cb_priv *cb_priv;
+       struct nfp_flower_priv *priv = app->priv;
+       int err;
+
+       if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+               return -EOPNOTSUPP;
+
+       switch (f->command) {
+       case TC_BLOCK_BIND:
+               cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL);
+               if (!cb_priv)
+                       return -ENOMEM;
+
+               cb_priv->netdev = netdev;
+               cb_priv->app = app;
+               list_add(&cb_priv->list, &priv->indr_block_cb_priv);
+
+               err = tcf_block_cb_register(f->block,
+                                           nfp_flower_setup_indr_block_cb,
+                                           netdev, cb_priv, f->extack);
+               if (err) {
+                       list_del(&cb_priv->list);
+                       kfree(cb_priv);
+               }
+
+               return err;
+       case TC_BLOCK_UNBIND:
+               tcf_block_cb_unregister(f->block,
+                                       nfp_flower_setup_indr_block_cb, netdev);
+               cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev);
+               if (cb_priv) {
+                       list_del(&cb_priv->list);
+                       kfree(cb_priv);
+               }
+
+               return 0;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+static int
+nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
+                           enum tc_setup_type type, void *type_data)
+{
+       switch (type) {
+       case TC_SETUP_BLOCK:
+               return nfp_flower_setup_indr_tc_block(netdev, cb_priv,
+                                                     type_data);
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
+                                      struct net_device *netdev,
+                                      unsigned long event)
+{
+       int err;
+
+       if (!nfp_fl_is_netdev_to_offload(netdev))
+               return NOTIFY_OK;
+
+       if (event == NETDEV_REGISTER) {
+               err = __tc_indr_block_cb_register(netdev, app,
+                                                 nfp_flower_indr_setup_tc_cb,
+                                                 netdev);
+               if (err)
+                       nfp_flower_cmsg_warn(app,
+                                            "Indirect block reg failed - %s\n",
+                                            netdev->name);
+       } else if (event == NETDEV_UNREGISTER) {
+               __tc_indr_block_cb_unregister(netdev,
+                                             nfp_flower_indr_setup_tc_cb,
+                                             netdev);
+       }
+
+       return NOTIFY_OK;
+}
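
Note on usage (not part of the diff above): the new nfp_flower_reg_indir_block_handler() entry point is meant to be driven from the flower app's netdevice notifier, so that indirect TC blocks are registered and unregistered as candidate tunnel netdevs appear and disappear. A minimal sketch of such a caller follows; the function name nfp_flower_netdev_event() and its exact signature are illustrative assumptions, not contents of this commit.

	/* Illustrative notifier hook (assumed name/signature): forward
	 * NETDEV_REGISTER/NETDEV_UNREGISTER events to the indirect block
	 * handler introduced above. The handler itself checks, via
	 * nfp_fl_is_netdev_to_offload(), whether the netdev is worth
	 * tracking and always returns a notifier return code.
	 */
	static int
	nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
				unsigned long event, void *ptr)
	{
		return nfp_flower_reg_indir_block_handler(app, netdev, event);
	}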