Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author David S. Miller <davem@davemloft.net>
Tue, 6 Mar 2018 05:53:44 +0000 (00:53 -0500)
committer David S. Miller <davem@davemloft.net>
Tue, 6 Mar 2018 06:20:46 +0000 (01:20 -0500)
All of the conflicts were cases of overlapping changes.

In net/core/devlink.c, we have to take care that the
resource size_params have become a struct member rather
than a pointer to such an object.
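
For illustration only, a minimal sketch (not the actual include/net/devlink.h
definitions; fields are trimmed to the ones touched by this resolution) of
what the pointer-to-member change means for the resolved devlink.c code:

    #include <linux/types.h>

    /* Trimmed sketch of the size-params object a driver registers. */
    struct devlink_resource_size_params {
            u64 size_min;
            u64 size_max;
            u64 size_granularity;
    };

    struct devlink_resource {
            /* before: a pointer into driver-owned storage
             *   struct devlink_resource_size_params *size_params;
             * after: an embedded copy owned by devlink core
             */
            struct devlink_resource_size_params size_params;
            /* other members omitted in this sketch */
    };

    /* So the resolved validation code dereferences with '.' instead of '->',
     * e.g. resource->size_params.size_max rather than
     * resource->size_params->size_max.
     */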

Signed-off-by: David S. Miller <davem@davemloft.net>
48 files changed:
MAINTAINERS
drivers/bluetooth/btusb.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/renesas/sh_eth.h
drivers/net/ppp/ppp_generic.c
drivers/net/tun.c
include/linux/phy.h
include/linux/skbuff.h
include/net/devlink.h
kernel/bpf/verifier.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/fragmentation.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bridge/br_netfilter_hooks.c
net/core/dev.c
net/core/devlink.c
net/core/skbuff.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/route.c
net/ipv4/tcp_input.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/netfilter/ip6t_rpfilter.c
net/ipv6/netfilter/nft_fib_ipv6.c
net/ipv6/sit.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/mac80211/rx.c
net/mac80211/tx.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_llc.c
net/tipc/group.c
net/tipc/socket.c
tools/testing/selftests/bpf/test_verifier.c

diff --cc MAINTAINERS
Simple merge
Simple merge
@@@ -3816,13 -4204,21 +3822,12 @@@ static struct devlink_resource_ops mlxs
        .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
  };
  
- static struct devlink_resource_size_params mlxsw_sp_kvd_size_params;
- static struct devlink_resource_size_params mlxsw_sp_linear_size_params;
- static struct devlink_resource_size_params mlxsw_sp_hash_single_size_params;
- static struct devlink_resource_size_params mlxsw_sp_hash_double_size_params;
 -static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
 -      .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
 -};
 -
 -static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
 -      .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
 -};
--
  static void
- mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core)
+ mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
+                                     struct devlink_resource_size_params *kvd_size_params,
+                                     struct devlink_resource_size_params *linear_size_params,
+                                     struct devlink_resource_size_params *hash_double_size_params,
+                                     struct devlink_resource_size_params *hash_single_size_params)
  {
        u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
                                                 KVD_SINGLE_MIN_SIZE);
@@@ -3876,8 -4274,8 +3883,8 @@@ static int mlxsw_sp_resources_register(
                                        true, kvd_size,
                                        MLXSW_SP_RESOURCE_KVD,
                                        DEVLINK_RESOURCE_ID_PARENT_TOP,
-                                       &mlxsw_sp_kvd_size_params,
+                                       &kvd_size_params,
 -                                      &mlxsw_sp_resource_kvd_ops);
 +                                      NULL);
        if (err)
                return err;
  
                                        false, double_size,
                                        MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                        MLXSW_SP_RESOURCE_KVD,
-                                       &mlxsw_sp_hash_double_size_params,
+                                       &hash_double_size_params,
 -                                      &mlxsw_sp_resource_kvd_hash_double_ops);
 +                                      NULL);
        if (err)
                return err;
  
                                        false, single_size,
                                        MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                        MLXSW_SP_RESOURCE_KVD,
-                                       &mlxsw_sp_hash_single_size_params,
+                                       &hash_single_size_params,
 -                                      &mlxsw_sp_resource_kvd_hash_single_ops);
 +                                      NULL);
        if (err)
                return err;
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
diff --cc net/core/dev.c
Simple merge
        resource->size_valid = size_valid;
  }
  
-       if (size > resource->size_params->size_max) {
 +static int
 +devlink_resource_validate_size(struct devlink_resource *resource, u64 size,
 +                             struct netlink_ext_ack *extack)
 +{
 +      u64 reminder;
 +      int err = 0;
 +
-       if (size < resource->size_params->size_min) {
++      if (size > resource->size_params.size_max) {
 +              NL_SET_ERR_MSG_MOD(extack, "Size larger than maximum");
 +              err = -EINVAL;
 +      }
 +
-       div64_u64_rem(size, resource->size_params->size_granularity, &reminder);
++      if (size < resource->size_params.size_min) {
 +              NL_SET_ERR_MSG_MOD(extack, "Size smaller than minimum");
 +              err = -EINVAL;
 +      }
 +
++      div64_u64_rem(size, resource->size_params.size_granularity, &reminder);
 +      if (reminder) {
 +              NL_SET_ERR_MSG_MOD(extack, "Wrong granularity");
 +              err = -EINVAL;
 +      }
 +
 +      return err;
 +}
 +
  static int devlink_nl_cmd_resource_set(struct sk_buff *skb,
                                       struct genl_info *info)
  {
Simple merge
Simple merge
@@@ -694,16 -710,9 +694,9 @@@ void ip_tunnel_xmit(struct sk_buff *skb
                }
        }
  
-       if (tunnel->fwmark) {
-               ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
-                                   tunnel->parms.o_key, RT_TOS(tos),
-                                   tunnel->parms.link, tunnel->fwmark);
-       }
-       else {
-               ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
-                                   tunnel->parms.o_key, RT_TOS(tos),
-                                   tunnel->parms.link, skb->mark);
-       }
 -      init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 -                       tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
 -                       tunnel->fwmark);
++      ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
++                          tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link,
++                          tunnel->fwmark);
  
        if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
                goto tx_error;
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -48,12 -48,8 +48,8 @@@ static bool rpfilter_lookup_reverse6(st
        }
  
        fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
-       if ((flags & XT_RPFILTER_LOOSE) == 0) {
-               fl6.flowi6_oif = dev->ifindex;
-               lookup_flags |= RT6_LOOKUP_F_IFACE;
-       }
  
 -      rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
 +      rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
        if (rt->dst.error)
                goto out;
  
@@@ -180,9 -180,7 +180,8 @@@ void nft_fib6_eval(const struct nft_exp
        }
  
        *dest = 0;
-  again:
 -      rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
 +      rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, pkt->skb,
 +                                    lookup_flags);
        if (rt->dst.error)
                goto put_rt_err;
  
diff --cc net/ipv6/sit.c
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -176,7 -177,7 +176,8 @@@ static int smc_lgr_create(struct smc_so
  
        lnk = &lgr->lnk[SMC_SINGLE_LINK];
        /* initialize link */
 +      lnk->state = SMC_LNK_ACTIVATING;
+       lnk->link_id = SMC_SINGLE_LINK;
        lnk->smcibdev = smcibdev;
        lnk->ibport = ibport;
        lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
@@@ -206,106 -92,9 +206,106 @@@ int smc_llc_send_confirm_link(struct sm
        memcpy(confllc->sender_mac, mac, ETH_ALEN);
        memcpy(confllc->sender_gid, gid, SMC_GID_SIZE);
        hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
-       /* confllc->link_num = SMC_SINGLE_LINK; already done by memset above */
+       confllc->link_num = link->link_id;
        memcpy(confllc->link_uid, lgr->id, SMC_LGR_ID_SIZE);
 -      confllc->max_links = SMC_LINKS_PER_LGR_MAX;
 +      confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS; /* enforce peer resp. */
 +      /* send llc message */
 +      rc = smc_wr_tx_send(link, pend);
 +      return rc;
 +}
 +
 +/* send ADD LINK request or response */
 +int smc_llc_send_add_link(struct smc_link *link, u8 mac[],
 +                        union ib_gid *gid,
 +                        enum smc_llc_reqresp reqresp)
 +{
 +      struct smc_llc_msg_add_link *addllc;
 +      struct smc_wr_tx_pend_priv *pend;
 +      struct smc_wr_buf *wr_buf;
 +      int rc;
 +
 +      rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +      if (rc)
 +              return rc;
 +      addllc = (struct smc_llc_msg_add_link *)wr_buf;
 +      memset(addllc, 0, sizeof(*addllc));
 +      addllc->hd.common.type = SMC_LLC_ADD_LINK;
 +      addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
 +      if (reqresp == SMC_LLC_RESP) {
 +              addllc->hd.flags |= SMC_LLC_FLAG_RESP;
 +              /* always reject more links for now */
 +              addllc->hd.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
 +              addllc->hd.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
 +      }
 +      memcpy(addllc->sender_mac, mac, ETH_ALEN);
 +      memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
 +      /* send llc message */
 +      rc = smc_wr_tx_send(link, pend);
 +      return rc;
 +}
 +
 +/* send DELETE LINK request or response */
 +int smc_llc_send_delete_link(struct smc_link *link,
 +                           enum smc_llc_reqresp reqresp)
 +{
 +      struct smc_llc_msg_del_link *delllc;
 +      struct smc_wr_tx_pend_priv *pend;
 +      struct smc_wr_buf *wr_buf;
 +      int rc;
 +
 +      rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +      if (rc)
 +              return rc;
 +      delllc = (struct smc_llc_msg_del_link *)wr_buf;
 +      memset(delllc, 0, sizeof(*delllc));
 +      delllc->hd.common.type = SMC_LLC_DELETE_LINK;
 +      delllc->hd.length = sizeof(struct smc_llc_msg_add_link);
 +      if (reqresp == SMC_LLC_RESP)
 +              delllc->hd.flags |= SMC_LLC_FLAG_RESP;
 +      /* DEL_LINK_ALL because only 1 link supported */
 +      delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
 +      delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
 +      delllc->link_num = link->link_id;
 +      /* send llc message */
 +      rc = smc_wr_tx_send(link, pend);
 +      return rc;
 +}
 +
 +/* send LLC test link request or response */
 +int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16],
 +                         enum smc_llc_reqresp reqresp)
 +{
 +      struct smc_llc_msg_test_link *testllc;
 +      struct smc_wr_tx_pend_priv *pend;
 +      struct smc_wr_buf *wr_buf;
 +      int rc;
 +
 +      rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +      if (rc)
 +              return rc;
 +      testllc = (struct smc_llc_msg_test_link *)wr_buf;
 +      memset(testllc, 0, sizeof(*testllc));
 +      testllc->hd.common.type = SMC_LLC_TEST_LINK;
 +      testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
 +      if (reqresp == SMC_LLC_RESP)
 +              testllc->hd.flags |= SMC_LLC_FLAG_RESP;
 +      memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
 +      /* send llc message */
 +      rc = smc_wr_tx_send(link, pend);
 +      return rc;
 +}
 +
 +/* send a prepared message */
 +static int smc_llc_send_message(struct smc_link *link, void *llcbuf, int llclen)
 +{
 +      struct smc_wr_tx_pend_priv *pend;
 +      struct smc_wr_buf *wr_buf;
 +      int rc;
 +
 +      rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
 +      if (rc)
 +              return rc;
 +      memcpy(wr_buf, llcbuf, llclen);
        /* send llc message */
        rc = smc_wr_tx_send(link, pend);
        return rc;
Simple merge
Simple merge
@@@ -11224,94 -11164,63 +11224,151 @@@ static struct bpf_test tests[] = 
                .prog_type = BPF_PROG_TYPE_TRACEPOINT,
        },
        {
 +              "jit: lsh, rsh, arsh by 1",
 +              .insns = {
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_MOV64_IMM(BPF_REG_1, 0xff),
 +                      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
 +                      BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
 +                      BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
 +                      BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
 +                      BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
 +                      BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_MOV64_IMM(BPF_REG_0, 2),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .result = ACCEPT,
 +              .retval = 2,
 +      },
 +      {
 +              "jit: mov32 for ldimm64, 1",
 +              .insns = {
 +                      BPF_MOV64_IMM(BPF_REG_0, 2),
 +                      BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
 +                      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
 +                      BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .result = ACCEPT,
 +              .retval = 2,
 +      },
 +      {
 +              "jit: mov32 for ldimm64, 2",
 +              .insns = {
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
 +                      BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
 +                      BPF_MOV64_IMM(BPF_REG_0, 2),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .result = ACCEPT,
 +              .retval = 2,
 +      },
 +      {
 +              "jit: various mul tests",
 +              .insns = {
 +                      BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
 +                      BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
 +                      BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
 +                      BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
 +                      BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
 +                      BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
 +                      BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
 +                      BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
 +                      BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
 +                      BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
 +                      BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
 +                      BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
 +                      BPF_MOV64_IMM(BPF_REG_0, 1),
 +                      BPF_EXIT_INSN(),
 +                      BPF_MOV64_IMM(BPF_REG_0, 2),
 +                      BPF_EXIT_INSN(),
 +              },
 +              .result = ACCEPT,
 +              .retval = 2,
 +      },
++      {
+               "xadd/w check unaligned stack",
+               .insns = {
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+                       BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -7),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "misaligned stack access off",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "xadd/w check unaligned map",
+               .insns = {
+                       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+                       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+                       BPF_LD_MAP_FD(BPF_REG_1, 0),
+                       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+                                    BPF_FUNC_map_lookup_elem),
+                       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+                       BPF_EXIT_INSN(),
+                       BPF_MOV64_IMM(BPF_REG_1, 1),
+                       BPF_STX_XADD(BPF_W, BPF_REG_0, BPF_REG_1, 3),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 3),
+                       BPF_EXIT_INSN(),
+               },
+               .fixup_map1 = { 3 },
+               .result = REJECT,
+               .errstr = "misaligned value access off",
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       },
+       {
+               "xadd/w check unaligned pkt",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+                                   offsetof(struct xdp_md, data)),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+                                   offsetof(struct xdp_md, data_end)),
+                       BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+                       BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 2),
+                       BPF_MOV64_IMM(BPF_REG_0, 99),
+                       BPF_JMP_IMM(BPF_JA, 0, 0, 6),
+                       BPF_MOV64_IMM(BPF_REG_0, 1),
+                       BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+                       BPF_ST_MEM(BPF_W, BPF_REG_2, 3, 0),
+                       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 1),
+                       BPF_STX_XADD(BPF_W, BPF_REG_2, BPF_REG_0, 2),
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = REJECT,
+               .errstr = "BPF_XADD stores into R2 packet",
+               .prog_type = BPF_PROG_TYPE_XDP,
+       },
  };
  
  static int probe_filter_length(const struct bpf_insn *fp)