Merge tag 'selinux-pr-20201214' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Dec 2020 19:01:04 +0000 (11:01 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 16 Dec 2020 19:01:04 +0000 (11:01 -0800)
Pull selinux updates from Paul Moore:
 "While we have a small number of SELinux patches for v5.11, there are a
  few changes worth highlighting:

   - Change the LSM network hooks to pass flowi_common structs instead
     of the parent flowi struct as the LSMs do not currently need the
     full flowi struct and they do not have enough information to use it
     safely (missing information on the address family).

     This patch was discussed both with Herbert Xu (representing team
     netdev) and James Morris (representing team
     LSMs-other-than-SELinux).

   - Fix how we handle errors in inode_doinit_with_dentry() so that we
     attempt to properly label the inode on following lookups instead of
     continuing to treat it as unlabeled.

   - Tweak the kernel logic around allowx, auditallowx, and dontauditx
     SELinux policy statements such that the auditallowx/dontauditx are
     effective even without the allowx statement.

  Everything passes our test suite"

* tag 'selinux-pr-20201214' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux:
  lsm,selinux: pass flowi_common instead of flowi to the LSM hooks
  selinux: Fix fall-through warnings for Clang
  selinux: drop super_block backpointer from superblock_security_struct
  selinux: fix inode_doinit_with_dentry() LABEL_INVALID error handling
  selinux: allow dontauditx and auditallowx rules to take effect without allowx
  selinux: fix error initialization in inode_doinit_with_dentry()

18 files changed:
1  2 
drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
include/linux/lsm_hook_defs.h
include/linux/security.h
net/dccp/ipv4.c
net/dccp/ipv6.c
net/ipv4/inet_connection_sock.c
net/ipv4/syncookies.c
net/ipv4/udp.c
net/ipv6/af_inet6.c
net/ipv6/icmp.c
net/ipv6/netfilter/nf_reject_ipv6.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/netfilter/nf_synproxy_core.c
net/xfrm/xfrm_state.c
security/security.c
security/selinux/hooks.c

@@@ -212,7 -212,7 +212,7 @@@ static struct sk_buff *alloc_ctrl_skb(s
  {
        if (likely(skb && !skb_shared(skb) && !skb_cloned(skb))) {
                __skb_trim(skb, 0);
 -              refcount_add(2, &skb->users);
 +              refcount_inc(&skb->users);
        } else {
                skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
        }
@@@ -772,13 -772,14 +772,13 @@@ static int chtls_pass_open_rpl(struct c
        if (rpl->status != CPL_ERR_NONE) {
                pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
                        rpl->status, stid);
 -              return CPL_RET_BUF_DONE;
 +      } else {
 +              cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
 +              sock_put(listen_ctx->lsk);
 +              kfree(listen_ctx);
 +              module_put(THIS_MODULE);
        }
 -      cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
 -      sock_put(listen_ctx->lsk);
 -      kfree(listen_ctx);
 -      module_put(THIS_MODULE);
 -
 -      return 0;
 +      return CPL_RET_BUF_DONE;
  }
  
  static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
        if (rpl->status != CPL_ERR_NONE) {
                pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
                        rpl->status, stid);
 -              return CPL_RET_BUF_DONE;
 +      } else {
 +              cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
 +              sock_put(listen_ctx->lsk);
 +              kfree(listen_ctx);
 +              module_put(THIS_MODULE);
        }
 -
 -      cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
 -      sock_put(listen_ctx->lsk);
 -      kfree(listen_ctx);
 -      module_put(THIS_MODULE);
 -
 -      return 0;
 +      return CPL_RET_BUF_DONE;
  }
  
  static void chtls_purge_wr_queue(struct sock *sk)
@@@ -1145,7 -1148,7 +1145,7 @@@ static struct sock *chtls_recv_sock(str
                fl6.daddr = ip6h->saddr;
                fl6.fl6_dport = inet_rsk(oreq)->ir_rmt_port;
                fl6.fl6_sport = htons(inet_rsk(oreq)->ir_num);
-               security_req_classify_flow(oreq, flowi6_to_flowi(&fl6));
+               security_req_classify_flow(oreq, flowi6_to_flowi_common(&fl6));
                dst = ip6_dst_lookup_flow(sock_net(lsk), lsk, &fl6, NULL);
                if (IS_ERR(dst))
                        goto free_sk;
        sk_setup_caps(newsk, dst);
        ctx = tls_get_ctx(lsk);
        newsk->sk_destruct = ctx->sk_destruct;
 +      newsk->sk_prot_creator = lsk->sk_prot_creator;
        csk->sk = newsk;
        csk->passive_reap_next = oreq;
        csk->tx_chan = cxgb4_port_chan(ndev);
        csk->sndbuf = csk->snd_win;
        csk->ulp_mode = ULP_MODE_TLS;
        step = cdev->lldi->nrxq / cdev->lldi->nchan;
 -      csk->rss_qid = cdev->lldi->rxq_ids[port_id * step];
        rxq_idx = port_id * step;
 +      rxq_idx += cdev->round_robin_cnt++ % step;
 +      csk->rss_qid = cdev->lldi->rxq_ids[rxq_idx];
        csk->txq_idx = (rxq_idx < cdev->lldi->ntxq) ? rxq_idx :
                        port_id * step;
        csk->sndbuf = newsk->sk_sndbuf;
@@@ -1513,6 -1514,7 +1513,6 @@@ static void add_to_reap_list(struct soc
        struct chtls_sock *csk = sk->sk_user_data;
  
        local_bh_disable();
 -      bh_lock_sock(sk);
        release_tcp_port(sk); /* release the port immediately */
  
        spin_lock(&reap_list_lock);
        if (!csk->passive_reap_next)
                schedule_work(&reap_task);
        spin_unlock(&reap_list_lock);
 -      bh_unlock_sock(sk);
        local_bh_enable();
  }
  
@@@ -301,7 -301,7 +301,7 @@@ LSM_HOOK(void, LSM_RET_VOID, sk_clone_s
         struct sock *newsk)
  LSM_HOOK(void, LSM_RET_VOID, sk_getsecid, struct sock *sk, u32 *secid)
  LSM_HOOK(void, LSM_RET_VOID, sock_graft, struct sock *sk, struct socket *parent)
 -LSM_HOOK(int, 0, inet_conn_request, struct sock *sk, struct sk_buff *skb,
 +LSM_HOOK(int, 0, inet_conn_request, const struct sock *sk, struct sk_buff *skb,
         struct request_sock *req)
  LSM_HOOK(void, LSM_RET_VOID, inet_csk_clone, struct sock *newsk,
         const struct request_sock *req)
@@@ -311,7 -311,7 +311,7 @@@ LSM_HOOK(int, 0, secmark_relabel_packet
  LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void)
  LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void)
  LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req,
-        struct flowi *fl)
+        struct flowi_common *flic)
  LSM_HOOK(int, 0, tun_dev_alloc_security, void **security)
  LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security)
  LSM_HOOK(int, 0, tun_dev_create, void)
@@@ -351,7 -351,7 +351,7 @@@ LSM_HOOK(int, 0, xfrm_state_delete_secu
  LSM_HOOK(int, 0, xfrm_policy_lookup, struct xfrm_sec_ctx *ctx, u32 fl_secid,
         u8 dir)
  LSM_HOOK(int, 1, xfrm_state_pol_flow_match, struct xfrm_state *x,
-        struct xfrm_policy *xp, const struct flowi *fl)
+        struct xfrm_policy *xp, const struct flowi_common *flic)
  LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid,
         int ckall)
  #endif /* CONFIG_SECURITY_NETWORK_XFRM */
diff --combined include/linux/security.h
@@@ -127,7 -127,6 +127,7 @@@ enum lockdown_reason 
        LOCKDOWN_PERF,
        LOCKDOWN_TRACEFS,
        LOCKDOWN_XMON_RW,
 +      LOCKDOWN_XFRM_SECRET,
        LOCKDOWN_CONFIDENTIALITY_MAX,
  };
  
@@@ -168,7 -167,7 +168,7 @@@ struct sk_buff
  struct sock;
  struct sockaddr;
  struct socket;
- struct flowi;
+ struct flowi_common;
  struct dst_entry;
  struct xfrm_selector;
  struct xfrm_policy;
@@@ -870,7 -869,7 +870,7 @@@ static inline int security_inode_killpr
  
  static inline int security_inode_getsecurity(struct inode *inode, const char *name, void **buffer, bool alloc)
  {
 -      return -EOPNOTSUPP;
 +      return cap_inode_getsecurity(inode, name, buffer, alloc);
  }
  
  static inline int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
@@@ -1356,10 -1355,11 +1356,11 @@@ int security_socket_getpeersec_dgram(st
  int security_sk_alloc(struct sock *sk, int family, gfp_t priority);
  void security_sk_free(struct sock *sk);
  void security_sk_clone(const struct sock *sk, struct sock *newsk);
- void security_sk_classify_flow(struct sock *sk, struct flowi *fl);
- void security_req_classify_flow(const struct request_sock *req, struct flowi *fl);
+ void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic);
+ void security_req_classify_flow(const struct request_sock *req,
+                               struct flowi_common *flic);
  void security_sock_graft(struct sock*sk, struct socket *parent);
 -int security_inet_conn_request(struct sock *sk,
 +int security_inet_conn_request(const struct sock *sk,
                        struct sk_buff *skb, struct request_sock *req);
  void security_inet_csk_clone(struct sock *newsk,
                        const struct request_sock *req);
@@@ -1508,11 -1508,13 +1509,13 @@@ static inline void security_sk_clone(co
  {
  }
  
- static inline void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+ static inline void security_sk_classify_flow(struct sock *sk,
+                                            struct flowi_common *flic)
  {
  }
  
- static inline void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+ static inline void security_req_classify_flow(const struct request_sock *req,
+                                             struct flowi_common *flic)
  {
  }
  
@@@ -1520,7 -1522,7 +1523,7 @@@ static inline void security_sock_graft(
  {
  }
  
 -static inline int security_inet_conn_request(struct sock *sk,
 +static inline int security_inet_conn_request(const struct sock *sk,
                        struct sk_buff *skb, struct request_sock *req)
  {
        return 0;
@@@ -1639,9 -1641,9 +1642,9 @@@ void security_xfrm_state_free(struct xf
  int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
  int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
                                       struct xfrm_policy *xp,
-                                      const struct flowi *fl);
+                                      const struct flowi_common *flic);
  int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid);
- void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
+ void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic);
  
  #else /* CONFIG_SECURITY_NETWORK_XFRM */
  
@@@ -1693,7 -1695,8 +1696,8 @@@ static inline int security_xfrm_policy_
  }
  
  static inline int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
-                       struct xfrm_policy *xp, const struct flowi *fl)
+                                                    struct xfrm_policy *xp,
+                                                    const struct flowi_common *flic)
  {
        return 1;
  }
@@@ -1703,7 -1706,8 +1707,8 @@@ static inline int security_xfrm_decode_
        return 0;
  }
  
- static inline void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+ static inline void security_skb_classify_flow(struct sk_buff *skb,
+                                             struct flowi_common *flic)
  {
  }
  
diff --combined net/dccp/ipv4.c
@@@ -427,7 -427,7 +427,7 @@@ struct sock *dccp_v4_request_recv_sock(
  
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
 -      *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 +      *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
        if (*own_req)
                ireq->ireq_opt = NULL;
        else
@@@ -464,7 -464,7 +464,7 @@@ static struct dst_entry* dccp_v4_route_
                .fl4_dport = dccp_hdr(skb)->dccph_sport,
        };
  
-       security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
+       security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
        rt = ip_route_output_flow(net, &fl4, sk);
        if (IS_ERR(rt)) {
                IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
diff --combined net/dccp/ipv6.c
@@@ -203,7 -203,7 +203,7 @@@ static int dccp_v6_send_response(const 
        fl6.flowi6_oif = ireq->ir_iif;
        fl6.fl6_dport = ireq->ir_rmt_port;
        fl6.fl6_sport = htons(ireq->ir_num);
-       security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+       security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
  
  
        rcu_read_lock();
@@@ -279,7 -279,7 +279,7 @@@ static void dccp_v6_ctl_send_reset(cons
        fl6.flowi6_oif = inet6_iif(rxskb);
        fl6.fl6_dport = dccp_hdr(skb)->dccph_dport;
        fl6.fl6_sport = dccp_hdr(skb)->dccph_sport;
-       security_skb_classify_flow(rxskb, flowi6_to_flowi(&fl6));
+       security_skb_classify_flow(rxskb, flowi6_to_flowi_common(&fl6));
  
        /* sk = NULL, but it is safe for now. RST socket required. */
        dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
@@@ -533,7 -533,7 +533,7 @@@ static struct sock *dccp_v6_request_rec
                dccp_done(newsk);
                goto out;
        }
 -      *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 +      *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash), NULL);
        /* Clone pktoptions received with SYN, if we own the req */
        if (*own_req && ireq->pktopts) {
                newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
@@@ -907,7 -907,7 +907,7 @@@ static int dccp_v6_connect(struct sock 
        fl6.flowi6_oif = sk->sk_bound_dev_if;
        fl6.fl6_dport = usin->sin6_port;
        fl6.fl6_sport = inet->inet_sport;
-       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+       security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
  
        opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);
@@@ -602,7 -602,7 +602,7 @@@ struct dst_entry *inet_csk_route_req(co
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num), sk->sk_uid);
-       security_req_classify_flow(req, flowi4_to_flowi(fl4));
+       security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
@@@ -640,7 -640,7 +640,7 @@@ struct dst_entry *inet_csk_route_child_
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num), sk->sk_uid);
-       security_req_classify_flow(req, flowi4_to_flowi(fl4));
+       security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
@@@ -787,7 -787,7 +787,7 @@@ static void reqsk_queue_hash_req(struc
        timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
        mod_timer(&req->rsk_timer, jiffies + timeout);
  
 -      inet_ehash_insert(req_to_sk(req), NULL);
 +      inet_ehash_insert(req_to_sk(req), NULL, NULL);
        /* before letting lookups find us, make sure all req fields
         * are committed to memory and refcnt initialized.
         */
diff --combined net/ipv4/syncookies.c
@@@ -331,7 -331,7 +331,7 @@@ struct sock *cookie_v4_check(struct soc
        __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
 -      int mss;
 +      int full_space, mss;
        struct rtable *rt;
        __u8 rcv_wscale;
        struct flowi4 fl4;
                           inet_sk_flowi_flags(sk),
                           opt->srr ? opt->faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, th->source, th->dest, sk->sk_uid);
-       security_req_classify_flow(req, flowi4_to_flowi(&fl4));
+       security_req_classify_flow(req, flowi4_to_flowi_common(&fl4));
        rt = ip_route_output_key(sock_net(sk), &fl4);
        if (IS_ERR(rt)) {
                reqsk_free(req);
  
        /* Try to redo what tcp_v4_send_synack did. */
        req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
 +      /* limit the window selection if the user enforce a smaller rx buffer */
 +      full_space = tcp_full_space(sk);
 +      if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
 +          (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
 +              req->rsk_window_clamp = full_space;
  
 -      tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
 +      tcp_select_initial_window(sk, full_space, req->mss,
                                  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                  ireq->wscale_ok, &rcv_wscale,
                                  dst_metric(&rt->dst, RTAX_INITRWND));
diff --combined net/ipv4/udp.c
@@@ -541,7 -541,7 +541,7 @@@ static inline struct sock *__udp4_lib_l
                                 inet_sdif(skb), udptable, skb);
  }
  
 -struct sock *udp4_lib_lookup_skb(struct sk_buff *skb,
 +struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
                                 __be16 sport, __be16 dport)
  {
        const struct iphdr *iph = ip_hdr(skb);
                                 iph->daddr, dport, inet_iif(skb),
                                 inet_sdif(skb), &udp_table, NULL);
  }
 -EXPORT_SYMBOL_GPL(udp4_lib_lookup_skb);
  
  /* Must be called under rcu_read_lock().
   * Does increment socket refcount.
@@@ -701,7 -702,7 +701,7 @@@ int __udp4_lib_err(struct sk_buff *skb
        sk = __udp4_lib_lookup(net, iph->daddr, uh->dest,
                               iph->saddr, uh->source, skb->dev->ifindex,
                               inet_sdif(skb), udptable, NULL);
 -      if (!sk) {
 +      if (!sk || udp_sk(sk)->encap_type) {
                /* No socket for error: try tunnels before discarding */
                sk = ERR_PTR(-ENOENT);
                if (static_branch_unlikely(&udp_encap_needed_key)) {
@@@ -873,7 -874,7 +873,7 @@@ static int udp_send_skb(struct sk_buff 
        struct sock *sk = skb->sk;
        struct inet_sock *inet = inet_sk(sk);
        struct udphdr *uh;
 -      int err = 0;
 +      int err;
        int is_udplite = IS_UDPLITE(sk);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
@@@ -1196,7 -1197,7 +1196,7 @@@ int udp_sendmsg(struct sock *sk, struc
                                   faddr, saddr, dport, inet->inet_sport,
                                   sk->sk_uid);
  
-               security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
+               security_sk_classify_flow(sk, flowi4_to_flowi_common(fl4));
                rt = ip_route_output_flow(net, fl4, sk);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
@@@ -2037,9 -2038,6 +2037,9 @@@ static int __udp_queue_rcv_skb(struct s
                if (rc == -ENOMEM)
                        UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
                                        is_udplite);
 +              else
 +                      UDP_INC_STATS(sock_net(sk), UDP_MIB_MEMERRORS,
 +                                    is_udplite);
                UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb(skb);
                trace_udp_fail_queue_rcv_skb(rc, sk);
@@@ -2175,7 -2173,7 +2175,7 @@@ static int udp_queue_rcv_skb(struct soc
                __skb_pull(skb, skb_transport_offset(skb));
                ret = udp_queue_rcv_one_skb(sk, skb);
                if (ret > 0)
 -                      ip_protocol_deliver_rcu(dev_net(skb->dev), skb, -ret);
 +                      ip_protocol_deliver_rcu(dev_net(skb->dev), skb, ret);
        }
        return 0;
  }
diff --combined net/ipv6/af_inet6.c
@@@ -451,7 -451,7 +451,7 @@@ int inet6_bind(struct socket *sock, str
        /* BPF prog is run before any checks are done so that if the prog
         * changes context in a wrong way it will be caught.
         */
 -      err = BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr);
 +      err = BPF_CGROUP_RUN_PROG_INET6_BIND_LOCK(sk, uaddr);
        if (err)
                return err;
  
@@@ -819,7 -819,7 +819,7 @@@ int inet6_sk_rebuild_header(struct soc
                fl6.fl6_dport = inet->inet_dport;
                fl6.fl6_sport = inet->inet_sport;
                fl6.flowi6_uid = sk->sk_uid;
-               security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+               security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
  
                rcu_read_lock();
                final_p = fl6_update_dst(&fl6, rcu_dereference(np->opt),
diff --combined net/ipv6/icmp.c
@@@ -158,13 -158,7 +158,13 @@@ static bool is_ineligible(const struct 
                tp = skb_header_pointer(skb,
                        ptr+offsetof(struct icmp6hdr, icmp6_type),
                        sizeof(_type), &_type);
 -              if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
 +
 +              /* Based on RFC 8200, Section 4.5 Fragment Header, return
 +               * false if this is a fragment packet with no icmp header info.
 +               */
 +              if (!tp && frag_off != 0)
 +                      return false;
 +              else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK))
                        return true;
        }
        return false;
@@@ -573,7 -567,7 +573,7 @@@ void icmp6_send(struct sk_buff *skb, u
        fl6.fl6_icmp_code = code;
        fl6.flowi6_uid = sock_net_uid(net, NULL);
        fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, NULL);
-       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+       security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
  
        np = inet6_sk(sk);
  
@@@ -755,7 -749,7 +755,7 @@@ static void icmpv6_echo_reply(struct sk
        fl6.fl6_icmp_type = ICMPV6_ECHO_REPLY;
        fl6.flowi6_mark = mark;
        fl6.flowi6_uid = sock_net_uid(net, NULL);
-       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+       security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
  
        local_bh_disable();
        sk = icmpv6_xmit_lock(net);
@@@ -1008,7 -1002,7 +1008,7 @@@ void icmpv6_flow_init(struct sock *sk, 
        fl6->fl6_icmp_type      = type;
        fl6->fl6_icmp_code      = 0;
        fl6->flowi6_oif         = oif;
-       security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
+       security_sk_classify_flow(sk, flowi6_to_flowi_common(fl6));
  }
  
  static void __net_exit icmpv6_sk_exit(struct net *net)
  #include <linux/netfilter_ipv6.h>
  #include <linux/netfilter_bridge.h>
  
 +static bool nf_reject_v6_csum_ok(struct sk_buff *skb, int hook)
 +{
 +      const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 +      int thoff;
 +      __be16 fo;
 +      u8 proto = ip6h->nexthdr;
 +
 +      if (skb_csum_unnecessary(skb))
 +              return true;
 +
 +      if (ip6h->payload_len &&
 +          pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
 +              return false;
 +
 +      ip6h = ipv6_hdr(skb);
 +      thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
 +      if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
 +              return false;
 +
 +      if (!nf_reject_verify_csum(proto))
 +              return true;
 +
 +      return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
 +}
 +
 +static int nf_reject_ip6hdr_validate(struct sk_buff *skb)
 +{
 +      struct ipv6hdr *hdr;
 +      u32 pkt_len;
 +
 +      if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
 +              return 0;
 +
 +      hdr = ipv6_hdr(skb);
 +      if (hdr->version != 6)
 +              return 0;
 +
 +      pkt_len = ntohs(hdr->payload_len);
 +      if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
 +              return 0;
 +
 +      return 1;
 +}
 +
 +struct sk_buff *nf_reject_skb_v6_tcp_reset(struct net *net,
 +                                         struct sk_buff *oldskb,
 +                                         const struct net_device *dev,
 +                                         int hook)
 +{
 +      struct sk_buff *nskb;
 +      const struct tcphdr *oth;
 +      struct tcphdr _oth;
 +      unsigned int otcplen;
 +      struct ipv6hdr *nip6h;
 +
 +      if (!nf_reject_ip6hdr_validate(oldskb))
 +              return NULL;
 +
 +      oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
 +      if (!oth)
 +              return NULL;
 +
 +      nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
 +                       LL_MAX_HEADER, GFP_ATOMIC);
 +      if (!nskb)
 +              return NULL;
 +
 +      nskb->dev = (struct net_device *)dev;
 +
 +      skb_reserve(nskb, LL_MAX_HEADER);
 +      nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
 +                                   net->ipv6.devconf_all->hop_limit);
 +      nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
 +      nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
 +
 +      return nskb;
 +}
 +EXPORT_SYMBOL_GPL(nf_reject_skb_v6_tcp_reset);
 +
 +struct sk_buff *nf_reject_skb_v6_unreach(struct net *net,
 +                                       struct sk_buff *oldskb,
 +                                       const struct net_device *dev,
 +                                       int hook, u8 code)
 +{
 +      struct sk_buff *nskb;
 +      struct ipv6hdr *nip6h;
 +      struct icmp6hdr *icmp6h;
 +      unsigned int len;
 +
 +      if (!nf_reject_ip6hdr_validate(oldskb))
 +              return NULL;
 +
 +      /* Include "As much of invoking packet as possible without the ICMPv6
 +       * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
 +       */
 +      len = min_t(unsigned int, 1220, oldskb->len);
 +
 +      if (!pskb_may_pull(oldskb, len))
 +              return NULL;
 +
 +      if (!nf_reject_v6_csum_ok(oldskb, hook))
 +              return NULL;
 +
 +      nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
 +                       LL_MAX_HEADER + len, GFP_ATOMIC);
 +      if (!nskb)
 +              return NULL;
 +
 +      nskb->dev = (struct net_device *)dev;
 +
 +      skb_reserve(nskb, LL_MAX_HEADER);
 +      nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
 +                                   net->ipv6.devconf_all->hop_limit);
 +
 +      skb_reset_transport_header(nskb);
 +      icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
 +      icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
 +      icmp6h->icmp6_code = code;
 +
 +      skb_put_data(nskb, skb_network_header(oldskb), len);
 +      nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
 +
 +      icmp6h->icmp6_cksum =
 +              csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
 +                              nskb->len - sizeof(struct ipv6hdr),
 +                              IPPROTO_ICMPV6,
 +                              csum_partial(icmp6h,
 +                                           nskb->len - sizeof(struct ipv6hdr),
 +                                           0));
 +
 +      return nskb;
 +}
 +EXPORT_SYMBOL_GPL(nf_reject_skb_v6_unreach);
 +
  const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
                                              struct tcphdr *otcph,
                                              unsigned int *otcplen, int hook)
@@@ -275,8 -141,7 +275,8 @@@ static int nf_reject6_fill_skb_dst(stru
        return 0;
  }
  
 -void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
 +void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
 +                  int hook)
  {
        struct net_device *br_indev __maybe_unused;
        struct sk_buff *nskb;
        fl6.fl6_sport = otcph->dest;
        fl6.fl6_dport = otcph->source;
  
 -      if (hook == NF_INET_PRE_ROUTING) {
 +      if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
                nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
                if (!dst)
                        return;
  
        fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
        fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
-       security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
+       security_skb_classify_flow(oldskb, flowi6_to_flowi_common(&fl6));
        dst = ip6_route_output(net, NULL, &fl6);
        if (dst->error) {
                dst_release(dst);
                dev_queue_xmit(nskb);
        } else
  #endif
 -              ip6_local_out(net, nskb->sk, nskb);
 +              ip6_local_out(net, sk, nskb);
  }
  EXPORT_SYMBOL_GPL(nf_send_reset6);
  
@@@ -403,8 -268,7 +403,8 @@@ void nf_send_unreach6(struct net *net, 
        if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
                skb_in->dev = net->loopback_dev;
  
 -      if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
 +      if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
 +          nf_reject6_fill_skb_dst(skb_in) < 0)
                return;
  
        icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
diff --combined net/ipv6/syncookies.c
@@@ -136,7 -136,7 +136,7 @@@ struct sock *cookie_v6_check(struct soc
        __u32 cookie = ntohl(th->ack_seq) - 1;
        struct sock *ret = sk;
        struct request_sock *req;
 -      int mss;
 +      int full_space, mss;
        struct dst_entry *dst;
        __u8 rcv_wscale;
        u32 tsoff = 0;
                fl6.fl6_dport = ireq->ir_rmt_port;
                fl6.fl6_sport = inet_sk(sk)->inet_sport;
                fl6.flowi6_uid = sk->sk_uid;
-               security_req_classify_flow(req, flowi6_to_flowi(&fl6));
+               security_req_classify_flow(req, flowi6_to_flowi_common(&fl6));
  
                dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
                if (IS_ERR(dst))
        }
  
        req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
 -      tcp_select_initial_window(sk, tcp_full_space(sk), req->mss,
 +      /* limit the window selection if the user enforce a smaller rx buffer */
 +      full_space = tcp_full_space(sk);
 +      if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
 +          (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
 +              req->rsk_window_clamp = full_space;
 +
 +      tcp_select_initial_window(sk, full_space, req->mss,
                                  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                  ireq->wscale_ok, &rcv_wscale,
                                  dst_metric(dst, RTAX_INITRWND));
diff --combined net/ipv6/tcp_ipv6.c
@@@ -278,7 -278,7 +278,7 @@@ static int tcp_v6_connect(struct sock *
        opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
        final_p = fl6_update_dst(&fl6, opt, &final);
  
-       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+       security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
  
        dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
        if (IS_ERR(dst)) {
@@@ -527,21 -527,15 +527,21 @@@ static int tcp_v6_send_synack(const str
                if (np->repflow && ireq->pktopts)
                        fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
  
 +              tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
 +                              (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
 +                              (np->tclass & INET_ECN_MASK) :
 +                              np->tclass;
 +
 +              if (!INET_ECN_is_capable(tclass) &&
 +                  tcp_bpf_ca_needs_ecn((struct sock *)req))
 +                      tclass |= INET_ECN_ECT_0;
 +
                rcu_read_lock();
                opt = ireq->ipv6_opt;
 -              tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
 -                              tcp_rsk(req)->syn_tos : np->tclass;
                if (!opt)
                        opt = rcu_dereference(np->opt);
                err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
 -                             tclass & ~INET_ECN_MASK,
 -                             sk->sk_priority);
 +                             tclass, sk->sk_priority);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@@ -829,15 -823,9 +829,15 @@@ static void tcp_v6_init_req(struct requ
  }
  
  static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
 +                                        struct sk_buff *skb,
                                          struct flowi *fl,
 -                                        const struct request_sock *req)
 +                                        struct request_sock *req)
  {
 +      tcp_v6_init_req(req, sk, skb);
 +
 +      if (security_inet_conn_request(sk, skb, req))
 +              return NULL;
 +
        return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
  }
  
@@@ -858,6 -846,7 +858,6 @@@ const struct tcp_request_sock_ops tcp_r
        .req_md5_lookup =       tcp_v6_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
  #endif
 -      .init_req       =       tcp_v6_init_req,
  #ifdef CONFIG_SYN_COOKIES
        .cookie_init_seq =      cookie_v6_init_sequence,
  #endif
@@@ -965,7 -954,7 +965,7 @@@ static void tcp_v6_send_response(const 
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
        fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
-       security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));
+       security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
  
        /* Pass a socket to ip6_dst_lookup either it is for RST
         * Underlying function will use this to retrieve the network
@@@ -1204,7 -1193,6 +1204,7 @@@ static struct sock *tcp_v6_syn_recv_soc
        const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
        struct ipv6_txoptions *opt;
        struct inet_sock *newinet;
 +      bool found_dup_sk = false;
        struct tcp_sock *newtp;
        struct sock *newsk;
  #ifdef CONFIG_TCP_MD5SIG
        if (np->repflow)
                newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
  
 -      /* Set ToS of the new socket based upon the value of incoming SYN. */
 +      /* Set ToS of the new socket based upon the value of incoming SYN.
 +       * ECT bits are set later in tcp_init_transfer().
 +       */
        if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
                newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
  
                tcp_done(newsk);
                goto out;
        }
 -      *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
 +      *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
 +                                     &found_dup_sk);
        if (*own_req) {
                tcp_move_syn(newtp, req);
  
                                skb_set_owner_r(newnp->pktoptions, newsk);
                        }
                }
 +      } else {
 +              if (!req_unhash && found_dup_sk) {
 +                      /* This code path should only be executed in the
 +                       * syncookie case only
 +                       */
 +                      bh_unlock_sock(newsk);
 +                      sock_put(newsk);
 +                      newsk = NULL;
 +              }
        }
  
        return newsk;
diff --combined net/ipv6/udp.c
@@@ -276,7 -276,7 +276,7 @@@ static struct sock *__udp6_lib_lookup_s
                                 inet6_sdif(skb), udptable, skb);
  }
  
 -struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 +struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
                                 __be16 sport, __be16 dport)
  {
        const struct ipv6hdr *iph = ipv6_hdr(skb);
                                 &iph->daddr, dport, inet6_iif(skb),
                                 inet6_sdif(skb), &udp_table, NULL);
  }
 -EXPORT_SYMBOL_GPL(udp6_lib_lookup_skb);
  
  /* Must be called under rcu_read_lock().
   * Does increment socket refcount.
@@@ -559,7 -560,7 +559,7 @@@ int __udp6_lib_err(struct sk_buff *skb
  
        sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
                               inet6_iif(skb), inet6_sdif(skb), udptable, NULL);
 -      if (!sk) {
 +      if (!sk || udp_sk(sk)->encap_type) {
                /* No socket for error: try tunnels before discarding */
                sk = ERR_PTR(-ENOENT);
                if (static_branch_unlikely(&udpv6_encap_needed_key)) {
@@@ -636,9 -637,6 +636,9 @@@ static int __udpv6_queue_rcv_skb(struc
                if (rc == -ENOMEM)
                        UDP6_INC_STATS(sock_net(sk),
                                         UDP_MIB_RCVBUFERRORS, is_udplite);
 +              else
 +                      UDP6_INC_STATS(sock_net(sk),
 +                                     UDP_MIB_MEMERRORS, is_udplite);
                UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
                kfree_skb(skb);
                return -1;
@@@ -1498,7 -1496,7 +1498,7 @@@ do_udp_sendmsg
        } else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
  
-       security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
+       security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
  
        if (ipc6.tclass < 0)
                ipc6.tclass = np->tclass;
@@@ -446,7 -446,7 +446,7 @@@ synproxy_send_tcp(struct net *net
  
        skb_dst_set_noref(nskb, skb_dst(skb));
        nskb->protocol = htons(ETH_P_IP);
 -      if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
 +      if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC))
                goto free_nskb;
  
        if (nfct) {
@@@ -849,7 -849,7 +849,7 @@@ synproxy_send_tcp_ipv6(struct net *net
        fl6.fl6_sport = nth->source;
        fl6.fl6_dport = nth->dest;
        security_skb_classify_flow((struct sk_buff *)skb,
-                                  flowi6_to_flowi(&fl6));
+                                  flowi6_to_flowi_common(&fl6));
        err = nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
        if (err) {
                goto free_nskb;
diff --combined net/xfrm/xfrm_state.c
@@@ -1021,7 -1021,8 +1021,8 @@@ static void xfrm_state_look_at(struct x
                if ((x->sel.family &&
                     (x->sel.family != family ||
                      !xfrm_selector_match(&x->sel, fl, family))) ||
-                   !security_xfrm_state_pol_flow_match(x, pol, fl))
+                   !security_xfrm_state_pol_flow_match(x, pol,
+                                                       &fl->u.__fl_common))
                        return;
  
                if (!*best ||
                if ((!x->sel.family ||
                     (x->sel.family == family &&
                      xfrm_selector_match(&x->sel, fl, family))) &&
-                   security_xfrm_state_pol_flow_match(x, pol, fl))
+                   security_xfrm_state_pol_flow_match(x, pol,
+                                                      &fl->u.__fl_common))
                        *error = -ESRCH;
        }
  }
@@@ -2004,7 -2006,6 +2006,7 @@@ int xfrm_alloc_spi(struct xfrm_state *x
        int err = -ENOENT;
        __be32 minspi = htonl(low);
        __be32 maxspi = htonl(high);
 +      __be32 newspi = 0;
        u32 mark = x->mark.v & x->mark.m;
  
        spin_lock_bh(&x->lock);
                        xfrm_state_put(x0);
                        goto unlock;
                }
 -              x->id.spi = minspi;
 +              newspi = minspi;
        } else {
                u32 spi = 0;
                for (h = 0; h < high-low+1; h++) {
                        spi = low + prandom_u32()%(high-low+1);
                        x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
 -                              x->id.spi = htonl(spi);
 +                              newspi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
 -      if (x->id.spi) {
 +      if (newspi) {
                spin_lock_bh(&net->xfrm.xfrm_state_lock);
 +              x->id.spi = newspi;
                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
                spin_unlock_bh(&net->xfrm.xfrm_state_lock);
@@@ -2382,10 -2382,8 +2384,10 @@@ int xfrm_user_policy(struct sock *sk, i
        if (in_compat_syscall()) {
                struct xfrm_translator *xtr = xfrm_get_translator();
  
 -              if (!xtr)
 +              if (!xtr) {
 +                      kfree(data);
                        return -EOPNOTSUPP;
 +              }
  
                err = xtr->xlate_user_policy_sockptr(&data, optlen);
                xfrm_put_translator(xtr);
diff --combined security/security.c
@@@ -65,7 -65,6 +65,7 @@@ const char *const lockdown_reasons[LOCK
        [LOCKDOWN_PERF] = "unsafe use of perf",
        [LOCKDOWN_TRACEFS] = "use of tracefs",
        [LOCKDOWN_XMON_RW] = "xmon read and write access",
 +      [LOCKDOWN_XFRM_SECRET] = "xfrm SA secret",
        [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
  };
  
@@@ -2208,15 -2207,16 +2208,16 @@@ void security_sk_clone(const struct soc
  }
  EXPORT_SYMBOL(security_sk_clone);
  
- void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
+ void security_sk_classify_flow(struct sock *sk, struct flowi_common *flic)
  {
-       call_void_hook(sk_getsecid, sk, &fl->flowi_secid);
+       call_void_hook(sk_getsecid, sk, &flic->flowic_secid);
  }
  EXPORT_SYMBOL(security_sk_classify_flow);
  
- void security_req_classify_flow(const struct request_sock *req, struct flowi *fl)
+ void security_req_classify_flow(const struct request_sock *req,
+                               struct flowi_common *flic)
  {
-       call_void_hook(req_classify_flow, req, fl);
+       call_void_hook(req_classify_flow, req, flic);
  }
  EXPORT_SYMBOL(security_req_classify_flow);
  
@@@ -2226,7 -2226,7 +2227,7 @@@ void security_sock_graft(struct sock *s
  }
  EXPORT_SYMBOL(security_sock_graft);
  
 -int security_inet_conn_request(struct sock *sk,
 +int security_inet_conn_request(const struct sock *sk,
                        struct sk_buff *skb, struct request_sock *req)
  {
        return call_int_hook(inet_conn_request, 0, sk, skb, req);
@@@ -2408,7 -2408,7 +2409,7 @@@ int security_xfrm_policy_lookup(struct 
  
  int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
                                       struct xfrm_policy *xp,
-                                      const struct flowi *fl)
+                                      const struct flowi_common *flic)
  {
        struct security_hook_list *hp;
        int rc = LSM_RET_DEFAULT(xfrm_state_pol_flow_match);
         */
        hlist_for_each_entry(hp, &security_hook_heads.xfrm_state_pol_flow_match,
                                list) {
-               rc = hp->hook.xfrm_state_pol_flow_match(x, xp, fl);
+               rc = hp->hook.xfrm_state_pol_flow_match(x, xp, flic);
                break;
        }
        return rc;
@@@ -2435,9 -2435,9 +2436,9 @@@ int security_xfrm_decode_session(struc
        return call_int_hook(xfrm_decode_session, 0, skb, secid, 1);
  }
  
- void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
+ void security_skb_classify_flow(struct sk_buff *skb, struct flowi_common *flic)
  {
-       int rc = call_int_hook(xfrm_decode_session, 0, skb, &fl->flowi_secid,
+       int rc = call_int_hook(xfrm_decode_session, 0, skb, &flic->flowic_secid,
                                0);
  
        BUG_ON(rc);
diff --combined security/selinux/hooks.c
@@@ -600,7 -600,7 +600,7 @@@ static int selinux_set_mnt_opts(struct 
  {
        const struct cred *cred = current_cred();
        struct superblock_security_struct *sbsec = sb->s_security;
-       struct dentry *root = sbsec->sb->s_root;
+       struct dentry *root = sb->s_root;
        struct selinux_mnt_opts *opts = mnt_opts;
        struct inode_security_struct *root_isec;
        u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
@@@ -1080,7 -1080,7 +1080,7 @@@ static int selinux_sb_show_options(stru
                        return rc;
        }
        if (sbsec->flags & ROOTCONTEXT_MNT) {
-               struct dentry *root = sbsec->sb->s_root;
+               struct dentry *root = sb->s_root;
                struct inode_security_struct *isec = backing_inode_security(root);
                seq_putc(m, ',');
                seq_puts(m, ROOTCONTEXT_STR);
@@@ -1451,7 -1451,7 +1451,7 @@@ static int inode_doinit_with_dentry(str
                         * inode_doinit with a dentry, before these inodes could
                         * be used again by userspace.
                         */
-                       goto out;
+                       goto out_invalid;
                }
  
                rc = inode_doinit_use_xattr(inode, dentry, sbsec->def_sid,
                         * could be used again by userspace.
                         */
                        if (!dentry)
-                               goto out;
+                               goto out_invalid;
                        rc = selinux_genfs_get_sid(dentry, sclass,
                                                   sbsec->flags, &sid);
                        if (rc) {
  out:
        spin_lock(&isec->lock);
        if (isec->initialized == LABEL_PENDING) {
-               if (!sid || rc) {
+               if (rc) {
                        isec->initialized = LABEL_INVALID;
                        goto out_unlock;
                }
                isec->initialized = LABEL_INITIALIZED;
                isec->sid = sid;
        }
  out_unlock:
        spin_unlock(&isec->lock);
        return rc;
+ out_invalid:
+       spin_lock(&isec->lock);
+       if (isec->initialized == LABEL_PENDING) {
+               isec->initialized = LABEL_INVALID;
+               isec->sid = sid;
+       }
+       spin_unlock(&isec->lock);
+       return 0;
  }
  
  /* Convert a Linux signal to an access vector. */
@@@ -2560,7 -2568,6 +2568,6 @@@ static int selinux_sb_alloc_security(st
        mutex_init(&sbsec->lock);
        INIT_LIST_HEAD(&sbsec->isec_head);
        spin_lock_init(&sbsec->isec_lock);
-       sbsec->sb = sb;
        sbsec->sid = SECINITSID_UNLABELED;
        sbsec->def_sid = SECINITSID_FILE;
        sbsec->mntpoint_sid = SECINITSID_UNLABELED;
@@@ -4029,6 -4036,7 +4036,7 @@@ static int selinux_kernel_load_data(enu
        switch (id) {
        case LOADING_MODULE:
                rc = selinux_kernel_module_from_file(NULL);
+               break;
        default:
                break;
        }
@@@ -5355,7 -5363,7 +5363,7 @@@ static void selinux_sctp_sk_clone(struc
        selinux_netlbl_sctp_sk_clone(sk, newsk);
  }
  
 -static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
 +static int selinux_inet_conn_request(const struct sock *sk, struct sk_buff *skb,
                                     struct request_sock *req)
  {
        struct sk_security_struct *sksec = sk->sk_security;
@@@ -5429,9 -5437,9 +5437,9 @@@ static void selinux_secmark_refcount_de
  }
  
  static void selinux_req_classify_flow(const struct request_sock *req,
-                                     struct flowi *fl)
+                                     struct flowi_common *flic)
  {
-       fl->flowi_secid = req->secid;
+       flic->flowic_secid = req->secid;
  }
  
  static int selinux_tun_dev_alloc_security(void **security)