[platform/kernel/linux-starfive.git] net/core/skbuff.c
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 4570705..011d690 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -550,7 +550,7 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
                             bool *pfmemalloc)
 {
        bool ret_pfmemalloc = false;
-       unsigned int obj_size;
+       size_t obj_size;
        void *obj;
 
        obj_size = SKB_HEAD_ALIGN(*size);
@@ -567,7 +567,13 @@ static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
                obj = kmem_cache_alloc_node(skb_small_head_cache, flags, node);
                goto out;
        }
-       *size = obj_size = kmalloc_size_roundup(obj_size);
+
+       obj_size = kmalloc_size_roundup(obj_size);
+       /* The following cast might truncate high-order bits of obj_size, this
+        * is harmless because kmalloc(obj_size >= 2^32) will fail anyway.
+        */
+       *size = (unsigned int)obj_size;
+
        /*
         * Try a regular allocation, when that fails and we're not entitled
         * to the reserves, fail.
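
Note on the kmalloc_reserve() hunk above: the round-up is now done in a size_t and only then narrowed back to the caller's unsigned int, so an oversized request cannot wrap before the allocator gets a chance to reject it. Below is a minimal user-space sketch of that widen-then-narrow pattern; round_up_size() and alloc_capped() are invented stand-ins for kmalloc_size_roundup() and kmalloc(), not the real kernel code.

#include <stdio.h>
#include <stdlib.h>

static size_t round_up_size(size_t n)		/* stand-in round-up helper */
{
	size_t bucket = 32;

	while (bucket < n)
		bucket *= 2;
	return bucket;
}

static void *alloc_capped(size_t n)		/* stand-in allocator */
{
	/* Like kmalloc(), refuse absurdly large requests. */
	return n > (1u << 30) ? NULL : malloc(n);
}

static void *reserve(unsigned int *size)
{
	size_t obj_size = round_up_size(*size);

	/* The cast can truncate only when obj_size >= 2^32, and in that
	 * case the allocation below fails, so the truncated value is never
	 * paired with a successful allocation.
	 */
	*size = (unsigned int)obj_size;
	return alloc_capped(obj_size);
}

int main(void)
{
	unsigned int sz = 1000;
	void *p = reserve(&sz);

	printf("rounded to %u bytes, ptr %p\n", sz, p);
	free(p);
	return 0;
}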
@@ -4248,6 +4254,7 @@ static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
                           unsigned int to, struct ts_config *config)
 {
+       unsigned int patlen = config->ops->get_pattern_len(config);
        struct ts_state state;
        unsigned int ret;
 
@@ -4259,7 +4266,7 @@ unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
        skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
 
        ret = textsearch_find(config, &state);
-       return (ret <= to - from ? ret : UINT_MAX);
+       return (ret + patlen <= to - from ? ret : UINT_MAX);
 }
 EXPORT_SYMBOL(skb_find_text);
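
Note on the skb_find_text() hunk above: the old check only constrained where a match starts (ret <= to - from); the new one also requires the whole pattern to end inside the [from, to) window. A rough user-space analogy follows, with naive_find() as an invented stand-in for textsearch_find().

#include <stdio.h>
#include <string.h>
#include <limits.h>

/* Invented stand-in for textsearch_find(): scan everything after 'from',
 * possibly matching beyond the caller's 'to' bound.
 */
static unsigned int naive_find(const char *hay, unsigned int hay_len,
			       const char *pat, unsigned int patlen)
{
	unsigned int i;

	for (i = 0; i + patlen <= hay_len; i++)
		if (!memcmp(hay + i, pat, patlen))
			return i;
	return UINT_MAX;
}

static unsigned int find_text(const char *buf, unsigned int buf_len,
			      unsigned int from, unsigned int to,
			      const char *pat)
{
	unsigned int patlen = strlen(pat);
	unsigned int ret = naive_find(buf + from, buf_len - from, pat, patlen);

	/* Old bound: ret <= to - from, which accepts a match that starts
	 * inside the window but runs past 'to'.  New bound: the match plus
	 * the pattern length must stay inside the window.
	 */
	return ret + patlen <= to - from ? ret : UINT_MAX;
}

int main(void)
{
	const char *buf = "hello world";

	/* "lo" starts at offset 3 but ends at 5, outside [0, 4): rejected. */
	printf("%u\n", find_text(buf, 11, 0, 4, "lo"));
	/* With to = 6 the whole pattern fits: offset 3 is returned. */
	printf("%u\n", find_text(buf, 11, 0, 6, "lo"));
	return 0;
}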
 
@@ -4423,21 +4430,20 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
        struct sk_buff *segs = NULL;
        struct sk_buff *tail = NULL;
        struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
-       skb_frag_t *frag = skb_shinfo(head_skb)->frags;
        unsigned int mss = skb_shinfo(head_skb)->gso_size;
        unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
-       struct sk_buff *frag_skb = head_skb;
        unsigned int offset = doffset;
        unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
        unsigned int partial_segs = 0;
        unsigned int headroom;
        unsigned int len = head_skb->len;
+       struct sk_buff *frag_skb;
+       skb_frag_t *frag;
        __be16 proto;
        bool csum, sg;
-       int nfrags = skb_shinfo(head_skb)->nr_frags;
        int err = -ENOMEM;
        int i = 0;
-       int pos;
+       int nfrags, pos;
 
        if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
            mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
@@ -4502,8 +4508,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                /* GSO partial only requires that we trim off any excess that
                 * doesn't fit into an MSS sized block, so take care of that
                 * now.
+                * Cap len to not accidentally hit GSO_BY_FRAGS.
                 */
-               partial_segs = len / mss;
+               partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss;
                if (partial_segs > 1)
                        mss *= partial_segs;
                else
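
Note on the GSO_PARTIAL hunk above: the scaled mss (mss * partial_segs) must never land exactly on GSO_BY_FRAGS, which doubles as a sentinel mss value, so len is capped before the division. A small arithmetic check of that cap is below; the 0xFFFF value for GSO_BY_FRAGS matches mainline at the time of writing but should be treated as an assumption here.

#include <stdio.h>

#define GSO_BY_FRAGS 0xFFFFu	/* sentinel "segment by frag_list" mss value */

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int mss = 3;
	unsigned int len = 65536;	/* headlen + payload of a GSO_PARTIAL skb */
	unsigned int uncapped, capped;

	uncapped = (len / mss) * mss;				/* 65535: collides with the sentinel */
	capped = (min_u(len, GSO_BY_FRAGS - 1u) / mss) * mss;	/* 65532: safe */

	printf("uncapped scaled mss: %u (sentinel? %d)\n",
	       uncapped, uncapped == GSO_BY_FRAGS);
	printf("capped   scaled mss: %u (sentinel? %d)\n",
	       capped, capped == GSO_BY_FRAGS);
	return 0;
}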
@@ -4514,6 +4521,13 @@ normal:
        headroom = skb_headroom(head_skb);
        pos = skb_headlen(head_skb);
 
+       if (skb_orphan_frags(head_skb, GFP_ATOMIC))
+               return ERR_PTR(-ENOMEM);
+
+       nfrags = skb_shinfo(head_skb)->nr_frags;
+       frag = skb_shinfo(head_skb)->frags;
+       frag_skb = head_skb;
+
        do {
                struct sk_buff *nskb;
                skb_frag_t *nskb_frag;
@@ -4534,6 +4548,10 @@ normal:
                    (skb_headlen(list_skb) == len || sg)) {
                        BUG_ON(skb_headlen(list_skb) > len);
 
+                       nskb = skb_clone(list_skb, GFP_ATOMIC);
+                       if (unlikely(!nskb))
+                               goto err;
+
                        i = 0;
                        nfrags = skb_shinfo(list_skb)->nr_frags;
                        frag = skb_shinfo(list_skb)->frags;
@@ -4552,12 +4570,8 @@ normal:
                                frag++;
                        }
 
-                       nskb = skb_clone(list_skb, GFP_ATOMIC);
                        list_skb = list_skb->next;
 
-                       if (unlikely(!nskb))
-                               goto err;
-
                        if (unlikely(pskb_trim(nskb, len))) {
                                kfree_skb(nskb);
                                goto err;
@@ -4633,12 +4647,16 @@ normal:
                skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
                                           SKBFL_SHARED_FRAG;
 
-               if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
-                   skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
+               if (skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
                        goto err;
 
                while (pos < offset + len) {
                        if (i >= nfrags) {
+                               if (skb_orphan_frags(list_skb, GFP_ATOMIC) ||
+                                   skb_zerocopy_clone(nskb, list_skb,
+                                                      GFP_ATOMIC))
+                                       goto err;
+
                                i = 0;
                                nfrags = skb_shinfo(list_skb)->nr_frags;
                                frag = skb_shinfo(list_skb)->frags;
@@ -4652,10 +4670,6 @@ normal:
                                        i--;
                                        frag--;
                                }
-                               if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
-                                   skb_zerocopy_clone(nskb, frag_skb,
-                                                      GFP_ATOMIC))
-                                       goto err;
 
                                list_skb = list_skb->next;
                        }
@@ -4797,7 +4811,9 @@ static __always_inline unsigned int skb_ext_total_length(void)
 static void skb_extensions_init(void)
 {
        BUILD_BUG_ON(SKB_EXT_NUM >= 8);
+#if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
        BUILD_BUG_ON(skb_ext_total_length() > 255);
+#endif
 
        skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
                                             SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
@@ -5207,7 +5223,7 @@ static void __skb_complete_tx_timestamp(struct sk_buff *skb,
        serr->ee.ee_info = tstype;
        serr->opt_stats = opt_stats;
        serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
-       if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
+       if (READ_ONCE(sk->sk_tsflags) & SOF_TIMESTAMPING_OPT_ID) {
                serr->ee.ee_data = skb_shinfo(skb)->tskey;
                if (sk_is_tcp(sk))
                        serr->ee.ee_data -= atomic_read(&sk->sk_tskey);
@@ -5263,21 +5279,23 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb,
 {
        struct sk_buff *skb;
        bool tsonly, opt_stats = false;
+       u32 tsflags;
 
        if (!sk)
                return;
 
-       if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
+       tsflags = READ_ONCE(sk->sk_tsflags);
+       if (!hwtstamps && !(tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
            skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
                return;
 
-       tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
+       tsonly = tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
        if (!skb_may_tx_timestamp(sk, tsonly))
                return;
 
        if (tsonly) {
 #ifdef CONFIG_INET
-               if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
+               if ((tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
                    sk_is_tcp(sk)) {
                        skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
                                                             ack_skb);
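
Note on the two sk_tsflags hunks above: the READ_ONCE() annotations suggest the field can change underneath these functions, so it is snapshotted once into a local and all subsequent bit tests use that same value. A user-space sketch of the snapshot-then-test pattern; atomic_load_explicit(..., memory_order_relaxed) stands in for the kernel's READ_ONCE(), and the flag bits are invented, not the real SOF_TIMESTAMPING_* values.

#include <stdatomic.h>
#include <stdio.h>

#define OPT_TSONLY	0x1u	/* invented flag bits for the example */
#define OPT_STATS	0x2u
#define OPT_TX_SWHW	0x4u

static _Atomic unsigned int shared_tsflags = OPT_TSONLY | OPT_STATS;

static void handle_timestamp(void)
{
	/* One snapshot: every test below sees the same value, even if a
	 * concurrent writer changes shared_tsflags between the tests.
	 */
	unsigned int tsflags = atomic_load_explicit(&shared_tsflags,
						    memory_order_relaxed);

	if (!(tsflags & OPT_TX_SWHW))
		printf("no software/hardware fallback requested\n");
	if (tsflags & OPT_TSONLY)
		printf("send timestamp-only skb\n");
	if (tsflags & OPT_STATS)
		printf("attach opt stats\n");
}

int main(void)
{
	handle_timestamp();
	return 0;
}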