/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Definitions for the 'struct skb_array' datastructure.
 *
 *	Author:
 *		Michael S. Tsirkin <mst@redhat.com>
 *
 *	Copyright (C) 2016 Red Hat, Inc.
 *
 *	Limited-size FIFO of skbs. Can be used more or less whenever
 *	sk_buff_head can be used, except you need to know the queue size in
 *	advance.
 *	Implemented as a type-safe wrapper around ptr_ring.
 */

#ifndef _LINUX_SKB_ARRAY_H
#define _LINUX_SKB_ARRAY_H 1

#include <linux/ptr_ring.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>

struct skb_array {
	struct ptr_ring ring;
};

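/*
 * Typical single-producer/single-consumer usage (illustrative sketch only,
 * not part of this header; the queue size of 128 and the consumer action
 * are made up for the example).  A failed produce means the ring is full
 * and the caller still owns the skb; skb_array_cleanup() frees anything
 * left in the queue.
 *
 *	struct skb_array q;
 *	struct sk_buff *skb;
 *
 *	if (skb_array_init(&q, 128, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	if (skb_array_produce(&q, skb))
 *		kfree_skb(skb);
 *
 *	while ((skb = skb_array_consume(&q)) != NULL)
 *		netif_receive_skb(skb);
 *
 *	skb_array_cleanup(&q);
 */
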
/* Might be slightly faster than skb_array_full below, but callers invoking
 * this in a loop must use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_full(struct skb_array *a)
{
	return __ptr_ring_full(&a->ring);
}

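/*
 * The busy-wait pattern the comment above refers to (illustrative sketch
 * only): cpu_relax() provides the required compiler barrier so the lockless
 * ring state is re-read on every iteration.
 *
 *	while (__skb_array_full(a))
 *		cpu_relax();
 */
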
static inline bool skb_array_full(struct skb_array *a)
{
	return ptr_ring_full(&a->ring);
}

static inline int skb_array_produce(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce(&a->ring, skb);
}

static inline int skb_array_produce_irq(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_irq(&a->ring, skb);
}

static inline int skb_array_produce_bh(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_bh(&a->ring, skb);
}

static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb)
{
	return ptr_ring_produce_any(&a->ring, skb);
}

/* Might be slightly faster than skb_array_empty below, but only safe if the
 * array is never resized. Also, callers invoking this in a loop must take care
 * to use a compiler barrier, for example cpu_relax().
 */
static inline bool __skb_array_empty(struct skb_array *a)
{
	return __ptr_ring_empty(&a->ring);
}

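/*
 * Lockless consumer-side drain (illustrative sketch only; assumes a single
 * consumer and a ring that is never resized, as the comment above requires;
 * handle_one_skb() is a made-up placeholder for whatever the consumer does
 * with each skb):
 *
 *	while (!__skb_array_empty(a)) {
 *		struct sk_buff *skb = __skb_array_consume(a);
 *
 *		if (skb)
 *			handle_one_skb(skb);
 *	}
 */
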
static inline struct sk_buff *__skb_array_peek(struct skb_array *a)
{
	return __ptr_ring_peek(&a->ring);
}

static inline bool skb_array_empty(struct skb_array *a)
{
	return ptr_ring_empty(&a->ring);
}

static inline bool skb_array_empty_bh(struct skb_array *a)
{
	return ptr_ring_empty_bh(&a->ring);
}

static inline bool skb_array_empty_irq(struct skb_array *a)
{
	return ptr_ring_empty_irq(&a->ring);
}

static inline bool skb_array_empty_any(struct skb_array *a)
{
	return ptr_ring_empty_any(&a->ring);
}

static inline struct sk_buff *__skb_array_consume(struct skb_array *a)
{
	return __ptr_ring_consume(&a->ring);
}

static inline struct sk_buff *skb_array_consume(struct skb_array *a)
{
	return ptr_ring_consume(&a->ring);
}

static inline int skb_array_consume_batched(struct skb_array *a,
					    struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched(&a->ring, (void **)array, n);
}

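/*
 * Batched dequeue (illustrative sketch only; the burst size of 16 and the
 * per-skb action are made up): consuming a small burst per call amortizes
 * the ring accesses compared with one skb_array_consume() per packet.
 *
 *	struct sk_buff *batch[16];
 *	int i, n;
 *
 *	n = skb_array_consume_batched(a, batch, ARRAY_SIZE(batch));
 *	for (i = 0; i < n; i++)
 *		netif_receive_skb(batch[i]);
 */
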
static inline struct sk_buff *skb_array_consume_irq(struct skb_array *a)
{
	return ptr_ring_consume_irq(&a->ring);
}

static inline int skb_array_consume_batched_irq(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_irq(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_any(struct skb_array *a)
{
	return ptr_ring_consume_any(&a->ring);
}

static inline int skb_array_consume_batched_any(struct skb_array *a,
						struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_any(&a->ring, (void **)array, n);
}

static inline struct sk_buff *skb_array_consume_bh(struct skb_array *a)
{
	return ptr_ring_consume_bh(&a->ring);
}

static inline int skb_array_consume_batched_bh(struct skb_array *a,
					       struct sk_buff **array, int n)
{
	return ptr_ring_consume_batched_bh(&a->ring, (void **)array, n);
}

static inline int __skb_array_len_with_tag(struct sk_buff *skb)
{
	if (likely(skb)) {
		int len = skb->len;

		if (skb_vlan_tag_present(skb))
			len += VLAN_HLEN;

		return len;
	} else {
		return 0;
	}
}

static inline int skb_array_peek_len(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_irq(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_IRQ(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_bh(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_BH(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_peek_len_any(struct skb_array *a)
{
	return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag);
}

static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_init(&a->ring, size, gfp);
}

static void __skb_array_destroy_skb(void *ptr)
{
	kfree_skb(ptr);
}

static inline void skb_array_unconsume(struct skb_array *a,
				       struct sk_buff **skbs, int n)
{
	ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb);
}

static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
{
	return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb);
}

static inline int skb_array_resize_multiple(struct skb_array **rings,
					    int nrings, unsigned int size,
					    gfp_t gfp)
{
	/* The (struct ptr_ring **) cast below is only valid because 'ring'
	 * is the first member of struct skb_array; BUILD_BUG_ON catches any
	 * layout change.
	 */
	BUILD_BUG_ON(offsetof(struct skb_array, ring));
	return ptr_ring_resize_multiple((struct ptr_ring **)rings,
					nrings, size, gfp,
					__skb_array_destroy_skb);
}

static inline void skb_array_cleanup(struct skb_array *a)
{
	ptr_ring_cleanup(&a->ring, __skb_array_destroy_skb);
}

#endif /* _LINUX_SKB_ARRAY_H */