// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2014 Protonic Holland,
 *                    David Jander
 * Copyright (C) 2014-2021, 2023 Pengutronix,
 *                    Marc Kleine-Budde <kernel@pengutronix.de>
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
struct can_rx_offload_cb {
	u32 timestamp;
};
static inline struct can_rx_offload_cb *
can_rx_offload_get_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

	return (struct can_rx_offload_cb *)skb->cb;
}
static inline bool
can_rx_offload_le(struct can_rx_offload *offload,
		  unsigned int a, unsigned int b)
{
	if (offload->inc)
		return a <= b;
	else
		return a >= b;
}
static inline unsigned int
can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
	if (offload->inc)
		return (*val)++;
	else
		return (*val)--;
}
static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
	struct can_rx_offload *offload = container_of(napi,
						      struct can_rx_offload,
						      napi);
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	int work_done = 0;

	while ((work_done < quota) &&
	       (skb = skb_dequeue(&offload->skb_queue))) {
		struct can_frame *cf = (struct can_frame *)skb->data;

		work_done++;
		if (!(cf->can_id & CAN_ERR_FLAG)) {
			stats->rx_packets++;
			if (!(cf->can_id & CAN_RTR_FLAG))
				stats->rx_bytes += cf->len;
		}
		netif_receive_skb(skb);
	}

	if (work_done < quota) {
		napi_complete_done(napi, work_done);

		/* Check if there was another interrupt */
		if (!skb_queue_empty(&offload->skb_queue))
			napi_reschedule(&offload->napi);
	}

	return work_done;
}
static inline void
__skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
		     int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
	struct sk_buff *pos, *insert = NULL;

	skb_queue_reverse_walk(head, pos) {
		const struct can_rx_offload_cb *cb_pos, *cb_new;

		cb_pos = can_rx_offload_get_cb(pos);
		cb_new = can_rx_offload_get_cb(new);

		netdev_dbg(new->dev,
			   "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
			   __func__,
			   cb_pos->timestamp, cb_new->timestamp,
			   cb_new->timestamp - cb_pos->timestamp,
			   skb_queue_len(head));

		if (compare(pos, new) < 0)
			continue;
		insert = pos;
		break;
	}
	if (!insert)
		__skb_queue_head(head, new);
	else
		__skb_queue_after(head, insert, new);
}
static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
	const struct can_rx_offload_cb *cb_a, *cb_b;

	cb_a = can_rx_offload_get_cb(a);
	cb_b = can_rx_offload_get_cb(b);

	/* Subtract two u32 and return result as int, to keep
	 * difference steady around the u32 overflow.
	 */
	return cb_b->timestamp - cb_a->timestamp;
}
/**
 * can_rx_offload_offload_one() - Read one CAN frame from HW
 * @offload: pointer to rx_offload context
 * @n: number of mailbox to read
 *
 * The task of this function is to read a CAN frame from mailbox @n of
 * the device and return the mailbox's content as a struct can_frame
 * (including the timestamp) embedded in an skb.
 *
 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
 * allocated, the mailbox contents are discarded by reading them into an
 * overflow buffer. This way the mailbox is marked as free by the
 * driver.
 *
 * Return: A pointer to skb containing the CAN frame on success.
 *
 *         NULL if the mailbox @n is empty.
 *
 *         ERR_PTR() in case of an error
 */
static struct sk_buff *
can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
	struct sk_buff *skb;
	struct can_rx_offload_cb *cb;
	bool drop = false;
	u32 timestamp;

	/* If queue is full drop frame */
	if (unlikely(skb_queue_len(&offload->skb_queue) >
		     offload->skb_queue_len_max))
		drop = true;

	skb = offload->mailbox_read(offload, n, &timestamp, drop);
	/* Mailbox was empty. */
	if (unlikely(!skb))
		return NULL;

	/* There was a problem reading the mailbox, propagate
	 * error value.
	 */
	if (IS_ERR(skb)) {
		offload->dev->stats.rx_dropped++;
		offload->dev->stats.rx_fifo_errors++;

		return skb;
	}

	/* Mailbox was read. */
	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	return skb;
}
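/* A minimal sketch of a mailbox_read callback as consumed above, for a
 * hypothetical driver (the foo_priv() helper and FOO_* registers are
 * made up; only the contract matters): return NULL for an empty
 * mailbox, an ERR_PTR() on failure or when asked to @drop, otherwise an
 * skb with *timestamp filled in -- and always leave the mailbox free:
 *
 *	static struct sk_buff *
 *	foo_mailbox_read(struct can_rx_offload *offload, unsigned int n,
 *			 u32 *timestamp, bool drop)
 *	{
 *		struct foo_priv *priv = foo_priv(offload->dev);
 *		struct can_frame *cf;
 *		struct sk_buff *skb;
 *
 *		if (!(readl(priv->regs + FOO_MB_CTRL(n)) & FOO_MB_FULL))
 *			return NULL;
 *
 *		*timestamp = readl(priv->regs + FOO_MB_TIME(n));
 *
 *		if (drop) {
 *			skb = ERR_PTR(-ENOBUFS);
 *			goto mark_as_read;
 *		}
 *
 *		skb = alloc_can_skb(offload->dev, &cf);
 *		if (unlikely(!skb)) {
 *			skb = ERR_PTR(-ENOMEM);
 *			goto mark_as_read;
 *		}
 *
 *		cf->can_id = readl(priv->regs + FOO_MB_ID(n));
 *		cf->len = can_cc_dlc2len(readl(priv->regs + FOO_MB_DLC(n)));
 *
 *	mark_as_read:
 *		writel(FOO_MB_RELEASE, priv->regs + FOO_MB_CTRL(n));
 *		return skb;
 *	}
 */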
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
					 u64 pending)
{
	unsigned int i;
	int received = 0;

	for (i = offload->mb_first;
	     can_rx_offload_le(offload, i, offload->mb_last);
	     can_rx_offload_inc(offload, &i)) {
		struct sk_buff *skb;

		if (!(pending & BIT_ULL(i)))
			continue;

		skb = can_rx_offload_offload_one(offload, i);
		if (IS_ERR_OR_NULL(skb))
			continue;

		__skb_queue_add_sort(&offload->skb_irq_queue, skb,
				     can_rx_offload_compare);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
	struct sk_buff *skb;
	int received = 0;

	while (1) {
		skb = can_rx_offload_offload_one(offload, 0);
		if (IS_ERR(skb))
			continue;
		if (!skb)
			break;

		__skb_queue_tail(&offload->skb_irq_queue, skb);
		received++;
	}

	return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
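/* Typical use of the FIFO variant from a hard IRQ handler, sketched for
 * a hypothetical driver (foo_priv(), foo_rx_pending() and the
 * priv->offload member are made up for illustration): offload all
 * pending frames into the lockless skb_irq_queue, then hand them to
 * NAPI in one go:
 *
 *	static irqreturn_t foo_irq(int irq, void *dev_id)
 *	{
 *		struct net_device *dev = dev_id;
 *		struct foo_priv *priv = foo_priv(dev);
 *
 *		if (foo_rx_pending(priv))
 *			can_rx_offload_irq_offload_fifo(&priv->offload);
 *
 *		can_rx_offload_irq_finish(&priv->offload);
 *
 *		return IRQ_HANDLED;
 *	}
 */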
int can_rx_offload_queue_timestamp(struct can_rx_offload *offload,
				   struct sk_buff *skb, u32 timestamp)
{
	struct can_rx_offload_cb *cb;

	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	cb = can_rx_offload_get_cb(skb);
	cb->timestamp = timestamp;

	__skb_queue_add_sort(&offload->skb_irq_queue, skb,
			     can_rx_offload_compare);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_timestamp);
unsigned int
can_rx_offload_get_echo_skb_queue_timestamp(struct can_rx_offload *offload,
					    unsigned int idx, u32 timestamp,
					    unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_timestamp(offload, skb, timestamp);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
			      struct sk_buff *skb)
{
	if (skb_queue_len(&offload->skb_queue) >
	    offload->skb_queue_len_max) {
		dev_kfree_skb_any(skb);
		return -ENOBUFS;
	}

	__skb_queue_tail(&offload->skb_irq_queue, skb);

	return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
unsigned int
can_rx_offload_get_echo_skb_queue_tail(struct can_rx_offload *offload,
				       unsigned int idx,
				       unsigned int *frame_len_ptr)
{
	struct net_device *dev = offload->dev;
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;
	int err;

	skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
	if (!skb)
		return 0;

	err = can_rx_offload_queue_tail(offload, skb);
	if (err) {
		stats->rx_errors++;
		stats->tx_fifo_errors++;
	}

	return len;
}
EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb_queue_tail);
void can_rx_offload_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	napi_schedule(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_finish);
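/* Same as can_rx_offload_irq_finish() above, but intended for threaded
 * IRQ handlers: because it runs in process context, napi_schedule() is
 * wrapped in local_bh_disable()/local_bh_enable() so the NET_RX softirq
 * is run when BHs are re-enabled rather than being left deferred.
 */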
void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload)
{
	unsigned long flags;
	int queue_len;

	if (skb_queue_empty_lockless(&offload->skb_irq_queue))
		return;

	spin_lock_irqsave(&offload->skb_queue.lock, flags);
	skb_queue_splice_tail_init(&offload->skb_irq_queue, &offload->skb_queue);
	spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

	queue_len = skb_queue_len(&offload->skb_queue);
	if (queue_len > offload->skb_queue_len_max / 8)
		netdev_dbg(offload->dev, "%s: queue_len=%d\n",
			   __func__, queue_len);

	local_bh_disable();
	napi_schedule(&offload->napi);
	local_bh_enable();
}
EXPORT_SYMBOL_GPL(can_rx_offload_threaded_irq_finish);
static int can_rx_offload_init_queue(struct net_device *dev,
				     struct can_rx_offload *offload,
				     unsigned int weight)
{
	offload->dev = dev;

	/* Limit queue len to 4x the weight (rounded to next power of two) */
	offload->skb_queue_len_max = 2 << fls(weight);
	offload->skb_queue_len_max *= 4;
	skb_queue_head_init(&offload->skb_queue);
	__skb_queue_head_init(&offload->skb_irq_queue);

	netif_napi_add_weight(dev, &offload->napi, can_rx_offload_napi_poll,
			      weight);

	dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
		__func__, offload->skb_queue_len_max);

	return 0;
}
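/* Worked example (illustration only): can_rx_offload_init_queue() with
 * a weight of 64 gives fls(64) = 7, so skb_queue_len_max =
 * (2 << 7) * 4 = 1024; a weight of 16 gives fls(16) = 5 and
 * skb_queue_len_max = (2 << 5) * 4 = 256.
 */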
int can_rx_offload_add_timestamp(struct net_device *dev,
				 struct can_rx_offload *offload)
{
	unsigned int weight;

	if (offload->mb_first > BITS_PER_LONG_LONG ||
	    offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
		return -EINVAL;

	if (offload->mb_first < offload->mb_last) {
		offload->inc = true;
		weight = offload->mb_last - offload->mb_first;
	} else {
		offload->inc = false;
		weight = offload->mb_first - offload->mb_last;
	}

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
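/* Worked example (illustration only): a driver scanning its mailboxes
 * in descending order sets offload->mb_first = 63 and
 * offload->mb_last = 32. can_rx_offload_add_timestamp() then clears
 * offload->inc and uses a NAPI weight of 63 - 32 = 31, and
 * can_rx_offload_irq_offload_timestamp() walks from mailbox 63 down to
 * mailbox 32.
 */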
int can_rx_offload_add_fifo(struct net_device *dev,
			    struct can_rx_offload *offload, unsigned int weight)
{
	if (!offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
int can_rx_offload_add_manual(struct net_device *dev,
			      struct can_rx_offload *offload,
			      unsigned int weight)
{
	if (offload->mailbox_read)
		return -EINVAL;

	return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
void can_rx_offload_enable(struct can_rx_offload *offload)
{
	napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);
void can_rx_offload_del(struct can_rx_offload *offload)
{
	netif_napi_del(&offload->napi);
	skb_queue_purge(&offload->skb_queue);
	__skb_queue_purge(&offload->skb_irq_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);