// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM	(1 + EFX_RX_MAX_FRAGS)

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}
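
/* A page is only reusable when the networking stack has dropped all of its
 * references, i.e. page_count() is back to the single reference held by the
 * recycle ring; otherwise the DMA mapping is torn down and the ring's
 * reference is released.
 */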

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_recycle_rx_pages(struct efx_channel *channel,
			  struct efx_rx_buffer *rx_buf,
			  unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_discard_rx_packet(struct efx_channel *channel,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}
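
/* The recycle ring size is rounded up to a power of two so that a slot index
 * is just (counter & page_ptr_mask); for example a 16-entry ring has mask 0xf
 * and counter values 15, 16, 17 select slots 15, 0, 1.  If the allocation
 * fails, page_ring is left NULL and the recycle paths above simply skip
 * recycling.
 */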

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	/* Nothing to do if the recycle ring was never allocated. */
	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index, 0);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
void efx_unmap_rx_buffer(struct efx_nic *efx,
			 struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
			 struct efx_rx_buffer *rx_buf,
			 unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}
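
/* The slow-fill timer is the fallback for when efx_init_rx_buffers() cannot
 * get memory: roughly 10ms later a fill event is generated so the refill is
 * retried from the NAPI handler instead of looping in atomic context.
 */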

/* efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}
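
/* Resulting page layout: a struct efx_rx_page_state header holding the DMA
 * address, followed by rx_bufs_per_page buffers spaced rx_page_buf_step bytes
 * apart.  Only the buffer marked EFX_RX_BUF_LAST_IN_PAGE causes the page to
 * be unmapped and freed (or recycled) when it is torn down.
 */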

void efx_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		 efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}
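
/* Rough worked example (the exact numbers depend on MTU, headroom and
 * alignment): with 4KiB pages and a buffer step that comes out a little under
 * 2KiB, two buffers fit after the efx_rx_page_state header, so
 * rx_bufs_per_page is 2 and each buffer's truesize is charged as half a page.
 */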

/* efx_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}
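
/* Refilling proceeds in whole batches: batch_size buffers (one page batch)
 * are added per iteration until less than one batch of space remains, and the
 * hardware is notified of the new descriptors once, at the end, via
 * efx_nic_notify_rx_desc().
 */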

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
		  unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH &&
	    efx_rx_buf_hash_valid(efx, eh))
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}
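
/* If the NIC supplied a full packet checksum it is forwarded as
 * CHECKSUM_COMPLETE; otherwise only the validated/not-validated indication
 * survives as CHECKSUM_UNNECESSARY or CHECKSUM_NONE, with csum_level set when
 * the hardware also validated an inner (encapsulated) checksum.
 */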

/* RSS contexts.  We're using linked lists and crappy O(n) algorithms, because
 * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
 */
struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx, *new;
	u32 id = 1; /* Don't use zero, that refers to the master RSS context */

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	/* Search for first gap in the numbering */
	list_for_each_entry(ctx, head, list) {
		if (ctx->user_id != id)
			break;
		id++;
		/* Check for wrap.  If this happens, we have nearly 2^32
		 * allocated RSS contexts, which seems unlikely.
		 */
		if (WARN_ON_ONCE(!id))
			return NULL;
	}

	/* Create the new entry */
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;
	new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
	new->rx_hash_udp_4tuple = false;

	/* Insert the new entry into the gap */
	new->user_id = id;
	list_add_tail(&new->list, &ctx->list);
	return new;
}

struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
{
	struct list_head *head = &efx->rss_context.list;
	struct efx_rss_context *ctx;

	WARN_ON(!mutex_is_locked(&efx->rss_lock));

	list_for_each_entry(ctx, head, list)
		if (ctx->user_id == id)
			return ctx;
	return NULL;
}

void efx_free_rss_context_entry(struct efx_rss_context *ctx)
{
	list_del(&ctx->list);
	kfree(ctx);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}
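
/* ethtool_rxfh_indir_default(i, n) is simply i % n, so the default
 * indirection table spreads flows evenly across rss_spread RX queues.
 */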

/**
 * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_filter_spec_equal(const struct efx_filter_spec *left,
			   const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}
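
/* Both efx_filter_spec_equal() and efx_filter_spec_hash() deliberately cover
 * only the match fields from outer_vid to the end of the spec; the control
 * fields ahead of it are compared separately or ignored, so two specs that
 * describe the same traffic hash to the same bucket.  The BUILD_BUG_ON above
 * ensures that region is 32-bit aligned, as required by jhash2(), which
 * consumes u32 words.
 */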

#ifdef CONFIG_RFS_ACCEL
bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
			bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
				       const struct efx_filter_spec *spec,
				       bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry.  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}
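
/* The value returned here (rule->arfs_id, or 0 when there is no hash table)
 * is what the networking core records for the flow and later presents back
 * when it asks whether the steering filter may be expired; the comments in
 * efx_rps_check_rule() describe the PENDING/ERROR/REMOVING life cycle that
 * the work item and the expiry path use to coordinate.
 */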

bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */