1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2023, Intel Corporation. */
9 * ice_init_irq_tracker - initialize interrupt tracker
10 * @pf: board private structure
11 * @max_vectors: maximum number of vectors that tracker can hold
14 ice_init_irq_tracker(struct ice_pf *pf, unsigned int max_vectors)
/* num_entries becomes the upper xa_limit consulted by ice_get_irq_res(),
 * so the tracker can never hand out more than max_vectors interrupts.
 */
16 pf->irq_tracker.num_entries = max_vectors;
/* XA_FLAGS_ALLOC: let the xarray itself pick free indices on xa_alloc(). */
17 xa_init_flags(&pf->irq_tracker.entries, XA_FLAGS_ALLOC);
21 * ice_deinit_irq_tracker - free xarray tracker
22 * @pf: board private structure
24 static void ice_deinit_irq_tracker(struct ice_pf *pf)
/* Releases the xarray's internal bookkeeping only; xa_destroy() does not
 * free stored pointers — NOTE(review): presumes all entries were already
 * released via ice_free_irq_res(), otherwise they would leak. Confirm
 * against callers.
 */
26 xa_destroy(&pf->irq_tracker.entries);
30 * ice_free_irq_res - free a block of resources
31 * @pf: board private structure
32 * @index: starting index previously returned by ice_get_res
34 static void ice_free_irq_res(struct ice_pf *pf, u16 index)
36 struct ice_irq_entry *entry;
/* xa_erase() removes the slot and returns the previously stored entry
 * (NULL if @index was vacant). The follow-up handling is outside this
 * excerpt — presumably the entry is kfree()d there and a vacant index is
 * warned about. TODO(review): confirm against the full source.
 */
38 entry = xa_erase(&pf->irq_tracker.entries, index);
43 * ice_get_irq_res - get an interrupt resource
44 * @pf: board private structure
46 * Allocate new irq entry in the free slot of the tracker. Since xarray
47 * is used, always allocate new entry at the lowest possible index. Set
48 * proper allocation limit for maximum tracker entries.
50 * Returns allocated irq entry or NULL on failure.
52 static struct ice_irq_entry *ice_get_irq_res(struct ice_pf *pf)
/* Upper bound for index allocation is the tracker size recorded by
 * ice_init_irq_tracker(). (The .min initializer is on an elided line.)
 */
54 struct xa_limit limit = { .max = pf->irq_tracker.num_entries,
56 struct ice_irq_entry *entry;
/* Zero-initialized entry; GFP_KERNEL may sleep, so this is expected to
 * run in process context.
 */
60 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
/* xa_alloc() picks the lowest free index within @limit and stores it
 * through &index; the error path (freeing @entry on failure) and the
 * assignment of entry->index are outside this excerpt.
 */
64 ret = xa_alloc(&pf->irq_tracker.entries, &index, entry, limit,
78 * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
79 * @pf: board private structure
80 * @v_remain: number of remaining MSI-X vectors to be distributed
82 * Reduce the usage of MSI-X vectors when entire request cannot be fulfilled.
83 * pf->num_lan_msix and pf->num_rdma_msix values are set based on number of
86 static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
/* Without RDMA, LAN simply takes everything that is left. */
90 if (!ice_is_rdma_ena(pf)) {
91 pf->num_lan_msix = v_remain;
95 /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
96 v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
/* Not even the combined minimum for LAN + RDMA: disable RDMA entirely
 * (clear its feature flag) and give LAN its bare minimum.
 */
98 if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
99 dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
100 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
102 pf->num_rdma_msix = 0;
103 pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
/* Too tight for an even split: pin RDMA to its minimum, LAN gets the
 * remainder.
 */
104 } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
105 (v_remain - v_rdma < v_rdma)) {
106 /* Support minimum RDMA and give remaining vectors to LAN MSIX
108 pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
109 pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
111 /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
/* RDMA receives half of what is left after reserving the AEQ vectors,
 * plus the AEQ vectors themselves; LAN takes the rest.
 */
113 pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
114 ICE_RDMA_NUM_AEQ_MSIX;
115 pf->num_lan_msix = v_remain - pf->num_rdma_msix;
120 * ice_ena_msix_range - Request a range of MSIX vectors from the OS
121 * @pf: board private structure
123 * Compute the number of MSIX vectors wanted and request from the OS. Adjust
124 * device usage if there are not enough vectors. Return the number of vectors
125 * reserved or negative on failure.
127 static int ice_ena_msix_range(struct ice_pf *pf)
129 int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
130 struct device *dev = ice_pf_to_dev(pf);
/* Hardware ceiling as reported by the device function capabilities. */
133 hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
134 num_cpus = num_online_cpus();
136 /* LAN miscellaneous handler */
137 v_other = ICE_MIN_LAN_OICR_MSIX;
/* Flow director, if enabled, needs dedicated vector(s). */
140 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
141 v_other += ICE_FDIR_MSIX;
/* Switchdev/eswitch vector (its guarding condition is on an elided line). */
144 v_other += ICE_ESWITCH_MSIX;
/* Ideal case: one LAN data vector per online CPU. */
149 pf->num_lan_msix = num_cpus;
150 v_wanted += pf->num_lan_msix;
152 /* RDMA auxiliary driver */
153 if (ice_is_rdma_ena(pf)) {
154 pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
155 v_wanted += pf->num_rdma_msix;
/* Plan exceeds what the device exposes: shrink it before asking the OS. */
158 if (v_wanted > hw_num_msix) {
161 dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
162 v_wanted, hw_num_msix);
/* Below the absolute driver minimum: give up (the error return is on
 * an elided line).
 */
164 if (hw_num_msix < ICE_MIN_MSIX) {
/* Redistribute what remains after the "other" vectors; guarantee at
 * least the minimum LAN Tx/Rx vectors, squeezing v_other if needed.
 */
169 v_remain = hw_num_msix - v_other;
170 if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
171 v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
172 v_remain = ICE_MIN_LAN_TXRX_MSIX;
175 ice_reduce_msix_usage(pf, v_remain);
176 v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
178 dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
180 if (ice_is_rdma_ena(pf))
181 dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
185 /* actually reserve the vectors */
186 v_actual = pci_alloc_irq_vectors(pf->pdev, ICE_MIN_MSIX, v_wanted,
/* NOTE(review): the v_actual < 0 check guarding this error print is on
 * an elided line.
 */
189 dev_err(dev, "unable to reserve MSI-X vectors\n");
/* OS granted fewer than requested: redistribute once more based on what
 * was actually obtained.
 */
194 if (v_actual < v_wanted) {
195 dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
198 if (v_actual < ICE_MIN_MSIX) {
199 /* error if we can't get minimum vectors */
200 pci_free_irq_vectors(pf->pdev);
204 int v_remain = v_actual - v_other;
206 if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
207 v_remain = ICE_MIN_LAN_TXRX_MSIX;
209 ice_reduce_msix_usage(pf, v_remain);
211 dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
214 if (ice_is_rdma_ena(pf))
215 dev_notice(dev, "Enabled %d MSI-X vectors for RDMA.\n",
/* Error path: zero the accounting so nothing consumes stale counts. */
223 pf->num_rdma_msix = 0;
224 pf->num_lan_msix = 0;
229 * ice_clear_interrupt_scheme - Undo things done by ice_init_interrupt_scheme
230 * @pf: board private structure
232 void ice_clear_interrupt_scheme(struct ice_pf *pf)
/* Teardown mirrors init: release the OS-reserved MSI-X vectors first,
 * then destroy the xarray tracker.
 */
234 pci_free_irq_vectors(pf->pdev);
235 ice_deinit_irq_tracker(pf);
239 * ice_init_interrupt_scheme - Determine proper interrupt scheme
240 * @pf: board private structure to initialize
242 int ice_init_interrupt_scheme(struct ice_pf *pf)
/* Reserve MSI-X from the OS first; on success "vectors" holds the grant
 * count and sizes the tracker below. The negative-return check between
 * these two calls is on an elided line.
 */
246 vectors = ice_ena_msix_range(pf);
251 ice_init_irq_tracker(pf, vectors);
257 * ice_alloc_irq - Allocate new interrupt vector
258 * @pf: board private structure
260 * Allocate new interrupt vector for a given owner id.
261 * return struct msi_map with interrupt details and track
262 * allocated interrupt appropriately.
264 * This function mimics individual interrupt allocation,
265 * even interrupts are actually already allocated with
266 * pci_alloc_irq_vectors. Individual allocation helps
267 * to track interrupts and simplifies interrupt related
270 * On failure, return map with negative .index. The caller
271 * is expected to check returned map index.
274 struct msi_map ice_alloc_irq(struct ice_pf *pf)
/* Default to failure; .index stays -ENOENT unless tracking succeeds. */
276 struct msi_map map = { .index = -ENOENT };
277 struct ice_irq_entry *entry;
/* The NULL check on @entry between this call and the assignments below
 * is on an elided line.
 */
279 entry = ice_get_irq_res(pf);
/* Translate the tracker index into this device's Linux irq number. */
283 map.index = entry->index;
284 map.virq = pci_irq_vector(pf->pdev, map.index);
290 * ice_free_irq - Free interrupt vector
291 * @pf: board private structure
292 * @map: map with interrupt details
294 * Remove allocated interrupt from the interrupt tracker.
296 void ice_free_irq(struct ice_pf *pf, struct msi_map map)
/* Only tracker state is released here; the underlying PCI vectors stay
 * reserved until ice_clear_interrupt_scheme().
 */
298 ice_free_irq_res(pf, map.index);