// SPDX-License-Identifier: GPL-2.0
/* pci_msi.c: Sparc64 MSI support common layer.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "pci_impl.h"
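
/* Interrupt handler for one MSI event queue (MSIQ).  Drain every pending
 * entry, dispatch each MSI to the Linux irq mapped for it, then write the
 * updated queue head back to the hardware.
 */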
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
	struct sparc64_msiq_cookie *msiq_cookie = cookie;
	struct pci_pbm_info *pbm = msiq_cookie->pbm;
	unsigned long msiqid = msiq_cookie->msiqid;
	const struct sparc64_msiq_ops *ops;
	unsigned long orig_head, head;
	int err;

	ops = pbm->msi_ops;

	err = ops->get_head(pbm, msiqid, &head);
	if (unlikely(err < 0)) {
		printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
		       msiqid, err);
		return IRQ_NONE;
	}

	orig_head = head;
	for (;;) {
		unsigned long msi;

		err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
		if (likely(err > 0)) {
			unsigned int irq;

			irq = pbm->msi_irq_table[msi - pbm->msi_first];
			generic_handle_irq(irq);
		}
		if (unlikely(err < 0)) {
			printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] gives error %d\n",
			       head, msiqid, err);
			return IRQ_NONE;
		}
		if (err == 0)
			break;
	}

	if (likely(head != orig_head)) {
		err = ops->set_head(pbm, msiqid, head);
		if (unlikely(err < 0)) {
			printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] gives error %d\n",
			       head, msiqid, err);
			return IRQ_NONE;
		}
	}

	return IRQ_HANDLED;
}
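
/* Pick an MSI event queue for a new MSI, using a simple round-robin
 * rotor so that MSIs are spread evenly across the PBM's queues.
 */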
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
	static DEFINE_SPINLOCK(rotor_lock);
	unsigned long flags;
	u32 ret, rotor;

	spin_lock_irqsave(&rotor_lock, flags);

	rotor = pbm->msiq_rotor;
	ret = pbm->msiq_first + rotor;

	if (++rotor >= pbm->msiq_num)
		rotor = 0;
	pbm->msiq_rotor = rotor;

	spin_unlock_irqrestore(&rotor_lock, flags);

	return ret;
}
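
/* Allocate and release MSI numbers from the PBM's bitmap of available
 * MSIs.  alloc_msi() returns an absolute MSI number or a negative errno.
 */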
static int alloc_msi(struct pci_pbm_info *pbm)
{
	int i;

	for (i = 0; i < pbm->msi_num; i++) {
		if (!test_and_set_bit(i, pbm->msi_bitmap))
			return i + pbm->msi_first;
	}

	return -ENOENT;
}

static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
	msi_num -= pbm->msi_first;
	clear_bit(msi_num, pbm->msi_bitmap);
}
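
/* irq_chip used for the per-MSI virtual interrupts. */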
static struct irq_chip msi_irq = {
	.name		= "PCI-MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_enable	= pci_msi_unmask_irq,
	.irq_disable	= pci_msi_mask_irq,
	/* XXX affinity XXX */
};
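
/* Allocate a virtual irq and an MSI number for @pdev, bind the MSI to an
 * event queue, and program the MSI message (address and data) that the
 * device will write to raise it.
 */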
static int sparc64_setup_msi_irq(unsigned int *irq_p,
				 struct pci_dev *pdev,
				 struct msi_desc *entry)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	struct msi_msg msg;
	int msi, err;
	u32 msiqid;

	*irq_p = irq_alloc(0, 0);
	err = -ENOMEM;
	if (!*irq_p)
		goto out_err;

	irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq,
				      "MSI");

	err = alloc_msi(pbm);
	if (unlikely(err < 0))
		goto out_irq_free;
	msi = err;

	msiqid = pick_msiq(pbm);

	err = ops->msi_setup(pbm, msiqid, msi,
			     (entry->pci.msi_attrib.is_64 ? 1 : 0));
	if (err)
		goto out_msi_free;

	pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;

	if (entry->pci.msi_attrib.is_64) {
		msg.address_hi = pbm->msi64_start >> 32;
		msg.address_lo = pbm->msi64_start & 0xffffffff;
	} else {
		msg.address_hi = 0;
		msg.address_lo = pbm->msi32_start;
	}
	msg.data = msi;

	irq_set_msi_desc(*irq_p, entry);
	pci_write_msi_msg(*irq_p, &msg);

	return 0;

out_msi_free:
	free_msi(pbm, msi);

out_irq_free:
	irq_set_chip(*irq_p, NULL);
	irq_free(*irq_p);
	*irq_p = 0;

out_err:
	return err;
}
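
/* Undo sparc64_setup_msi_irq(): unmap the MSI from its event queue and
 * release the MSI number and virtual irq.
 */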
static void sparc64_teardown_msi_irq(unsigned int irq,
				     struct pci_dev *pdev)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	const struct sparc64_msiq_ops *ops = pbm->msi_ops;
	unsigned int msi_num;
	int i, err;

	for (i = 0; i < pbm->msi_num; i++) {
		if (pbm->msi_irq_table[i] == irq)
			break;
	}
	if (i >= pbm->msi_num) {
		pci_err(pdev, "%s: teardown: No MSI for irq %u\n", pbm->name,
			irq);
		return;
	}

	msi_num = pbm->msi_first + i;
	pbm->msi_irq_table[i] = ~0U;

	err = ops->msi_teardown(pbm, msi_num);
	if (err) {
		pci_err(pdev, "%s: teardown: ops->teardown() on MSI %u, "
			"irq %u, gives error %d\n", pbm->name, msi_num, irq,
			err);
		return;
	}

	free_msi(pbm, msi_num);

	irq_set_chip(irq, NULL);
	irq_free(irq);
}
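
/* The MSI bitmap tracks which MSI numbers are in use; it is sized to a
 * whole number of unsigned longs covering pbm->msi_num bits.
 */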
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
	unsigned long size, bits_per_ulong;

	bits_per_ulong = sizeof(unsigned long) * 8;
	size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
	size /= 8;
	BUG_ON(size % sizeof(unsigned long));

	pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_bitmap)
		return -ENOMEM;

	return 0;
}

static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msi_bitmap);
	pbm->msi_bitmap = NULL;
}
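
/* Allocate the per-queue interrupt cookies and the MSI to irq table. */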
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
	int size, i;

	size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
	pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
	if (!pbm->msiq_irq_cookies)
		return -ENOMEM;

	for (i = 0; i < pbm->msiq_num; i++) {
		struct sparc64_msiq_cookie *p;

		p = &pbm->msiq_irq_cookies[i];
		p->pbm = pbm;
		p->msiqid = pbm->msiq_first + i;
	}

	size = pbm->msi_num * sizeof(unsigned int);
	pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
	if (!pbm->msi_irq_table) {
		kfree(pbm->msiq_irq_cookies);
		pbm->msiq_irq_cookies = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void msi_table_free(struct pci_pbm_info *pbm)
{
	kfree(pbm->msiq_irq_cookies);
	pbm->msiq_irq_cookies = NULL;

	kfree(pbm->msi_irq_table);
	pbm->msi_irq_table = NULL;
}
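
/* Build and request the interrupt for a single event queue, binding it
 * to the PBM's NUMA node when one is known.
 */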
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
				 const struct sparc64_msiq_ops *ops,
				 unsigned long msiqid,
				 unsigned long devino)
{
	int irq = ops->msiq_build_irq(pbm, msiqid, devino);
	int err, nid;

	if (irq < 0)
		return irq;

	nid = pbm->numa_node;
	if (nid != -1) {
		cpumask_t numa_mask;

		cpumask_copy(&numa_mask, cpumask_of_node(nid));
		irq_set_affinity(irq, &numa_mask);
	}
	err = request_irq(irq, sparc64_msiq_interrupt, 0,
			  "MSIQ",
			  &pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
	if (err)
		return err;

	return 0;
}
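
/* Bring up every MSI event queue owned by this PBM. */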
static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
				      const struct sparc64_msiq_ops *ops)
{
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = i + pbm->msiq_first;
		unsigned long devino = i + pbm->msiq_first_devino;
		int err;

		err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
		if (err)
			return err;
	}
	return 0;
}
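
/* Probe the OF device node for MSI properties and, if present, set up
 * the event queues and hook up the PBM's MSI setup/teardown methods.
 */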
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
			  const struct sparc64_msiq_ops *ops)
{
	const u32 *val;
	int len;

	val = of_get_property(pbm->op->dev.of_node, "#msi-eqs", &len);
	if (!val || len != 4)
		goto no_msi;
	pbm->msiq_num = *val;
	if (pbm->msiq_num) {
		const struct msiq_prop {
			u32 first_msiq;
			u32 num_msiq;
			u32 first_devino;
		} *mqp;
		const struct msi_range_prop {
			u32 first_msi;
			u32 num_msi;
		} *mrng;
		const struct addr_range_prop {
			u32 msi32_high;
			u32 msi32_low;
			u32 msi32_len;
			u32 msi64_high;
			u32 msi64_low;
			u32 msi64_len;
		} *arng;

		val = of_get_property(pbm->op->dev.of_node, "msi-eq-size", &len);
		if (!val || len != 4)
			goto no_msi;

		pbm->msiq_ent_count = *val;

		mqp = of_get_property(pbm->op->dev.of_node,
				      "msi-eq-to-devino", &len);
		if (!mqp)
			mqp = of_get_property(pbm->op->dev.of_node,
					      "msi-eq-devino", &len);
		if (!mqp || len != sizeof(struct msiq_prop))
			goto no_msi;

		pbm->msiq_first = mqp->first_msiq;
		pbm->msiq_first_devino = mqp->first_devino;

		val = of_get_property(pbm->op->dev.of_node, "#msi", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_num = *val;

		mrng = of_get_property(pbm->op->dev.of_node, "msi-ranges", &len);
		if (!mrng || len != sizeof(struct msi_range_prop))
			goto no_msi;
		pbm->msi_first = mrng->first_msi;

		val = of_get_property(pbm->op->dev.of_node, "msi-data-mask", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msi_data_mask = *val;

		val = of_get_property(pbm->op->dev.of_node, "msix-data-width", &len);
		if (!val || len != 4)
			goto no_msi;
		pbm->msix_data_width = *val;

		arng = of_get_property(pbm->op->dev.of_node, "msi-address-ranges",
				       &len);
		if (!arng || len != sizeof(struct addr_range_prop))
			goto no_msi;
		pbm->msi32_start = ((u64)arng->msi32_high << 32) |
			(u64) arng->msi32_low;
		pbm->msi64_start = ((u64)arng->msi64_high << 32) |
			(u64) arng->msi64_low;
		pbm->msi32_len = arng->msi32_len;
		pbm->msi64_len = arng->msi64_len;

		if (msi_bitmap_alloc(pbm))
			goto no_msi;

		if (msi_table_alloc(pbm)) {
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (ops->msiq_alloc(pbm)) {
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		if (sparc64_bringup_msi_queues(pbm, ops)) {
			ops->msiq_free(pbm);
			msi_table_free(pbm);
			msi_bitmap_free(pbm);
			goto no_msi;
		}

		printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
		       "devino[0x%x]\n",
		       pbm->name,
		       pbm->msiq_first, pbm->msiq_num,
		       pbm->msiq_ent_count,
		       pbm->msiq_first_devino);
		printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
		       "width[%u]\n",
		       pbm->name,
		       pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
		       pbm->msix_data_width);
		printk(KERN_INFO "%s: MSI addr32[0x%llx:0x%x] "
		       "addr64[0x%llx:0x%x]\n",
		       pbm->name,
		       pbm->msi32_start, pbm->msi32_len,
		       pbm->msi64_start, pbm->msi64_len);
		printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
		       pbm->name,
		       __pa(pbm->msi_queues));

		pbm->msi_ops = ops;
		pbm->setup_msi_irq = sparc64_setup_msi_irq;
		pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
	}

	return;

no_msi:
	printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}