2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (c) 2005 Silicon Graphics, Inc. All rights reserved.
10 * MOATB Core Services driver.
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/types.h>
17 #include <linux/ioport.h>
18 #include <linux/kernel.h>
19 #include <linux/notifier.h>
20 #include <linux/reboot.h>
21 #include <linux/init.h>
23 #include <linux/delay.h>
24 #include <linux/device.h>
26 #include <linux/uio.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/pagemap.h>
31 #include <linux/uaccess.h>
32 #include <asm/pgtable.h>
33 #include <asm/sn/addrs.h>
34 #include <asm/sn/intr.h>
35 #include <asm/sn/tiocx.h>
/* Debug print helper; always logs at KERN_ALERT severity. Callers must NOT
 * pass their own KERN_<level> prefix — the macro already supplies it. */
40 #define DBG(fmt...) printk(KERN_ALERT fmt)
/* Serializes open() lookups against the per-device list below. */
44 static DEFINE_MUTEX(mbcs_mutex);
/* Char-device major number; zero-initialized, i.e. dynamically assigned
 * by register_chrdev() at module init. */
45 static int mbcs_major;
/* One struct mbcs_soft per probed board, linked via soft->list. */
47 static LIST_HEAD(soft_list);
/* File operations for the MBCS char device: seek/read/write target the
 * device SRAM (via DMA); mmap exposes the GSCR register page. */
52 static const struct file_operations mbcs_ops = {
55 .llseek = mbcs_sram_llseek,
56 .read = mbcs_sram_read,
57 .write = mbcs_sram_write,
58 .mmap = mbcs_gscr_mmap,
/* Argument bundle for driver callbacks; carries the owning cx_dev.
 * NOTE(review): remainder of this struct is elided from this excerpt. */
61 struct mbcs_callback_arg {
63 	struct cx_dev *cx_dev;
/* Reset the read-DMA ("get") descriptor to defaults: all-zero with the
 * done-interrupt enabled, so completion wakes the waiting reader. */
66 static inline void mbcs_getdma_init(struct getdma *gdma)
68 memset(gdma, 0, sizeof(struct getdma));
69 gdma->DoneIntEnable = 1;
/* Reset the write-DMA ("put") descriptor to defaults: all-zero with the
 * done-interrupt enabled, mirroring mbcs_getdma_init(). */
72 static inline void mbcs_putdma_init(struct putdma *pdma)
74 memset(pdma, 0, sizeof(struct putdma));
75 pdma->DoneIntEnable = 1;
/* Zero the algorithm-block descriptor; no interrupt flag lives here
 * (the algo done-interrupt is enabled in mbcs_algo_start() instead). */
78 static inline void mbcs_algo_init(struct algoblock *algo_soft)
80 memset(algo_soft, 0, sizeof(struct algoblock));
/*
 * Program the read-DMA (device SRAM -> host memory) MMR set:
 * host system address, local SRAM address + bank select, transfer control
 * (packet count, AMO/interrupt enables, PIO-vs-memory target), AMO
 * destination, and the interrupt destination/vector for completion.
 * All register images are built in zeroed union temporaries first, then
 * written with MBCS_MMR_SET.
 */
83 static inline void mbcs_getdma_set(void *mmr,
92 uint64_t amoModType, uint64_t intrHostDest,
95 union dma_control rdma_control;
96 union dma_amo_dest amo_dest;
97 union intr_dest intr_dest;
98 union dma_localaddr local_addr;
99 union dma_hostaddr host_addr;
101 rdma_control.dma_control_reg = 0;
102 amo_dest.dma_amo_dest_reg = 0;
103 intr_dest.intr_dest_reg = 0;
104 local_addr.dma_localaddr_reg = 0;
105 host_addr.dma_hostaddr_reg = 0;
/* Host-side system address of the DMA buffer. */
107 host_addr.dma_sys_addr = hostAddr;
108 MBCS_MMR_SET(mmr, MBCS_RD_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
/* Device-side SRAM offset plus bank-select computed by the caller. */
110 local_addr.dma_ram_addr = localAddr;
111 local_addr.dma_ram_sel = localRamSel;
112 MBCS_MMR_SET(mmr, MBCS_RD_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
/* Transfer length in packets and the completion-notification enables. */
114 rdma_control.dma_op_length = numPkts;
115 rdma_control.done_amo_en = amoEnable;
116 rdma_control.done_int_en = intrEnable;
117 rdma_control.pio_mem_n = peerIO;
118 MBCS_MMR_SET(mmr, MBCS_RD_DMA_CTRL, rdma_control.dma_control_reg);
/* Where (and how) the device performs the completion AMO. */
120 amo_dest.dma_amo_sys_addr = amoHostDest;
121 amo_dest.dma_amo_mod_type = amoModType;
122 MBCS_MMR_SET(mmr, MBCS_RD_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
/* Crosstalk address + vector the completion interrupt is delivered to. */
124 intr_dest.address = intrHostDest;
125 intr_dest.int_vector = intrVector;
126 MBCS_MMR_SET(mmr, MBCS_RD_DMA_INT_DEST, intr_dest.intr_dest_reg);
/*
 * Program the write-DMA (host memory -> device SRAM) MMR set.
 * Structurally identical to mbcs_getdma_set() but targets the
 * MBCS_WR_DMA_* registers.
 */
130 static inline void mbcs_putdma_set(void *mmr,
133 uint64_t localRamSel,
138 uint64_t amoHostDest,
140 uint64_t intrHostDest, uint64_t intrVector)
142 union dma_control wdma_control;
143 union dma_amo_dest amo_dest;
144 union intr_dest intr_dest;
145 union dma_localaddr local_addr;
146 union dma_hostaddr host_addr;
148 wdma_control.dma_control_reg = 0;
149 amo_dest.dma_amo_dest_reg = 0;
150 intr_dest.intr_dest_reg = 0;
151 local_addr.dma_localaddr_reg = 0;
152 host_addr.dma_hostaddr_reg = 0;
/* Host-side source buffer address. */
154 host_addr.dma_sys_addr = hostAddr;
155 MBCS_MMR_SET(mmr, MBCS_WR_DMA_SYS_ADDR, host_addr.dma_hostaddr_reg);
/* Device-side SRAM destination and bank select. */
157 local_addr.dma_ram_addr = localAddr;
158 local_addr.dma_ram_sel = localRamSel;
159 MBCS_MMR_SET(mmr, MBCS_WR_DMA_LOC_ADDR, local_addr.dma_localaddr_reg);
/* Packet count plus AMO/interrupt/PIO control bits. */
161 wdma_control.dma_op_length = numPkts;
162 wdma_control.done_amo_en = amoEnable;
163 wdma_control.done_int_en = intrEnable;
164 wdma_control.pio_mem_n = peerIO;
165 MBCS_MMR_SET(mmr, MBCS_WR_DMA_CTRL, wdma_control.dma_control_reg);
/* Completion AMO destination. */
167 amo_dest.dma_amo_sys_addr = amoHostDest;
168 amo_dest.dma_amo_mod_type = amoModType;
169 MBCS_MMR_SET(mmr, MBCS_WR_DMA_AMO_DEST, amo_dest.dma_amo_dest_reg);
/* Completion interrupt destination and vector. */
171 intr_dest.address = intrHostDest;
172 intr_dest.int_vector = intrVector;
173 MBCS_MMR_SET(mmr, MBCS_WR_DMA_INT_DEST, intr_dest.intr_dest_reg);
/*
 * Program the algorithm-engine MMRs: completion AMO destination,
 * completion interrupt destination/vector, and the step count.
 */
177 static inline void mbcs_algo_set(void *mmr,
178 uint64_t amoHostDest,
180 uint64_t intrHostDest,
181 uint64_t intrVector, uint64_t algoStepCount)
183 union dma_amo_dest amo_dest;
184 union intr_dest intr_dest;
185 union algo_step step;
187 step.algo_step_reg = 0;
188 intr_dest.intr_dest_reg = 0;
189 amo_dest.dma_amo_dest_reg = 0;
/* AMO fired by the device when the algorithm completes. */
191 amo_dest.dma_amo_sys_addr = amoHostDest;
192 amo_dest.dma_amo_mod_type = amoModType;
193 MBCS_MMR_SET(mmr, MBCS_ALG_AMO_DEST, amo_dest.dma_amo_dest_reg);
/* Interrupt delivered on algorithm completion. */
195 intr_dest.address = intrHostDest;
196 intr_dest.int_vector = intrVector;
197 MBCS_MMR_SET(mmr, MBCS_ALG_INT_DEST, intr_dest.intr_dest_reg);
/* Number of algorithm steps to run. */
199 step.alg_step_cnt = algoStepCount;
200 MBCS_MMR_SET(mmr, MBCS_ALG_STEP, step.algo_step_reg);
/*
 * Kick off a read DMA described by soft->getdma: convert the byte count
 * to cacheline-sized packets (rounded up), derive the 2MB SRAM bank
 * select from localAddr, program the MMRs, then set rd_dma_go in the
 * core-services control register to start the engine.
 */
203 static inline int mbcs_getdma_start(struct mbcs_soft *soft)
208 union cm_control cm_control;
210 mmr_base = soft->mmr_base;
211 gdma = &soft->getdma;
213 /* check that host address got setup */
/* Round the byte count up to whole cachelines worth of packets. */
218 (gdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
221 mbcs_getdma_set(mmr_base, tiocx_dma_addr(gdma->hostAddr),
/* Bank select: 0-3 for the 2MB window containing localAddr. */
223 (gdma->localAddr < MB2) ? 0 :
224 (gdma->localAddr < MB4) ? 1 :
225 (gdma->localAddr < MB6) ? 2 : 3,
232 gdma->intrHostDest, gdma->intrVector);
/* Read-modify-write CM_CONTROL to fire the read-DMA engine. */
235 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
236 cm_control.rd_dma_go = 1;
237 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
/*
 * Kick off a write DMA described by soft->putdma. Mirrors
 * mbcs_getdma_start(): packetize the byte count, pick the SRAM bank,
 * program the write-DMA MMRs, then set wr_dma_go.
 */
243 static inline int mbcs_putdma_start(struct mbcs_soft *soft)
248 union cm_control cm_control;
250 mmr_base = soft->mmr_base;
251 pdma = &soft->putdma;
253 /* check that host address got setup */
/* Round the byte count up to whole cachelines worth of packets. */
258 (pdma->bytes + (MBCS_CACHELINE_SIZE - 1)) / MBCS_CACHELINE_SIZE;
261 mbcs_putdma_set(mmr_base, tiocx_dma_addr(pdma->hostAddr),
/* Bank select: 0-3 for the 2MB window containing localAddr. */
263 (pdma->localAddr < MB2) ? 0 :
264 (pdma->localAddr < MB4) ? 1 :
265 (pdma->localAddr < MB6) ? 2 : 3,
272 pdma->intrHostDest, pdma->intrVector);
/* Read-modify-write CM_CONTROL to fire the write-DMA engine. */
275 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
276 cm_control.wr_dma_go = 1;
277 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
/*
 * Start the on-board algorithm engine using the parameters cached in
 * soft->algo. Takes algolock (interruptibly) so only one algorithm run
 * is programmed at a time, clears the completion flag that the interrupt
 * handler will set, enables the algo-done interrupt, and sets alg_go.
 */
283 static inline int mbcs_algo_start(struct mbcs_soft *soft)
285 struct algoblock *algo_soft = &soft->algo;
286 void *mmr_base = soft->mmr_base;
287 union cm_control cm_control;
289 if (mutex_lock_interruptible(&soft->algolock))
/* Completion flag is set by mbcs_completion_intr_handler(). */
292 atomic_set(&soft->algo_done, 0);
294 mbcs_algo_set(mmr_base,
295 algo_soft->amoHostDest,
296 algo_soft->amoModType,
297 algo_soft->intrHostDest,
298 algo_soft->intrVector, algo_soft->algoStepCount);
300 /* start algorithm */
301 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
302 cm_control.alg_done_int_en = 1;
303 cm_control.alg_go = 1;
304 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
306 mutex_unlock(&soft->algolock);
/*
 * Perform one device-write DMA: the MBCS board DMA-writes SRAM contents
 * (starting at *off) into the host buffer at hostAddr, then we sleep
 * until the completion interrupt sets dmawrite_done. Serialized by
 * dmawritelock; the naming is device-centric, so mbcs_sram_read() is the
 * caller of this "dmawrite" path.
 *
 * Fix: drop the redundant KERN_ALERT from the DBG() call — the DBG macro
 * already prepends KERN_ALERT, so the old code emitted a doubled
 * "<1><1>" log-level prefix.
 */
311 static inline ssize_t
312 do_mbcs_sram_dmawrite(struct mbcs_soft *soft, uint64_t hostAddr,
313 size_t len, loff_t * off)
317 if (mutex_lock_interruptible(&soft->dmawritelock))
/* Cleared here, set by the interrupt handler on wr_dma_done. */
320 atomic_set(&soft->dmawrite_done, 0);
322 soft->putdma.hostAddr = hostAddr;
323 soft->putdma.localAddr = *off;
324 soft->putdma.bytes = len;
326 if (mbcs_putdma_start(soft) < 0) {
327 DBG("do_mbcs_sram_dmawrite: "
328 "mbcs_putdma_start failed\n");
/* Sleep until the completion interrupt wakes us (or a signal does). */
333 if (wait_event_interruptible(soft->dmawrite_queue,
334 atomic_read(&soft->dmawrite_done))) {
343 mutex_unlock(&soft->dmawritelock);
/*
 * Perform one device-read DMA: the MBCS board DMA-reads the host buffer
 * at hostAddr into SRAM at *off, then we sleep until the completion
 * interrupt sets dmaread_done. Serialized by dmareadlock; called from
 * mbcs_sram_write().
 *
 * Fixes:
 *  - The original cleared soft->dmawrite_done here but waited on
 *    soft->dmaread_done. After the first completed read, dmaread_done
 *    stayed 1 forever, so later callers returned before the DMA had
 *    finished — and the stray clear could race a concurrent writer
 *    waiting on dmawrite_done. Clear dmaread_done instead.
 *  - Drop the redundant KERN_ALERT from DBG() (the macro already
 *    prepends it, so the old code produced a doubled "<1><1>" prefix).
 */
348 static inline ssize_t
349 do_mbcs_sram_dmaread(struct mbcs_soft *soft, uint64_t hostAddr,
350 size_t len, loff_t * off)
354 if (mutex_lock_interruptible(&soft->dmareadlock))
/* Cleared here, set by the interrupt handler on rd_dma_done. */
357 atomic_set(&soft->dmaread_done, 0);
359 soft->getdma.hostAddr = hostAddr;
360 soft->getdma.localAddr = *off;
361 soft->getdma.bytes = len;
363 if (mbcs_getdma_start(soft) < 0) {
364 DBG("mbcs_strategy: mbcs_getdma_start failed\n");
/* Sleep until the completion interrupt wakes us (or a signal does). */
369 if (wait_event_interruptible(soft->dmaread_queue,
370 atomic_read(&soft->dmaread_done))) {
379 mutex_unlock(&soft->dmareadlock);
/*
 * open(): look up the per-device soft state whose nasid matches the
 * char-dev minor number and stash its cx_dev in file->private_data for
 * the read/write/mmap handlers. The list walk is protected by
 * mbcs_mutex (despite the stale comment below).
 */
384 static int mbcs_open(struct inode *ip, struct file *fp)
386 struct mbcs_soft *soft;
389 mutex_lock(&mbcs_mutex);
392 /* Nothing protects access to this list... */
393 list_for_each_entry(soft, &soft_list, list) {
394 if (soft->nasid == minor) {
395 fp->private_data = soft->cxdev;
396 mutex_unlock(&mbcs_mutex);
/* No match: fall through, unlock, and (per elided code) fail the open. */
401 mutex_unlock(&mbcs_mutex);
/*
 * read(): allocate DMA-able pages, have the device DMA-write its SRAM
 * (from *off) into them, then copy_to_user() and free the pages.
 * NOTE(review): len comes straight from userspace into
 * __get_dma_pages(get_order(len)) — huge reads will simply fail
 * allocation; any bounding is in code elided from this excerpt.
 */
405 static ssize_t mbcs_sram_read(struct file * fp, char __user *buf, size_t len, loff_t * off)
407 struct cx_dev *cx_dev = fp->private_data;
408 struct mbcs_soft *soft = cx_dev->soft;
/* DMA-capable buffer for the device to write into. */
412 hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
/* "dmawrite" is device-centric: the board writes SRAM -> host buffer. */
416 rv = do_mbcs_sram_dmawrite(soft, hostAddr, len, off);
420 if (copy_to_user(buf, (void *)hostAddr, len))
424 free_pages(hostAddr, get_order(len));
/*
 * write(): allocate DMA-able pages, copy the user buffer in, then have
 * the device DMA-read them into its SRAM at *off; free the pages when
 * the transfer completes. Symmetric to mbcs_sram_read().
 */
430 mbcs_sram_write(struct file * fp, const char __user *buf, size_t len, loff_t * off)
432 struct cx_dev *cx_dev = fp->private_data;
433 struct mbcs_soft *soft = cx_dev->soft;
/* DMA-capable staging buffer for the outgoing data. */
437 hostAddr = __get_dma_pages(GFP_KERNEL, get_order(len));
441 if (copy_from_user((void *)hostAddr, buf, len)) {
/* "dmaread" is device-centric: the board reads host buffer -> SRAM. */
446 rv = do_mbcs_sram_dmaread(soft, hostAddr, len, off);
449 free_pages(hostAddr, get_order(len));
/* llseek(): standard fixed-size seek, bounded by MAX_LFS_FILESIZE
 * (the size argument is on the elided continuation line). */
454 static loff_t mbcs_sram_llseek(struct file * filp, loff_t off, int whence)
456 return generic_file_llseek_size(filp, off, whence, MAX_LFS_FILESIZE,
/* Return the PIO (MMR-window) address for a register offset on this
 * device: soft->mmr_base + offset, as a uint64_t. */
460 static uint64_t mbcs_pioaddr(struct mbcs_soft *soft, uint64_t offset)
464 mmr_base = (uint64_t) (soft->mmr_base + offset);
/* Cache the PIO address of the debug register block in soft state. */
469 static void mbcs_debug_pioaddr_set(struct mbcs_soft *soft)
471 soft->debug_addr = mbcs_pioaddr(soft, MBCS_DEBUG_START);
/* Cache the PIO address of the GSCR register block in soft state. */
474 static void mbcs_gscr_pioaddr_set(struct mbcs_soft *soft)
476 soft->gscr_addr = mbcs_pioaddr(soft, MBCS_GSCR_START);
/*
 * mmap(): map the device's GSCR register page into userspace.
 * Only offset 0 is allowed, and the mapping is uncached since it
 * targets memory-mapped registers.
 */
479 static int mbcs_gscr_mmap(struct file *fp, struct vm_area_struct *vma)
481 struct cx_dev *cx_dev = fp->private_data;
482 struct mbcs_soft *soft = cx_dev->soft;
/* Refuse any mapping that doesn't start at the beginning of GSCR. */
484 if (vma->vm_pgoff != 0)
487 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
489 /* Remap-pfn-range will mark the range VM_IO */
490 if (remap_pfn_range(vma,
492 __pa(soft->gscr_addr) >> PAGE_SHIFT,
501 * mbcs_completion_intr_handler - Primary completion handler.
503 * @arg: soft struct for device
/*
 * Shared handler for all three completion sources (read-DMA, write-DMA,
 * algorithm). Reads CM_STATUS once, then for each asserted done bit:
 * clears/stops the corresponding engine via CM_CONTROL, sets the
 * matching done flag, and wakes the sleeper in do_mbcs_sram_dmaread(),
 * do_mbcs_sram_dmawrite(), or store_algo() respectively.
 */
507 mbcs_completion_intr_handler(int irq, void *arg)
509 struct mbcs_soft *soft = (struct mbcs_soft *)arg;
511 union cm_status cm_status;
512 union cm_control cm_control;
514 mmr_base = soft->mmr_base;
515 cm_status.cm_status_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_STATUS);
517 if (cm_status.rd_dma_done) {
518 /* stop dma-read engine, clear status */
519 cm_control.cm_control_reg =
520 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
521 cm_control.rd_dma_clr = 1;
522 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
523 cm_control.cm_control_reg);
524 atomic_set(&soft->dmaread_done, 1);
525 wake_up(&soft->dmaread_queue);
527 if (cm_status.wr_dma_done) {
528 /* stop dma-write engine, clear status */
529 cm_control.cm_control_reg =
530 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
531 cm_control.wr_dma_clr = 1;
532 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
533 cm_control.cm_control_reg);
534 atomic_set(&soft->dmawrite_done, 1);
535 wake_up(&soft->dmawrite_queue);
537 if (cm_status.alg_done) {
/* Acknowledge algorithm completion the same way. */
539 cm_control.cm_control_reg =
540 MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
541 cm_control.alg_done_clr = 1;
542 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL,
543 cm_control.cm_control_reg);
544 atomic_set(&soft->algo_done, 1);
545 wake_up(&soft->algo_queue);
552 * mbcs_intr_alloc - Allocate interrupts.
553 * @dev: device pointer
/*
 * Allocate three TIOCX interrupts (get-DMA, put-DMA, algorithm), record
 * each crosstalk address/vector into the matching descriptor, and hook
 * them all to mbcs_completion_intr_handler. On any failure, unwind every
 * previously acquired IRQ (free_irq + tiocx_irq_free) before returning.
 * NOTE(review): the assignments initializing `soft` (from dev) and
 * `algo` (&soft->algo) sit on lines elided from this excerpt — confirm
 * against the full source.
 */
556 static int mbcs_intr_alloc(struct cx_dev *dev)
558 struct sn_irq_info *sn_irq;
559 struct mbcs_soft *soft;
560 struct getdma *getdma;
561 struct putdma *putdma;
562 struct algoblock *algo;
565 getdma = &soft->getdma;
566 putdma = &soft->putdma;
569 soft->get_sn_irq = NULL;
570 soft->put_sn_irq = NULL;
571 soft->algo_sn_irq = NULL;
/* 1) get-DMA (read) completion interrupt. */
573 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
576 soft->get_sn_irq = sn_irq;
577 getdma->intrHostDest = sn_irq->irq_xtalkaddr;
578 getdma->intrVector = sn_irq->irq_irq;
579 if (request_irq(sn_irq->irq_irq,
580 (void *)mbcs_completion_intr_handler, IRQF_SHARED,
581 "MBCS get intr", (void *)soft)) {
582 tiocx_irq_free(soft->get_sn_irq);
/* 2) put-DMA (write) completion interrupt; unwind "get" on failure. */
586 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
587 if (sn_irq == NULL) {
588 free_irq(soft->get_sn_irq->irq_irq, soft);
589 tiocx_irq_free(soft->get_sn_irq);
592 soft->put_sn_irq = sn_irq;
593 putdma->intrHostDest = sn_irq->irq_xtalkaddr;
594 putdma->intrVector = sn_irq->irq_irq;
595 if (request_irq(sn_irq->irq_irq,
596 (void *)mbcs_completion_intr_handler, IRQF_SHARED,
597 "MBCS put intr", (void *)soft)) {
598 tiocx_irq_free(soft->put_sn_irq);
599 free_irq(soft->get_sn_irq->irq_irq, soft);
600 tiocx_irq_free(soft->get_sn_irq);
/* 3) algorithm completion interrupt; unwind "put" and "get" on failure. */
604 sn_irq = tiocx_irq_alloc(dev->cx_id.nasid, TIOCX_CORELET, -1, -1, -1);
605 if (sn_irq == NULL) {
606 free_irq(soft->put_sn_irq->irq_irq, soft);
607 tiocx_irq_free(soft->put_sn_irq);
608 free_irq(soft->get_sn_irq->irq_irq, soft);
609 tiocx_irq_free(soft->get_sn_irq);
612 soft->algo_sn_irq = sn_irq;
613 algo->intrHostDest = sn_irq->irq_xtalkaddr;
614 algo->intrVector = sn_irq->irq_irq;
615 if (request_irq(sn_irq->irq_irq,
616 (void *)mbcs_completion_intr_handler, IRQF_SHARED,
617 "MBCS algo intr", (void *)soft)) {
618 tiocx_irq_free(soft->algo_sn_irq);
619 free_irq(soft->put_sn_irq->irq_irq, soft);
620 tiocx_irq_free(soft->put_sn_irq);
621 free_irq(soft->get_sn_irq->irq_irq, soft);
622 tiocx_irq_free(soft->get_sn_irq);
630 * mbcs_intr_dealloc - Remove interrupts.
631 * @dev: device pointer
/*
 * Release all three interrupts allocated by mbcs_intr_alloc(): free the
 * handler, then release the TIOCX IRQ, for get, put, and algo in turn.
 */
634 static void mbcs_intr_dealloc(struct cx_dev *dev)
636 struct mbcs_soft *soft;
640 free_irq(soft->get_sn_irq->irq_irq, soft);
641 tiocx_irq_free(soft->get_sn_irq);
642 free_irq(soft->put_sn_irq->irq_irq, soft);
643 tiocx_irq_free(soft->put_sn_irq);
644 free_irq(soft->algo_sn_irq->irq_irq, soft);
645 tiocx_irq_free(soft->algo_sn_irq);
/*
 * One-time hardware bring-up: maximize the request timeout, cache the
 * GSCR/debug PIO addresses, clear any latched error state, enable the
 * error-interrupt mask (all but bit 23, INT_EN_PIO_REQ_ADDR_INV), then
 * re-arm the status registers and clear all three engines.
 */
648 static inline int mbcs_hw_init(struct mbcs_soft *soft)
650 void *mmr_base = soft->mmr_base;
651 union cm_control cm_control;
652 union cm_req_timeout cm_req_timeout;
/* Set the CM request timeout to its maximum value. */
655 cm_req_timeout.cm_req_timeout_reg =
656 MBCS_MMR_GET(mmr_base, MBCS_CM_REQ_TOUT);
658 cm_req_timeout.time_out = MBCS_CM_CONTROL_REQ_TOUT_MASK;
659 MBCS_MMR_SET(mmr_base, MBCS_CM_REQ_TOUT,
660 cm_req_timeout.cm_req_timeout_reg);
662 mbcs_gscr_pioaddr_set(soft);
663 mbcs_debug_pioaddr_set(soft);
/* Write back the latched error status to clear it, then zero details. */
666 err_stat = MBCS_MMR_GET(mmr_base, MBCS_CM_ERR_STAT);
667 MBCS_MMR_SET(mmr_base, MBCS_CM_CLR_ERR_STAT, err_stat);
668 MBCS_MMR_ZERO(mmr_base, MBCS_CM_ERROR_DETAIL1);
670 /* enable interrupts */
671 /* turn off 2^23 (INT_EN_PIO_REQ_ADDR_INV) */
672 MBCS_MMR_SET(mmr_base, MBCS_CM_ERR_INT_EN, 0x3ffffff7e00ffUL);
674 /* arm status regs and clear engines */
675 cm_control.cm_control_reg = MBCS_MMR_GET(mmr_base, MBCS_CM_CONTROL);
676 cm_control.rearm_stat_regs = 1;
677 cm_control.alg_clr = 1;
678 cm_control.wr_dma_clr = 1;
679 cm_control.rd_dma_clr = 1;
681 MBCS_MMR_SET(mmr_base, MBCS_CM_CONTROL, cm_control.cm_control_reg);
/*
 * sysfs "algo" show: report the algorithm number/revision word from the
 * first debug register as two 32-bit hex values ("<hi> <lo>").
 */
686 static ssize_t show_algo(struct device *dev, struct device_attribute *attr, char *buf)
688 struct cx_dev *cx_dev = to_cx_dev(dev);
689 struct mbcs_soft *soft = cx_dev->soft;
693 * By convention, the first debug register contains the
694 * algorithm number and revision.
/* PIO read of the first 64-bit debug register. */
696 debug0 = *(uint64_t *) soft->debug_addr;
698 return sprintf(buf, "0x%x 0x%x\n",
699 upper_32_bits(debug0), lower_32_bits(debug0));
/*
 * sysfs "algo" store: parse a repetition count from userspace, run the
 * algorithm engine and block (interruptibly) for each completion.
 * NOTE(review): the loop over `n` and the return are on elided lines;
 * only one start/wait pair is visible here.
 */
702 static ssize_t store_algo(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
705 struct cx_dev *cx_dev = to_cx_dev(dev);
706 struct mbcs_soft *soft = cx_dev->soft;
/* Iteration count; no error checking on the parse (simple_strtoul). */
711 n = simple_strtoul(buf, NULL, 0);
714 mbcs_algo_start(soft);
715 if (wait_event_interruptible(soft->algo_queue,
716 atomic_read(&soft->algo_done)))
/* sysfs attribute: world-readable, owner-writable "algo" file. */
723 DEVICE_ATTR(algo, 0644, show_algo, store_algo);
726 * mbcs_probe - Initialize for device
727 * @dev: device pointer
728 * @device_id: id table pointer
/*
 * Per-device probe: allocate and initialize the soft state (wait queues,
 * locks, DMA/algo descriptors), link it on soft_list, record the MMR
 * window base for the device's nasid, allocate interrupts, and create
 * the "algo" sysfs attribute.
 * NOTE(review): the return values of mbcs_intr_alloc() and
 * device_create_file() are ignored on the visible lines — any handling
 * would be in code elided from this excerpt; confirm against full source.
 */
731 static int mbcs_probe(struct cx_dev *dev, const struct cx_device_id *id)
733 struct mbcs_soft *soft;
737 soft = kzalloc(sizeof(struct mbcs_soft), GFP_KERNEL);
741 soft->nasid = dev->cx_id.nasid;
742 list_add(&soft->list, &soft_list);
/* MMR window for this nasid; used by every register accessor above. */
743 soft->mmr_base = (void *)tiocx_swin_base(dev->cx_id.nasid);
747 init_waitqueue_head(&soft->dmawrite_queue);
748 init_waitqueue_head(&soft->dmaread_queue);
749 init_waitqueue_head(&soft->algo_queue);
751 mutex_init(&soft->dmawritelock);
752 mutex_init(&soft->dmareadlock);
753 mutex_init(&soft->algolock);
755 mbcs_getdma_init(&soft->getdma);
756 mbcs_putdma_init(&soft->putdma);
757 mbcs_algo_init(&soft->algo);
761 /* Allocate interrupts */
762 mbcs_intr_alloc(dev);
764 device_create_file(&dev->dev, &dev_attr_algo);
/* Per-device teardown: release the three IRQs and the sysfs attribute.
 * (Freeing of the soft state, if any, is on lines elided here.) */
769 static int mbcs_remove(struct cx_dev *dev)
772 mbcs_intr_dealloc(dev);
776 device_remove_file(&dev->dev, &dev_attr_algo);
/* CX bus match table: the base MBCS part and the ALG0 variant, both
 * from the same manufacturer. Exported for module autoloading. */
781 static const struct cx_device_id mbcs_id_table[] = {
783 .part_num = MBCS_PART_NUM,
784 .mfg_num = MBCS_MFG_NUM,
787 .part_num = MBCS_PART_NUM_ALG0,
788 .mfg_num = MBCS_MFG_NUM,
793 MODULE_DEVICE_TABLE(cx, mbcs_id_table);
/* CX bus driver glue: id table plus probe/remove callbacks. */
795 static struct cx_drv mbcs_driver = {
797 .id_table = mbcs_id_table,
799 .remove = mbcs_remove,
/* Module unload: drop the char-dev registration, then the bus driver. */
802 static void __exit mbcs_exit(void)
804 unregister_chrdev(mbcs_major, DEVICE_NAME);
805 cx_driver_unregister(&mbcs_driver);
/*
 * Module init: refuse to load on anything but SGI sn2 hardware, register
 * the char device (dynamic major, since mbcs_major starts at 0), then
 * register with the CX bus so probe() runs for matching devices.
 */
808 static int __init mbcs_init(void)
/* This driver only works on SGI Altix (sn2) platforms. */
812 if (!ia64_platform_is("sn2"))
815 // Put driver into chrdevs[].  Get major number.
816 rv = register_chrdev(mbcs_major, DEVICE_NAME, &mbcs_ops);
818 DBG(KERN_ALERT "mbcs_init: can't get major number. %d\n", rv);
823 return cx_driver_register(&mbcs_driver);
/* Module entry/exit hooks and metadata. */
826 module_init(mbcs_init);
827 module_exit(mbcs_exit);
829 MODULE_AUTHOR("Bruce Losure <blosure@sgi.com>");
830 MODULE_DESCRIPTION("Driver for MOATB Core Services");
831 MODULE_LICENSE("GPL");