* Copyright (C) 2012 Spreadtrum Communications Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/wait.h>
17 #include <linux/interrupt.h>
18 #include <linux/sched.h>
19 #include <linux/kthread.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
23 #include <asm/uaccess.h>
24 #include <linux/debugfs.h>
25 #include <linux/seq_file.h>
28 #include <linux/sipc.h>
/* Registry of active sbuf managers, indexed by [destination id][channel id];
 * a slot is non-NULL between sbuf_create() and sbuf_destroy(). */
32 static struct sbuf_mgr *sbufs[SIPC_ID_NR][SMSG_CH_NR];
/*
 * sbuf_thread - per-channel worker thread.
 *
 * Opens the SMSG channel (the open may block indefinitely, which is why
 * it runs here instead of in sbuf_create()), then loops on smsg_recv()
 * dispatching messages: OPEN/CLOSE are acked for channel recovery, the
 * SBUF_INIT command is answered with our shared-memory base address, and
 * RDPTR/WRPTR events wake the tx/rx waiters of the ring named by the
 * message value and invoke the ring's registered notifier, if any.
 *
 * NOTE(review): this extract has elided lines — declarations of rval and
 * bufid, the switch on mrecv.type, break statements and closing braces.
 */
34 static int sbuf_thread(void *data)
36 struct sbuf_mgr *sbuf = data;
37 struct smsg mcmd, mrecv;
39 struct sched_param param = {.sched_priority = 90};
41 /* set the thread as a real-time (SCHED_RR) thread with priority 90 */
/* NOTE(review): "¶m" below is mojibake for "&param" — restore before building */
42 sched_setscheduler(current, SCHED_RR, ¶m);
44 /* since the channel open may hang, we call it in the sbuf thread */
45 rval = smsg_ch_open(sbuf->dst, sbuf->channel, -1);
47 printk(KERN_ERR "Failed to open channel %d\n", sbuf->channel);
48 /* assign NULL to the thread pointer as we failed to open the channel */
53 /* sbuf init done, handle the ring rx events */
54 while (!kthread_should_stop()) {
55 /* monitor sbuf rdptr/wrptr update smsg */
56 smsg_set(&mrecv, sbuf->channel, 0, 0, 0);
57 rval = smsg_recv(sbuf->dst, &mrecv, -1);
60 /* channel state is free */
65 pr_debug("sbuf thread recv msg: dst=%d, channel=%d, "
66 "type=%d, flag=0x%04x, value=0x%08x\n",
67 sbuf->dst, sbuf->channel,
68 mrecv.type, mrecv.flag, mrecv.value);
/* SMSG_TYPE_OPEN: peer (re)opened the channel — acknowledge it */
72 /* handle channel recovery */
73 smsg_open_ack(sbuf->dst, sbuf->channel);
/* SMSG_TYPE_CLOSE: peer closed the channel — ack and drop back to IDLE
 * so blocked readers/writers bail out */
76 /* handle channel recovery */
77 smsg_close_ack(sbuf->dst, sbuf->channel);
78 sbuf->state = SBUF_STATE_IDLE;
81 /* respond cmd done for sbuf init */
82 WARN_ON(mrecv.flag != SMSG_CMD_SBUF_INIT);
83 smsg_set(&mcmd, sbuf->channel, SMSG_TYPE_DONE,
84 SMSG_DONE_SBUF_INIT, sbuf->smem_addr);
85 smsg_send(sbuf->dst, &mcmd, -1);
86 sbuf->state = SBUF_STATE_READY;
/* SMSG_TYPE_EVENT: mrecv.value presumably carries the ring index (bufid)
 * — the assignment line is elided in this extract */
90 WARN_ON(bufid >= sbuf->ringnr);
92 case SMSG_EVENT_SBUF_RDPTR:
/* peer consumed tx data: writers may proceed; fire the WRITE notifier */
93 wake_up_interruptible_all(&(sbuf->rings[bufid].txwait));
94 if (sbuf->rings[bufid].handler) {
95 sbuf->rings[bufid].handler(SBUF_NOTIFY_WRITE, sbuf->rings[bufid].data);
98 case SMSG_EVENT_SBUF_WRPTR:
/* peer produced rx data: readers may proceed; fire the READ notifier */
99 wake_up_interruptible_all(&(sbuf->rings[bufid].rxwait));
100 if (sbuf->rings[bufid].handler) {
101 sbuf->rings[bufid].handler(SBUF_NOTIFY_READ, sbuf->rings[bufid].data);
/* default: log and drop anything we do not understand */
114 printk(KERN_WARNING "non-handled sbuf msg: %d-%d, %d, %d, %d\n",
115 sbuf->dst, sbuf->channel,
116 mrecv.type, mrecv.flag, mrecv.value);
124 int sbuf_create(uint8_t dst, uint8_t channel, uint32_t bufnum,
125 uint32_t txbufsize, uint32_t rxbufsize)
127 struct sbuf_mgr *sbuf;
128 volatile struct sbuf_smem_header *smem;
129 volatile struct sbuf_ring_header *ringhd;
130 int hsize, i, result;
132 sbuf = kzalloc(sizeof(struct sbuf_mgr), GFP_KERNEL);
134 printk(KERN_ERR "Failed to allocate mgr for sbuf\n");
138 sbuf->state = SBUF_STATE_IDLE;
140 sbuf->channel = channel;
141 sbuf->ringnr = bufnum;
144 hsize = sizeof(struct sbuf_smem_header) + sizeof(struct sbuf_ring_header) * bufnum;
145 sbuf->smem_size = hsize + (txbufsize + rxbufsize) * bufnum;
146 sbuf->smem_addr = smem_alloc(sbuf->smem_size);
147 if (!sbuf->smem_addr) {
148 printk(KERN_ERR "Failed to allocate smem for sbuf\n");
152 sbuf->smem_virt = ioremap_nocache(sbuf->smem_addr, sbuf->smem_size);
153 if (!sbuf->smem_virt) {
154 printk(KERN_ERR "Failed to map smem for sbuf\n");
155 smem_free(sbuf->smem_addr, sbuf->smem_size);
160 /* allocate rings description */
161 sbuf->rings = kzalloc(sizeof(struct sbuf_ring) * bufnum, GFP_KERNEL);
163 printk(KERN_ERR "Failed to allocate rings for sbuf\n");
164 iounmap(sbuf->smem_virt);
165 smem_free(sbuf->smem_addr, sbuf->smem_size);
170 /* initialize all ring bufs */
171 smem = (volatile struct sbuf_smem_header *)sbuf->smem_virt;
172 smem->ringnr = bufnum;
173 for (i = 0; i < bufnum; i++) {
174 ringhd = (volatile struct sbuf_ring_header *)&(smem->headers[i]);
175 ringhd->txbuf_addr = sbuf->smem_addr + hsize +
176 (txbufsize + rxbufsize) * i;
177 ringhd->txbuf_size = txbufsize;
178 ringhd->txbuf_rdptr = 0;
179 ringhd->txbuf_wrptr = 0;
180 ringhd->rxbuf_addr = smem->headers[i].txbuf_addr + txbufsize;
181 ringhd->rxbuf_size = rxbufsize;
182 ringhd->rxbuf_rdptr = 0;
183 ringhd->rxbuf_wrptr = 0;
185 sbuf->rings[i].header = ringhd;
186 sbuf->rings[i].txbuf_virt = sbuf->smem_virt + hsize +
187 (txbufsize + rxbufsize) * i;
188 sbuf->rings[i].rxbuf_virt = sbuf->rings[i].txbuf_virt + txbufsize;
189 init_waitqueue_head(&(sbuf->rings[i].txwait));
190 init_waitqueue_head(&(sbuf->rings[i].rxwait));
191 mutex_init(&(sbuf->rings[i].txlock));
192 mutex_init(&(sbuf->rings[i].rxlock));
195 sbuf->thread = kthread_create(sbuf_thread, sbuf,
196 "sbuf-%d-%d", dst, channel);
197 if (IS_ERR(sbuf->thread)) {
198 printk(KERN_ERR "Failed to create kthread: sbuf-%d-%d\n", dst, channel);
200 iounmap(sbuf->smem_virt);
201 smem_free(sbuf->smem_addr, sbuf->smem_size);
202 result = PTR_ERR(sbuf->thread);
208 sbufs[dst][channel] = sbuf;
209 wake_up_process(sbuf->thread);
/*
 * sbuf_destroy - tear down the sbuf for (dst, channel).
 *
 * Drops the manager to IDLE first so sleeping readers/writers can detect
 * shutdown, closes the SMSG channel, stops the worker thread, wakes all
 * ring waiters, then releases the mapping/shared memory and clears the
 * registry slot.
 *
 * NOTE(review): elided lines presumably include a !sbuf early-return and
 * the kfree() of sbuf->rings and sbuf — confirm against the full source.
 */
214 void sbuf_destroy(uint8_t dst, uint8_t channel)
216 struct sbuf_mgr *sbuf = sbufs[dst][channel];
223 sbuf->state = SBUF_STATE_IDLE;
224 smsg_ch_close(dst, channel, -1);
226 /* stop sbuf thread if it's created successfully and still alive */
227 if (!IS_ERR_OR_NULL(sbuf->thread)) {
228 kthread_stop(sbuf->thread);
/* wake every waiter so blocked read/write calls observe IDLE and return */
232 for (i = 0; i < sbuf->ringnr; i++) {
233 wake_up_interruptible_all(&sbuf->rings[i].txwait);
234 wake_up_interruptible_all(&sbuf->rings[i].rxwait);
239 if (sbuf->smem_virt) {
240 iounmap(sbuf->smem_virt);
242 smem_free(sbuf->smem_addr, sbuf->smem_size);
245 sbufs[dst][channel] = NULL;
/*
 * sbuf_write - copy up to @len bytes from @buf into tx ring @bufid.
 * @timeout: 0  = non-blocking (mutex_trylock, no wait for space),
 *           <0 = wait forever for space,
 *           >0 = bounded wait (presumably in jiffies — TODO confirm).
 *
 * @buf may be a kernel or user pointer; the (uintptr_t)buf > TASK_SIZE
 * test selects unalign_memcpy() vs unalign_copy_from_user().  Presumably
 * returns the number of bytes written (len - left) or a negative errno —
 * the return statements are elided in this extract.
 *
 * NOTE(review): elided lines include declarations (txpos, mevt, left),
 * the !sbuf/bufid validity checks, the "if (tail > 0)" wrap test, and
 * several closing braces / early-exit paths.
 */
248 int sbuf_write(uint8_t dst, uint8_t channel, uint32_t bufid,
249 void *buf, uint32_t len, int timeout)
251 struct sbuf_mgr *sbuf = sbufs[dst][channel];
252 struct sbuf_ring *ring = NULL;
253 volatile struct sbuf_ring_header *ringhd = NULL;
256 int rval, left, tail, txsize;
261 ring = &(sbuf->rings[bufid]);
262 ringhd = ring->header;
263 if (sbuf->state != SBUF_STATE_READY) {
264 printk(KERN_ERR "sbuf-%d-%d not ready to write!\n", dst, channel);
268 pr_debug("sbuf_write: dst=%d, channel=%d, bufid=%d, len=%d, timeout=%d\n",
269 dst, channel, bufid, len, timeout);
270 pr_debug("sbuf_write: channel=%d, wrptr=%d, rdptr=%d",
271 channel, ringhd->txbuf_wrptr, ringhd->txbuf_rdptr);
/* blocking path takes the lock unconditionally ... */
277 mutex_lock(&ring->txlock);
/* ... non-blocking path (timeout == 0) must not sleep on the mutex */
279 if (!mutex_trylock(&(ring->txlock))) {
280 printk(KERN_INFO "sbuf_write busy!\n");
/* ring is full when wrptr has advanced a full txbuf_size past rdptr;
 * the free-running pointers are compared by unsigned difference */
287 if ((int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) >=
288 ringhd->txbuf_size) {
289 printk(KERN_WARNING "sbuf %d-%d ring %d txbuf is full!\n",
290 dst, channel, bufid);
293 } else if (timeout < 0) {
/* wait until space frees up or the channel is torn down (IDLE) */
295 rval = wait_event_interruptible(ring->txwait,
296 (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) <
297 ringhd->txbuf_size || sbuf->state == SBUF_STATE_IDLE);
299 printk(KERN_WARNING "sbuf_write wait interrupted!\n");
302 if (sbuf->state == SBUF_STATE_IDLE) {
303 printk(KERN_ERR "sbuf_write sbuf state is idle!\n");
/* bounded wait: same condition, but gives up after the timeout */
308 rval = wait_event_interruptible_timeout(ring->txwait,
309 (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) <
310 ringhd->txbuf_size || sbuf->state == SBUF_STATE_IDLE,
313 printk(KERN_WARNING "sbuf_write wait interrupted!\n");
314 } else if (rval == 0) {
315 printk(KERN_WARNING "sbuf_write wait timeout!\n");
319 if (sbuf->state == SBUF_STATE_IDLE) {
320 printk(KERN_ERR "sbuf_write sbuf state is idle!\n");
/* copy as much as fits; stop early if the ring fills or channel dies */
326 while (left && (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) < ringhd->txbuf_size &&
327 sbuf->state == SBUF_STATE_READY) {
328 /* calc txpos & txsize */
329 txpos = ring->txbuf_virt + ringhd->txbuf_wrptr % ringhd->txbuf_size;
330 txsize = ringhd->txbuf_size - (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr);
331 txsize = min(txsize, left);
/* tail > 0 means this chunk wraps past the end of the ring buffer */
333 tail = txpos + txsize - (ring->txbuf_virt + ringhd->txbuf_size);
335 /* ring buffer is rounded: split the copy at the wrap point */
336 if ((uintptr_t)buf > TASK_SIZE) {
337 unalign_memcpy(txpos, buf, txsize - tail);
338 unalign_memcpy(ring->txbuf_virt, buf + txsize - tail, tail);
340 if(unalign_copy_from_user(txpos, (void __user *)buf, txsize - tail) ||
341 unalign_copy_from_user(ring->txbuf_virt,
342 (void __user *)(buf + txsize - tail), tail)) {
343 printk(KERN_ERR "sbuf_write: failed to copy from user!\n");
/* non-wrapping chunk: single copy */
349 if ((uintptr_t)buf > TASK_SIZE) {
350 unalign_memcpy(txpos, buf, txsize);
352 /* handle the user space address */
353 if(unalign_copy_from_user(txpos, (void __user *)buf, txsize)) {
354 printk(KERN_ERR "sbuf_write: failed to copy from user!\n");
362 pr_debug("sbuf_write: channel=%d, txpos=%p, txsize=%d\n", channel, txpos, txsize);
364 /* update tx wrptr */
365 ringhd->txbuf_wrptr = ringhd->txbuf_wrptr + txsize;
366 /* ring just went non-empty: notify the peer only on this transition
 * to avoid sending one smsg per write */
367 if(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr == txsize) {
368 smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBUF_WRPTR, bufid);
369 smsg_send(dst, &mevt, -1);
376 mutex_unlock(&ring->txlock);
378 pr_debug("sbuf_write done: channel=%d, len=%d\n", channel, len - left);
/*
 * sbuf_read - copy up to @len bytes from rx ring @bufid into @buf.
 * @timeout: 0  = non-blocking (mutex_trylock, no wait for data),
 *           <0 = wait forever for data,
 *           >0 = bounded wait (presumably in jiffies — TODO confirm).
 *
 * @buf may be a kernel or user pointer; the (uintptr_t)buf > TASK_SIZE
 * test selects unalign_memcpy() vs unalign_copy_to_user().  Presumably
 * returns the number of bytes read (len - left) or a negative errno —
 * the return statements are elided in this extract.
 *
 * NOTE(review): elided lines include declarations (rxpos, mevt, left),
 * the !sbuf/bufid validity checks, the "if (tail > 0)" wrap test, and
 * several closing braces / early-exit paths.
 */
387 int sbuf_read(uint8_t dst, uint8_t channel, uint32_t bufid,
388 void *buf, uint32_t len, int timeout)
390 struct sbuf_mgr *sbuf = sbufs[dst][channel];
391 struct sbuf_ring *ring = NULL;
392 volatile struct sbuf_ring_header *ringhd = NULL;
395 int rval, left, tail, rxsize;
400 ring = &(sbuf->rings[bufid]);
401 ringhd = ring->header;
403 if (sbuf->state != SBUF_STATE_READY) {
404 printk(KERN_ERR "sbuf-%d-%d not ready to read!\n", dst, channel);
408 pr_debug("sbuf_read: dst=%d, channel=%d, bufid=%d, len=%d, timeout=%d\n",
409 dst, channel, bufid, len, timeout);
410 pr_debug("sbuf_read: channel=%d, wrptr=%d, rdptr=%d",
411 channel, ringhd->rxbuf_wrptr, ringhd->rxbuf_rdptr);
/* blocking path takes the lock unconditionally ... */
417 mutex_lock(&ring->rxlock);
/* ... non-blocking path (timeout == 0) must not sleep on the mutex */
419 if (!mutex_trylock(&(ring->rxlock))) {
420 printk(KERN_INFO "sbuf_read busy!\n");
/* ring is empty when the free-running pointers are equal */
425 if (ringhd->rxbuf_wrptr == ringhd->rxbuf_rdptr) {
428 printk(KERN_WARNING "sbuf %d-%d ring %d rxbuf is empty!\n",
429 dst, channel, bufid);
431 } else if (timeout < 0) {
/* wait until data arrives or the channel is torn down (IDLE) */
433 rval = wait_event_interruptible(ring->rxwait,
434 ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr ||
435 sbuf->state == SBUF_STATE_IDLE);
437 printk(KERN_WARNING "sbuf_read wait interrupted!\n");
440 if (sbuf->state == SBUF_STATE_IDLE) {
441 printk(KERN_ERR "sbuf_read sbuf state is idle!\n");
/* bounded wait: same condition, but gives up after the timeout */
446 rval = wait_event_interruptible_timeout(ring->rxwait,
447 ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr ||
448 sbuf->state == SBUF_STATE_IDLE, timeout);
450 printk(KERN_WARNING "sbuf_read wait interrupted!\n");
451 } else if (rval == 0) {
452 printk(KERN_WARNING "sbuf_read wait timeout!\n");
456 if (sbuf->state == SBUF_STATE_IDLE) {
457 printk(KERN_ERR "sbuf_read sbuf state is idle!\n");
/* drain as much as is available; stop early if the ring empties or
 * the channel dies */
463 while (left && (ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr) &&
464 sbuf->state == SBUF_STATE_READY) {
465 /* calc rxpos & rxsize */
466 rxpos = ring->rxbuf_virt + ringhd->rxbuf_rdptr % ringhd->rxbuf_size;
467 rxsize = (int)(ringhd->rxbuf_wrptr - ringhd->rxbuf_rdptr);
/* a larger difference would mean corrupted shared-memory pointers */
469 WARN_ON(rxsize > ringhd->rxbuf_size);
470 rxsize = min(rxsize, left);
472 pr_debug("sbuf_read: channel=%d, buf=%p, rxpos=%p, rxsize=%d\n", channel, buf, rxpos, rxsize);
/* tail > 0 means this chunk wraps past the end of the ring buffer */
474 tail = rxpos + rxsize - (ring->rxbuf_virt + ringhd->rxbuf_size);
477 /* ring buffer is rounded: split the copy at the wrap point */
478 if ((uintptr_t)buf > TASK_SIZE) {
479 unalign_memcpy(buf, rxpos, rxsize - tail);
480 unalign_memcpy(buf + rxsize - tail, ring->rxbuf_virt, tail);
482 /* handle the user space address */
483 if(unalign_copy_to_user((void __user *)buf, rxpos, rxsize - tail) ||
484 unalign_copy_to_user((void __user *)(buf + rxsize - tail),
485 ring->rxbuf_virt, tail)) {
486 printk(KERN_ERR "sbuf_read: failed to copy to user!\n");
/* non-wrapping chunk: single copy */
492 if ((uintptr_t)buf > TASK_SIZE) {
493 unalign_memcpy(buf, rxpos, rxsize);
495 /* handle the user space address */
496 if (unalign_copy_to_user((void __user *)buf, rxpos, rxsize)) {
497 printk(KERN_ERR "sbuf_read: failed to copy to user!\n");
504 /* update rx rdptr */
505 ringhd->rxbuf_rdptr = ringhd->rxbuf_rdptr + rxsize;
506 /* ring was full before this read: notify the peer so a blocked
 * writer on the remote side can make progress */
507 if(ringhd->rxbuf_wrptr - ringhd->rxbuf_rdptr == ringhd->rxbuf_size - rxsize) {
508 smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBUF_RDPTR, bufid);
509 smsg_send(dst, &mevt, -1);
516 mutex_unlock(&ring->rxlock);
518 pr_debug("sbuf_read done: channel=%d, len=%d", channel, len - left);
/*
 * sbuf_poll_wait - poll()/select() support for ring @bufid.
 *
 * Registers @filp on both the tx and rx waitqueues, then reports
 * POLLIN|POLLRDNORM when rx data is pending and POLLOUT|POLLWRNORM while
 * the tx ring has free space.  (The "return mask;" statement and the
 * closing braces are elided in this extract.)
 */
527 int sbuf_poll_wait(uint8_t dst, uint8_t channel, uint32_t bufid,
528 struct file *filp, poll_table *wait)
530 struct sbuf_mgr *sbuf = sbufs[dst][channel];
531 struct sbuf_ring *ring = NULL;
532 volatile struct sbuf_ring_header *ringhd = NULL;
533 unsigned int mask = 0;
538 ring = &(sbuf->rings[bufid]);
539 ringhd = ring->header;
540 if (sbuf->state != SBUF_STATE_READY) {
541 printk(KERN_ERR "sbuf-%d-%d not ready to poll !\n", dst, channel);
545 poll_wait(filp, &ring->txwait, wait);
546 poll_wait(filp, &ring->rxwait, wait);
/* readable when the peer has advanced wrptr past our rdptr */
548 if (ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr) {
549 mask |= POLLIN | POLLRDNORM;
/* writable while the tx ring is not full (wrptr - rdptr < size) */
552 if (ringhd->txbuf_wrptr - ringhd->txbuf_rdptr < ringhd->txbuf_size) {
553 mask |= POLLOUT | POLLWRNORM;
/*
 * sbuf_status - report channel readiness for (dst, channel).
 * Presumably returns 0 when the sbuf exists and is READY and a negative
 * errno otherwise — the return statements are elided in this extract.
 */
559 int sbuf_status(uint8_t dst, uint8_t channel)
561 struct sbuf_mgr *sbuf = sbufs[dst][channel];
566 if (sbuf->state != SBUF_STATE_READY) {
/*
 * sbuf_register_notifier - install an event @handler for ring @bufid.
 *
 * The handler is invoked from sbuf_thread with SBUF_NOTIFY_READ/WRITE
 * when the peer updates the ring pointers.  NOTE(review): the expected
 * "ring->data = data;" assignment is elided in this extract — confirm it
 * exists, since sbuf_thread passes ring->data to the handler.
 */
573 int sbuf_register_notifier(uint8_t dst, uint8_t channel, uint32_t bufid,
574 void (*handler)(int event, void *data), void *data)
576 struct sbuf_mgr *sbuf = sbufs[dst][channel];
577 struct sbuf_ring *ring = NULL;
582 ring = &(sbuf->rings[bufid]);
583 ring->handler = handler;
589 #if defined(CONFIG_DEBUG_FS)
/*
 * sbuf_debug_show - debugfs dump of every registered sbuf manager and
 * the shared-memory header state (addresses, rd/wr pointers, sizes) of
 * each of its rings.
 *
 * NOTE(review): elided lines include the i/j/n declarations, the
 * sbufs[i][j] NULL-slot check, and the final "return 0;".
 */
591 static int sbuf_debug_show(struct seq_file *m, void *private)
593 struct sbuf_mgr *sbuf = NULL;
594 struct sbuf_ring *rings = NULL;
595 volatile struct sbuf_ring_header *ring = NULL;
598 for (i = 0; i < SIPC_ID_NR; i++) {
599 for (j=0; j< SMSG_CH_NR; j++) {
604 seq_printf(m, "sbuf dst 0x%0x, channel: 0x%0x, state: %d, smem_virt: 0x%lx, smem_addr: 0x%0x, smem_size: 0x%0x, ringnr: %d \n",
605 sbuf->dst, sbuf->channel, sbuf->state, (size_t)sbuf->smem_virt, sbuf->smem_addr, sbuf->smem_size, sbuf->ringnr);
607 for (n=0; n < sbuf->ringnr; n++) {
608 rings = &(sbuf->rings[n]);
609 ring = rings->header;
610 seq_printf(m, "sbuf ring[%d]: rxbuf_addr :0x%0x, rxbuf_rdptr :0x%0x, rxbuf_wrptr :0x%0x, rxbuf_size :0x%0x \n", n, ring->rxbuf_addr, ring->rxbuf_rdptr, ring->rxbuf_wrptr, ring->rxbuf_size);
611 seq_printf(m, "sbuf ring[%d]: txbuf_addr :0x%0x, txbuf_rdptr :0x%0x, txbuf_wrptr :0x%0x, txbuf_size :0x%0x \n", n, ring->txbuf_addr, ring->txbuf_rdptr, ring->txbuf_wrptr, ring->txbuf_size);
/* debugfs open hook: single_open() routes reads through sbuf_debug_show. */
618 static int sbuf_debug_open(struct inode *inode, struct file *file)
620 return single_open(file, sbuf_debug_show, inode->i_private);
/* debugfs file operations; the .read/.llseek entries (presumably
 * seq_read/seq_lseek, the single_open convention) are elided here. */
623 static const struct file_operations sbuf_debug_fops = {
624 .open = sbuf_debug_open,
627 .release = single_release,
/*
 * sbuf_init_debugfs - create the read-only "sbuf" entry under @root
 * (an opaque struct dentry pointer).  The return statement is elided
 * in this extract.
 */
630 int sbuf_init_debugfs( void *root )
634 debugfs_create_file("sbuf", S_IRUGO, (struct dentry *)root, NULL, &sbuf_debug_fops);
638 #endif /* CONFIG_DEBUG_FS */
/* Export the sbuf API to other kernel modules, and module metadata. */
641 EXPORT_SYMBOL(sbuf_create);
642 EXPORT_SYMBOL(sbuf_destroy);
643 EXPORT_SYMBOL(sbuf_write);
644 EXPORT_SYMBOL(sbuf_read);
645 EXPORT_SYMBOL(sbuf_poll_wait);
646 EXPORT_SYMBOL(sbuf_status);
647 EXPORT_SYMBOL(sbuf_register_notifier);
649 MODULE_AUTHOR("Chen Gaopeng");
650 MODULE_DESCRIPTION("SIPC/SBUF driver");
651 MODULE_LICENSE("GPL");