 * Copyright (C) 2012 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/log2.h>

#include <linux/sipc.h>
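
/* one sblock manager per (SIPC destination id, SMSG channel) pair */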
static struct sblock_mgr *sblocks[SIPC_ID_NR][SMSG_CH_NR];
static inline uint32_t sblock_get_index(uint32_t x, uint32_t y)

static inline uint32_t sblock_get_ringpos(uint32_t x, uint32_t y)
	return is_power_of_2(y) ? (x & (y - 1)) : (x % y);
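
/*
 * The ring/pool read and write pointers are free-running 32-bit counters:
 * a queue is empty when rdptr == wrptr and its occupancy is always
 * (wrptr - rdptr); the counters are only folded onto a slot index with
 * sblock_get_ringpos() at access time.
 *
 * sblock_put() below returns a tx block obtained by sblock_get() back to
 * the tx pool without sending it, and wakes a waiter blocked in
 * sblock_get() when the pool leaves the empty state.
 */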
void sblock_put(uint8_t dst, uint8_t channel, struct sblock *blk)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);
	spin_lock_irqsave(&ring->p_txlock, flags);
	txpos = sblock_get_ringpos(poolhd->txblk_rdptr - 1, poolhd->txblk_count);
	ring->r_txblks[txpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
	ring->r_txblks[txpos].length = poolhd->txblk_size;
	poolhd->txblk_rdptr = poolhd->txblk_rdptr - 1;
	if ((int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr) == 1) {
		wake_up_interruptible_all(&(ring->getwait));
	index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
	ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;
	spin_unlock_irqrestore(&ring->p_txlock, flags);
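
/*
 * Reset both rings and rebuild the tx/rx pools from the txrecord/rxrecord
 * state arrays, so that every block marked SBLOCK_BLK_STATE_DONE becomes
 * available again; called when the peer reopens the channel.
 */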
static int sblock_recover(uint8_t dst, uint8_t channel)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	unsigned long pflags, qflags;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);
	sblock->state = SBLOCK_STATE_IDLE;
	wake_up_interruptible_all(&ring->getwait);
	wake_up_interruptible_all(&ring->recvwait);
	spin_lock_irqsave(&ring->r_txlock, pflags);
	/* clean txblks ring */
	ringhd->txblk_wrptr = ringhd->txblk_rdptr;
	spin_lock_irqsave(&ring->p_txlock, qflags);
	/* recover txblks pool */
	poolhd->txblk_rdptr = poolhd->txblk_wrptr;
	for (i = 0, j = 0; i < poolhd->txblk_count; i++) {
		if (ring->txrecord[i] == SBLOCK_BLK_STATE_DONE) {
			ring->p_txblks[j].addr = i * sblock->txblksz + poolhd->txblk_addr;
			ring->p_txblks[j].length = sblock->txblksz;
			poolhd->txblk_wrptr = poolhd->txblk_wrptr + 1;
	spin_unlock_irqrestore(&ring->p_txlock, qflags);
	spin_unlock_irqrestore(&ring->r_txlock, pflags);
	spin_lock_irqsave(&ring->r_rxlock, pflags);
	/* clean rxblks ring */
	ringhd->rxblk_rdptr = ringhd->rxblk_wrptr;
	spin_lock_irqsave(&ring->p_rxlock, qflags);
	/* recover rxblks pool */
	poolhd->rxblk_wrptr = poolhd->rxblk_rdptr;
	for (i = 0, j = 0; i < poolhd->rxblk_count; i++) {
		if (ring->rxrecord[i] == SBLOCK_BLK_STATE_DONE) {
			ring->p_rxblks[j].addr = i * sblock->rxblksz + poolhd->rxblk_addr;
			ring->p_rxblks[j].length = sblock->rxblksz;
			poolhd->rxblk_wrptr = poolhd->rxblk_wrptr + 1;
	spin_unlock_irqrestore(&ring->p_rxlock, qflags);
	spin_unlock_irqrestore(&ring->r_rxlock, pflags);
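
/*
 * Per-channel event thread: opens the smsg channel (which may block
 * indefinitely, hence the thread) and then dispatches incoming
 * OPEN/CLOSE/CMD/EVENT messages to drive recovery, the init handshake
 * and the send/release wakeups.
 */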
static int sblock_thread(void *data)
	struct sblock_mgr *sblock = data;
	struct smsg mcmd, mrecv;
	struct sched_param param = { .sched_priority = 90 };
	/* set the thread as a real-time thread with priority 90 */
	sched_setscheduler(current, SCHED_RR, &param);
	/* since the channel open may hang, we call it in the sblock thread */
	rval = smsg_ch_open(sblock->dst, sblock->channel, -1);
		printk(KERN_ERR "Failed to open channel %d\n", sblock->channel);
		/* assign NULL to the thread pointer since the channel open failed */
		sblock->thread = NULL;
	/* handle the sblock events */
	while (!kthread_should_stop()) {
		/* monitor sblock recv smsg */
		smsg_set(&mrecv, sblock->channel, 0, 0, 0);
		rval = smsg_recv(sblock->dst, &mrecv, -1);
		if (rval == -EIO || rval == -ENODEV) {
			/* channel state is FREE */
		pr_debug("sblock thread recv msg: dst=%d, channel=%d, "
			"type=%d, flag=0x%04x, value=0x%08x\n",
			sblock->dst, sblock->channel,
			mrecv.type, mrecv.flag, mrecv.value);
		switch (mrecv.type) {
			/* handle channel recovery */
			if (sblock->handler) {
				sblock->handler(SBLOCK_NOTIFY_CLOSE, sblock->data);
			sblock_recover(sblock->dst, sblock->channel);
			smsg_open_ack(sblock->dst, sblock->channel);
		case SMSG_TYPE_CLOSE:
			/* handle channel recovery */
			smsg_close_ack(sblock->dst, sblock->channel);
			if (sblock->handler) {
				sblock->handler(SBLOCK_NOTIFY_CLOSE, sblock->data);
			sblock->state = SBLOCK_STATE_IDLE;
			/* respond cmd done for sblock init */
			WARN_ON(mrecv.flag != SMSG_CMD_SBLOCK_INIT);
			smsg_set(&mcmd, sblock->channel, SMSG_TYPE_DONE,
				SMSG_DONE_SBLOCK_INIT, sblock->smem_addr);
			smsg_send(sblock->dst, &mcmd, -1);
			if (sblock->handler) {
				sblock->handler(SBLOCK_NOTIFY_OPEN, sblock->data);
			sblock->state = SBLOCK_STATE_READY;
		case SMSG_TYPE_EVENT:
			/* handle sblock send/release events */
			switch (mrecv.flag) {
			case SMSG_EVENT_SBLOCK_SEND:
				wake_up_interruptible_all(&sblock->ring->recvwait);
				if (sblock->handler) {
					sblock->handler(SBLOCK_NOTIFY_RECV, sblock->data);
			case SMSG_EVENT_SBLOCK_RELEASE:
				wake_up_interruptible_all(&(sblock->ring->getwait));
				if (sblock->handler) {
					sblock->handler(SBLOCK_NOTIFY_GET, sblock->data);
			printk(KERN_WARNING "unhandled sblock msg: %d-%d, %d, %d, %d\n",
				sblock->dst, sblock->channel,
				mrecv.type, mrecv.flag, mrecv.value);
	printk(KERN_WARNING "sblock %d-%d thread stop\n", sblock->dst, sblock->channel);
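
/*
 * Shared memory layout set up below, at increasing offsets from
 * sblock->smem_addr:
 *
 *   header | tx blocks | rx blocks | ring tx descs | ring rx descs |
 *   pool tx descs | pool rx descs
 *
 * where each "descs" region holds one struct sblock_blks per block.
 */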
int sblock_create(uint8_t dst, uint8_t channel,
		uint32_t txblocknum, uint32_t txblocksize,
		uint32_t rxblocknum, uint32_t rxblocksize)
	struct sblock_mgr *sblock = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	sblock = kzalloc(sizeof(struct sblock_mgr), GFP_KERNEL);
	sblock->state = SBLOCK_STATE_IDLE;
	sblock->channel = channel;
	sblock->txblksz = txblocksize;
	sblock->rxblksz = rxblocksize;
	sblock->txblknum = txblocknum;
	sblock->rxblknum = rxblocknum;
	hsize = sizeof(struct sblock_header);
	sblock->smem_size = hsize +	/* for the header */
		txblocknum * txblocksize + rxblocknum * rxblocksize +	/* for the blks */
		(txblocknum + rxblocknum) * sizeof(struct sblock_blks) +	/* for the ring */
		(txblocknum + rxblocknum) * sizeof(struct sblock_blks);	/* for the pool */
	sblock->smem_addr = smem_alloc(sblock->smem_size);
	if (!sblock->smem_addr) {
		printk(KERN_ERR "Failed to allocate smem for sblock\n");
	sblock->smem_virt = ioremap_nocache(sblock->smem_addr, sblock->smem_size);
	if (!sblock->smem_virt) {
		printk(KERN_ERR "Failed to map smem for sblock\n");
		smem_free(sblock->smem_addr, sblock->smem_size);
	/* initialize ring and header */
	sblock->ring = kzalloc(sizeof(struct sblock_ring), GFP_KERNEL);
		printk(KERN_ERR "Failed to allocate ring for sblock\n");
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
	ringhd = (volatile struct sblock_ring_header *)(sblock->smem_virt);
	ringhd->txblk_addr = sblock->smem_addr + hsize;
	ringhd->txblk_count = txblocknum;
	ringhd->txblk_size = txblocksize;
	ringhd->txblk_rdptr = 0;
	ringhd->txblk_wrptr = 0;
	ringhd->txblk_blks = sblock->smem_addr + hsize +
		txblocknum * txblocksize + rxblocknum * rxblocksize;
	ringhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
	ringhd->rxblk_count = rxblocknum;
	ringhd->rxblk_size = rxblocksize;
	ringhd->rxblk_rdptr = 0;
	ringhd->rxblk_wrptr = 0;
	ringhd->rxblk_blks = ringhd->txblk_blks + txblocknum * sizeof(struct sblock_blks);
	poolhd = (volatile struct sblock_ring_header *)(sblock->smem_virt + sizeof(struct sblock_ring_header));
	poolhd->txblk_addr = sblock->smem_addr + hsize;
	poolhd->txblk_count = txblocknum;
	poolhd->txblk_size = txblocksize;
	poolhd->txblk_rdptr = 0;
	poolhd->txblk_wrptr = 0;
	poolhd->txblk_blks = ringhd->rxblk_blks + rxblocknum * sizeof(struct sblock_blks);
	poolhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
	poolhd->rxblk_count = rxblocknum;
	poolhd->rxblk_size = rxblocksize;
	poolhd->rxblk_rdptr = 0;
	poolhd->rxblk_wrptr = 0;
	poolhd->rxblk_blks = poolhd->txblk_blks + txblocknum * sizeof(struct sblock_blks);
	sblock->ring->txrecord = kzalloc(sizeof(int) * txblocknum, GFP_KERNEL);
	if (!sblock->ring->txrecord) {
		printk(KERN_ERR "Failed to allocate memory for txrecord\n");
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
	sblock->ring->rxrecord = kzalloc(sizeof(int) * rxblocknum, GFP_KERNEL);
	if (!sblock->ring->rxrecord) {
		printk(KERN_ERR "Failed to allocate memory for rxrecord\n");
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock->ring->txrecord);
	sblock->ring->header = sblock->smem_virt;
	sblock->ring->txblk_virt = sblock->smem_virt +
		(ringhd->txblk_addr - sblock->smem_addr);
	sblock->ring->r_txblks = sblock->smem_virt +
		(ringhd->txblk_blks - sblock->smem_addr);
	sblock->ring->rxblk_virt = sblock->smem_virt +
		(ringhd->rxblk_addr - sblock->smem_addr);
	sblock->ring->r_rxblks = sblock->smem_virt +
		(ringhd->rxblk_blks - sblock->smem_addr);
	sblock->ring->p_txblks = sblock->smem_virt +
		(poolhd->txblk_blks - sblock->smem_addr);
	sblock->ring->p_rxblks = sblock->smem_virt +
		(poolhd->rxblk_blks - sblock->smem_addr);
	for (i = 0; i < txblocknum; i++) {
		sblock->ring->p_txblks[i].addr = poolhd->txblk_addr + i * txblocksize;
		sblock->ring->p_txblks[i].length = txblocksize;
		sblock->ring->txrecord[i] = SBLOCK_BLK_STATE_DONE;
		poolhd->txblk_wrptr++;
	for (i = 0; i < rxblocknum; i++) {
		sblock->ring->p_rxblks[i].addr = poolhd->rxblk_addr + i * rxblocksize;
		sblock->ring->p_rxblks[i].length = rxblocksize;
		sblock->ring->rxrecord[i] = SBLOCK_BLK_STATE_DONE;
		poolhd->rxblk_wrptr++;
	init_waitqueue_head(&sblock->ring->getwait);
	init_waitqueue_head(&sblock->ring->recvwait);
	spin_lock_init(&sblock->ring->r_txlock);
	spin_lock_init(&sblock->ring->r_rxlock);
	spin_lock_init(&sblock->ring->p_txlock);
	spin_lock_init(&sblock->ring->p_rxlock);
	sblock->thread = kthread_create(sblock_thread, sblock,
			"sblock-%d-%d", dst, channel);
	if (IS_ERR(sblock->thread)) {
		printk(KERN_ERR "Failed to create kthread: sblock-%d-%d\n", dst, channel);
		iounmap(sblock->smem_virt);
		smem_free(sblock->smem_addr, sblock->smem_size);
		kfree(sblock->ring->txrecord);
		kfree(sblock->ring->rxrecord);
		result = PTR_ERR(sblock->thread);
	sblocks[dst][channel] = sblock;
	wake_up_process(sblock->thread);
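
/*
 * Illustrative producer/consumer usage of the API above; the dst/chan
 * ids and the data/len/consume names are placeholders, the real ids come
 * from the platform's SIPC configuration:
 *
 *	struct sblock blk;
 *
 *	sblock_create(dst, chan, 16, 1024, 16, 1024);
 *
 *	// sender: grab a free block, fill it, queue it to the peer
 *	if (!sblock_get(dst, chan, &blk, -1)) {
 *		memcpy(blk.addr, data, len);
 *		blk.length = len;
 *		sblock_send(dst, chan, &blk);
 *	}
 *
 *	// receiver: take an arrived block, consume it, recycle it
 *	if (!sblock_receive(dst, chan, &blk, -1)) {
 *		consume(blk.addr, blk.length);
 *		sblock_release(dst, chan, &blk);
 *	}
 */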
void sblock_destroy(uint8_t dst, uint8_t channel)
	struct sblock_mgr *sblock = sblocks[dst][channel];
	if (sblock == NULL) {
	sblock->state = SBLOCK_STATE_IDLE;
	smsg_ch_close(dst, channel, -1);
	/* stop the sblock thread if it was created successfully and is still alive */
	if (!IS_ERR_OR_NULL(sblock->thread)) {
		kthread_stop(sblock->thread);
	wake_up_interruptible_all(&sblock->ring->recvwait);
	wake_up_interruptible_all(&sblock->ring->getwait);
	if (sblock->ring->txrecord) {
		kfree(sblock->ring->txrecord);
	if (sblock->ring->rxrecord) {
		kfree(sblock->ring->rxrecord);
	if (sblock->smem_virt) {
		iounmap(sblock->smem_virt);
	smem_free(sblock->smem_addr, sblock->smem_size);
	sblocks[dst][channel] = NULL;
int sblock_register_notifier(uint8_t dst, uint8_t channel,
		void (*handler)(int event, void *data), void *data)
	struct sblock_mgr *sblock = sblocks[dst][channel];
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
#ifndef CONFIG_SIPC_WCN
	if (sblock->handler) {
		printk(KERN_ERR "sblock handler already registered\n");
	sblock->handler = handler;
int sblock_get(uint8_t dst, uint8_t channel, struct sblock *blk, int timeout)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);
	if (poolhd->txblk_rdptr == poolhd->txblk_wrptr) {
			printk(KERN_WARNING "sblock_get %d-%d is empty!\n",
	} else if (timeout < 0) {
		rval = wait_event_interruptible(ring->getwait,
			poolhd->txblk_rdptr != poolhd->txblk_wrptr ||
			sblock->state == SBLOCK_STATE_IDLE);
			printk(KERN_WARNING "sblock_get wait interrupted!\n");
		if (sblock->state == SBLOCK_STATE_IDLE) {
			printk(KERN_ERR "sblock_get sblock state is idle!\n");
		rval = wait_event_interruptible_timeout(ring->getwait,
			poolhd->txblk_rdptr != poolhd->txblk_wrptr ||
			sblock->state == SBLOCK_STATE_IDLE,
			printk(KERN_WARNING "sblock_get wait interrupted!\n");
		} else if (rval == 0) {
			printk(KERN_WARNING "sblock_get wait timeout!\n");
		if (sblock->state == SBLOCK_STATE_IDLE) {
			printk(KERN_ERR "sblock_get sblock state is idle!\n");
	/* a concurrent getter may have taken the block, so the get can fail */
	spin_lock_irqsave(&ring->p_txlock, flags);
	if (poolhd->txblk_rdptr != poolhd->txblk_wrptr &&
		sblock->state == SBLOCK_STATE_READY) {
		txpos = sblock_get_ringpos(poolhd->txblk_rdptr, poolhd->txblk_count);
		blk->addr = sblock->smem_virt + (ring->p_txblks[txpos].addr - sblock->smem_addr);
		blk->length = poolhd->txblk_size;
		poolhd->txblk_rdptr = poolhd->txblk_rdptr + 1;
		index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
		ring->txrecord[index] = SBLOCK_BLK_STATE_PENDING;
		rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
	spin_unlock_irqrestore(&ring->p_txlock, flags);
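
/*
 * Common send path: push the block descriptor onto the tx ring and, when
 * 'yell' is set, notify the peer immediately with an
 * SMSG_EVENT_SBLOCK_SEND event; sblock_send_prepare() passes yell=false
 * so that several blocks can be queued and the notification deferred to
 * sblock_send_finish().
 */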
static int sblock_send_ex(uint8_t dst, uint8_t channel, struct sblock *blk, bool yell)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring;
	volatile struct sblock_ring_header *ringhd;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	pr_debug("sblock_send: dst=%d, channel=%d, addr=%p, len=%d\n",
		dst, channel, blk->addr, blk->length);
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	spin_lock_irqsave(&ring->r_txlock, flags);
	txpos = sblock_get_ringpos(ringhd->txblk_wrptr, ringhd->txblk_count);
	ring->r_txblks[txpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
	ring->r_txblks[txpos].length = blk->length;
	pr_debug("sblock_send: channel=%d, wrptr=%d, txpos=%d, addr=%x\n",
		channel, ringhd->txblk_wrptr, txpos, ring->r_txblks[txpos].addr);
	ringhd->txblk_wrptr = ringhd->txblk_wrptr + 1;
	if (sblock->state == SBLOCK_STATE_READY) {
		smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_SEND, 0);
		rval = smsg_send(dst, &mevt, 0);
	else if (!ring->yell) {
		if (((int)(ringhd->txblk_wrptr - ringhd->txblk_rdptr) == 1) /*&&
			((int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr) == (sblock->txblknum - 1))*/) {
	index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
	ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;
	spin_unlock_irqrestore(&ring->r_txlock, flags);
int sblock_send(uint8_t dst, uint8_t channel, struct sblock *blk)
	return sblock_send_ex(dst, channel, blk, true);

int sblock_send_prepare(uint8_t dst, uint8_t channel, struct sblock *blk)
	return sblock_send_ex(dst, channel, blk, false);

int sblock_send_finish(uint8_t dst, uint8_t channel)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring;
	volatile struct sblock_ring_header *ringhd;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	if (ringhd->txblk_wrptr != ringhd->txblk_rdptr) {
		smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_SEND, 0);
		rval = smsg_send(dst, &mevt, 0);
int sblock_receive(uint8_t dst, uint8_t channel, struct sblock *blk, int timeout)
	struct sblock_mgr *sblock = sblocks[dst][channel];
	struct sblock_ring *ring;
	volatile struct sblock_ring_header *ringhd;
	int rxpos, index, rval = 0;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
		return sblock ? -EIO : -ENODEV;
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	pr_debug("sblock_receive: dst=%d, channel=%d, timeout=%d\n",
		dst, channel, timeout);
	pr_debug("sblock_receive: channel=%d, wrptr=%d, rdptr=%d\n",
		channel, ringhd->rxblk_wrptr, ringhd->rxblk_rdptr);
	if (ringhd->rxblk_wrptr == ringhd->rxblk_rdptr) {
			pr_debug("sblock_receive %d-%d is empty!\n",
	} else if (timeout < 0) {
		rval = wait_event_interruptible(ring->recvwait,
			ringhd->rxblk_wrptr != ringhd->rxblk_rdptr);
			printk(KERN_WARNING "sblock_receive wait interrupted!\n");
		if (sblock->state == SBLOCK_STATE_IDLE) {
			printk(KERN_ERR "sblock_receive sblock state is idle!\n");
		rval = wait_event_interruptible_timeout(ring->recvwait,
			ringhd->rxblk_wrptr != ringhd->rxblk_rdptr, timeout);
			printk(KERN_WARNING "sblock_receive wait interrupted!\n");
		} else if (rval == 0) {
			printk(KERN_WARNING "sblock_receive wait timeout!\n");
		if (sblock->state == SBLOCK_STATE_IDLE) {
			printk(KERN_ERR "sblock_receive sblock state is idle!\n");
	/* a concurrent receiver may have taken the block, so the receive can fail */
	spin_lock_irqsave(&ring->r_rxlock, flags);
	if (ringhd->rxblk_wrptr != ringhd->rxblk_rdptr &&
		sblock->state == SBLOCK_STATE_READY) {
		rxpos = sblock_get_ringpos(ringhd->rxblk_rdptr, ringhd->rxblk_count);
		blk->addr = ring->r_rxblks[rxpos].addr - sblock->smem_addr + sblock->smem_virt;
		blk->length = ring->r_rxblks[rxpos].length;
		ringhd->rxblk_rdptr = ringhd->rxblk_rdptr + 1;
		pr_debug("sblock_receive: channel=%d, rxpos=%d, addr=%p, len=%d\n",
			channel, rxpos, blk->addr, blk->length);
		index = sblock_get_index((blk->addr - ring->rxblk_virt), sblock->rxblksz);
		ring->rxrecord[index] = SBLOCK_BLK_STATE_PENDING;
		rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
	spin_unlock_irqrestore(&ring->r_rxlock, flags);
int sblock_get_arrived_count(uint8_t dst, uint8_t channel)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	spin_lock_irqsave(&ring->r_rxlock, flags);
	blk_count = (int)(ringhd->rxblk_wrptr - ringhd->rxblk_rdptr);
	spin_unlock_irqrestore(&ring->r_rxlock, flags);

int sblock_get_free_count(uint8_t dst, uint8_t channel)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);
	spin_lock_irqsave(&ring->p_txlock, flags);
	blk_count = (int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr);
	spin_unlock_irqrestore(&ring->p_txlock, flags);
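
/*
 * Hand a received block back to the rx pool; the peer is only signalled
 * with SMSG_EVENT_SBLOCK_RELEASE when the pool leaves the empty state,
 * i.e. when the sender side may actually be waiting for a free block.
 */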
int sblock_release(uint8_t dst, uint8_t channel, struct sblock *blk)
	struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	if (!sblock || sblock->state != SBLOCK_STATE_READY) {
		printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
	pr_debug("sblock_release: dst=%d, channel=%d, addr=%p, len=%d\n",
		dst, channel, blk->addr, blk->length);
	ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
	poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);
	spin_lock_irqsave(&ring->p_rxlock, flags);
	rxpos = sblock_get_ringpos(poolhd->rxblk_wrptr, poolhd->rxblk_count);
	ring->p_rxblks[rxpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
	ring->p_rxblks[rxpos].length = poolhd->rxblk_size;
	poolhd->rxblk_wrptr = poolhd->rxblk_wrptr + 1;
	pr_debug("sblock_release: addr=%x\n", ring->p_rxblks[rxpos].addr);
	if ((int)(poolhd->rxblk_wrptr - poolhd->rxblk_rdptr) == 1 &&
		sblock->state == SBLOCK_STATE_READY) {
		/* send smsg to notify the peer side */
		smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_RELEASE, 0);
		smsg_send(dst, &mevt, -1);
	index = sblock_get_index((blk->addr - ring->rxblk_virt), sblock->rxblksz);
	ring->rxrecord[index] = SBLOCK_BLK_STATE_DONE;
	spin_unlock_irqrestore(&ring->p_rxlock, flags);
#if defined(CONFIG_DEBUG_FS)
static int sblock_debug_show(struct seq_file *m, void *private)
	struct sblock_mgr *sblock = NULL;
	struct sblock_ring *ring = NULL;
	volatile struct sblock_ring_header *ringhd = NULL;
	volatile struct sblock_ring_header *poolhd = NULL;
	for (i = 0; i < SIPC_ID_NR; i++) {
		for (j = 0; j < SMSG_CH_NR; j++) {
			sblock = sblocks[i][j];
			ringhd = (volatile struct sblock_ring_header *)(&sblock->ring->header->ring);
			poolhd = (volatile struct sblock_ring_header *)(&sblock->ring->header->pool);
			seq_printf(m, "sblock dst 0x%0x, channel: 0x%0x, state: %d, smem_virt: 0x%lx, smem_addr: 0x%0x, smem_size: 0x%0x, txblksz: %d, rxblksz: %d\n",
				sblock->dst, sblock->channel, sblock->state,
				(size_t)sblock->smem_virt, sblock->smem_addr,
				sblock->smem_size, sblock->txblksz, sblock->rxblksz);
			seq_printf(m, "sblock ring: txblk_virt: 0x%lx, rxblk_virt: 0x%lx\n",
				(size_t)ring->txblk_virt, (size_t)ring->rxblk_virt);
			seq_printf(m, "sblock ring header: rxblk_addr: 0x%0x, rxblk_rdptr: 0x%0x, rxblk_wrptr: 0x%0x, rxblk_size: %d, rxblk_count: %d, rxblk_blks: 0x%0x\n",
				ringhd->rxblk_addr, ringhd->rxblk_rdptr,
				ringhd->rxblk_wrptr, ringhd->rxblk_size,
				ringhd->rxblk_count, ringhd->rxblk_blks);
			seq_printf(m, "sblock ring header: txblk_addr: 0x%0x, txblk_rdptr: 0x%0x, txblk_wrptr: 0x%0x, txblk_size: %d, txblk_count: %d, txblk_blks: 0x%0x\n",
				ringhd->txblk_addr, ringhd->txblk_rdptr,
				ringhd->txblk_wrptr, ringhd->txblk_size,
				ringhd->txblk_count, ringhd->txblk_blks);
			seq_printf(m, "sblock pool header: rxblk_addr: 0x%0x, rxblk_rdptr: 0x%0x, rxblk_wrptr: 0x%0x, rxblk_size: %d, rxpool_count: %d, rxblk_blks: 0x%0x\n",
				poolhd->rxblk_addr, poolhd->rxblk_rdptr,
				poolhd->rxblk_wrptr, poolhd->rxblk_size,
				(int)(poolhd->rxblk_wrptr - poolhd->rxblk_rdptr),
				poolhd->rxblk_blks);
			seq_printf(m, "sblock pool header: txblk_addr: 0x%0x, txblk_rdptr: 0x%0x, txblk_wrptr: 0x%0x, txblk_size: %d, txpool_count: %d, txblk_blks: 0x%0x\n",
				poolhd->txblk_addr, poolhd->txblk_rdptr,
				poolhd->txblk_wrptr, poolhd->txblk_size,
				(int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr),
				poolhd->txblk_blks);

static int sblock_debug_open(struct inode *inode, struct file *file)
	return single_open(file, sblock_debug_show, inode->i_private);

static const struct file_operations sblock_debug_fops = {
	.open = sblock_debug_open,
	.release = single_release,

int sblock_init_debugfs(void *root)
	debugfs_create_file("sblock", S_IRUGO, (struct dentry *)root, NULL, &sblock_debug_fops);

#endif /* CONFIG_DEBUG_FS */
EXPORT_SYMBOL(sblock_put);
EXPORT_SYMBOL(sblock_create);
EXPORT_SYMBOL(sblock_destroy);
EXPORT_SYMBOL(sblock_register_notifier);
EXPORT_SYMBOL(sblock_get);
EXPORT_SYMBOL(sblock_send);
EXPORT_SYMBOL(sblock_send_prepare);
EXPORT_SYMBOL(sblock_send_finish);
EXPORT_SYMBOL(sblock_receive);
EXPORT_SYMBOL(sblock_get_arrived_count);
EXPORT_SYMBOL(sblock_get_free_count);
EXPORT_SYMBOL(sblock_release);

MODULE_AUTHOR("Chen Gaopeng");
MODULE_DESCRIPTION("SIPC/SBLOCK driver");
MODULE_LICENSE("GPL");