linux-3.10-sc7730: drivers/sipc/sblock.c
/*
 * Copyright (C) 2012 Spreadtrum Communications Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/log2.h>

#include <linux/sipc.h>
#include "sblock.h"

static struct sblock_mgr *sblocks[SIPC_ID_NR][SMSG_CH_NR];

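/*
 * Ring arithmetic helpers: the read/write pointers in the ring and
 * pool headers increase monotonically, and these helpers map them to
 * a block index and a ring slot.  When the ring size is a power of
 * two the modulo reduces to a mask, avoiding a division.
 */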
static inline uint32_t sblock_get_index(uint32_t x, uint32_t y)
{
        return (x / y);
}

static inline uint32_t sblock_get_ringpos(uint32_t x, uint32_t y)
{
        return is_power_of_2(y) ? (x & (y - 1)) : (x % y);
}

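/*
 * Return a TX block obtained from sblock_get() back to the free pool
 * without sending it: the pool read pointer is wound back one slot,
 * the block's record is marked DONE, and any waiter blocked in
 * sblock_get() is woken if the pool was previously empty.
 */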
void sblock_put(uint8_t dst, uint8_t channel, struct sblock *blk)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        unsigned long flags;
        int txpos;
        int index;

        if (!sblock) {
                return;
        }

        ring = sblock->ring;
        poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

        spin_lock_irqsave(&ring->p_txlock, flags);
        txpos = sblock_get_ringpos(poolhd->txblk_rdptr - 1, poolhd->txblk_count);
        ring->r_txblks[txpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
        ring->r_txblks[txpos].length = poolhd->txblk_size;
        poolhd->txblk_rdptr = poolhd->txblk_rdptr - 1;
        if ((int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr) == 1) {
                wake_up_interruptible_all(&(ring->getwait));
        }
        index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
        ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;

        spin_unlock_irqrestore(&ring->p_txlock, flags);
}

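/*
 * Reset a channel after the peer reopens it: both rings are drained
 * (read pointers catch up with write pointers) and the TX/RX pools
 * are rebuilt from the per-block records, so blocks that were in
 * flight when the peer went away are not leaked.
 */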
static int sblock_recover(uint8_t dst, uint8_t channel)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *ringhd = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        unsigned long pflags, qflags;
        int i, j;

        if (!sblock) {
                return -ENODEV;
        }

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
        poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

        sblock->state = SBLOCK_STATE_IDLE;
        wake_up_interruptible_all(&ring->getwait);
        wake_up_interruptible_all(&ring->recvwait);

        spin_lock_irqsave(&ring->r_txlock, pflags);
        /* clean txblks ring */
        ringhd->txblk_wrptr = ringhd->txblk_rdptr;

        spin_lock_irqsave(&ring->p_txlock, qflags);
        /* recover txblks pool */
        poolhd->txblk_rdptr = poolhd->txblk_wrptr;
        for (i = 0, j = 0; i < poolhd->txblk_count; i++) {
                if (ring->txrecord[i] == SBLOCK_BLK_STATE_DONE) {
                        ring->p_txblks[j].addr = i * sblock->txblksz + poolhd->txblk_addr;
                        ring->p_txblks[j].length = sblock->txblksz;
                        poolhd->txblk_wrptr = poolhd->txblk_wrptr + 1;
                        j++;
                }
        }
        spin_unlock_irqrestore(&ring->p_txlock, qflags);
        spin_unlock_irqrestore(&ring->r_txlock, pflags);

        spin_lock_irqsave(&ring->r_rxlock, pflags);
        /* clean rxblks ring */
        ringhd->rxblk_rdptr = ringhd->rxblk_wrptr;

        spin_lock_irqsave(&ring->p_rxlock, qflags);
        /* recover rxblks pool */
        poolhd->rxblk_wrptr = poolhd->rxblk_rdptr;
        for (i = 0, j = 0; i < poolhd->rxblk_count; i++) {
                if (ring->rxrecord[i] == SBLOCK_BLK_STATE_DONE) {
                        ring->p_rxblks[j].addr = i * sblock->rxblksz + poolhd->rxblk_addr;
                        ring->p_rxblks[j].length = sblock->rxblksz;
                        poolhd->rxblk_wrptr = poolhd->rxblk_wrptr + 1;
                        j++;
                }
        }
        spin_unlock_irqrestore(&ring->p_rxlock, qflags);
        spin_unlock_irqrestore(&ring->r_rxlock, pflags);

        return 0;
}

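/*
 * Per-channel worker thread: opens the smsg channel (which may block),
 * then services OPEN/CLOSE/CMD/EVENT messages from the peer, driving
 * recovery, wait-queue wakeups and the registered notifier.
 */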
static int sblock_thread(void *data)
{
        struct sblock_mgr *sblock = data;
        struct smsg mcmd, mrecv;
        int rval;
        int recovery = 0;
        struct sched_param param = {.sched_priority = 90};

        /* set the thread as a real-time thread with priority 90 */
        sched_setscheduler(current, SCHED_RR, &param);

        /* since the channel open may hang, we call it in the sblock thread */
        rval = smsg_ch_open(sblock->dst, sblock->channel, -1);
        if (rval != 0) {
                printk(KERN_ERR "Failed to open channel %d\n", sblock->channel);
                /* assign NULL to the thread pointer since the channel open failed */
                sblock->thread = NULL;
                return rval;
        }

        /* handle the sblock events */
        while (!kthread_should_stop()) {

                /* monitor sblock recv smsg */
                smsg_set(&mrecv, sblock->channel, 0, 0, 0);
                rval = smsg_recv(sblock->dst, &mrecv, -1);
                if (rval == -EIO || rval == -ENODEV) {
                        /* channel state is FREE */
                        msleep(5);
                        continue;
                }

                pr_debug("sblock thread recv msg: dst=%d, channel=%d, "
                                "type=%d, flag=0x%04x, value=0x%08x\n",
                                sblock->dst, sblock->channel,
                                mrecv.type, mrecv.flag, mrecv.value);

                switch (mrecv.type) {
                case SMSG_TYPE_OPEN:
                        /* handle channel recovery */
                        if (recovery) {
                                if (sblock->handler) {
                                        sblock->handler(SBLOCK_NOTIFY_CLOSE, sblock->data);
                                }
                                sblock_recover(sblock->dst, sblock->channel);
                        }
                        smsg_open_ack(sblock->dst, sblock->channel);
                        break;
                case SMSG_TYPE_CLOSE:
                        /* handle channel recovery */
                        smsg_close_ack(sblock->dst, sblock->channel);
                        if (sblock->handler) {
                                sblock->handler(SBLOCK_NOTIFY_CLOSE, sblock->data);
                        }
                        sblock->state = SBLOCK_STATE_IDLE;
                        break;
                case SMSG_TYPE_CMD:
                        /* respond cmd done for sblock init */
                        WARN_ON(mrecv.flag != SMSG_CMD_SBLOCK_INIT);
                        smsg_set(&mcmd, sblock->channel, SMSG_TYPE_DONE,
                                        SMSG_DONE_SBLOCK_INIT, sblock->smem_addr);
                        smsg_send(sblock->dst, &mcmd, -1);
                        if (sblock->handler) {
                                sblock->handler(SBLOCK_NOTIFY_OPEN, sblock->data);
                        }
                        sblock->state = SBLOCK_STATE_READY;
                        recovery = 1;
                        break;
                case SMSG_TYPE_EVENT:
                        /* handle sblock send/release events */
                        switch (mrecv.flag) {
                        case SMSG_EVENT_SBLOCK_SEND:
                                wake_up_interruptible_all(&sblock->ring->recvwait);
                                if (sblock->handler) {
                                        sblock->handler(SBLOCK_NOTIFY_RECV, sblock->data);
                                }
                                break;
                        case SMSG_EVENT_SBLOCK_RELEASE:
                                wake_up_interruptible_all(&(sblock->ring->getwait));
                                if (sblock->handler) {
                                        sblock->handler(SBLOCK_NOTIFY_GET, sblock->data);
                                }
                                break;
                        default:
                                rval = 1;
                                break;
                        }
                        break;
                default:
                        rval = 1;
                        break;
                }
                if (rval) {
                        printk(KERN_WARNING "unhandled sblock msg: %d-%d, %d, %d, %d\n",
                                        sblock->dst, sblock->channel,
                                        mrecv.type, mrecv.flag, mrecv.value);
                        rval = 0;
                }
        }

        printk(KERN_WARNING "sblock %d-%d thread stop\n", sblock->dst, sblock->channel);
        return rval;
}

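/*
 * Create a channel and carve its shared memory up as follows (this
 * matches the smem_size computation below):
 *
 *   smem_addr -> +---------------------------+
 *                | sblock_header             |  ring + pool header pair
 *                +---------------------------+
 *                | TX blocks                 |  txblocknum * txblocksize
 *                +---------------------------+
 *                | RX blocks                 |  rxblocknum * rxblocksize
 *                +---------------------------+
 *                | ring TX/RX descriptors    |  (tx + rx) * sizeof(struct sblock_blks)
 *                +---------------------------+
 *                | pool TX/RX descriptors    |  (tx + rx) * sizeof(struct sblock_blks)
 *                +---------------------------+
 */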
int sblock_create(uint8_t dst, uint8_t channel,
                uint32_t txblocknum, uint32_t txblocksize,
                uint32_t rxblocknum, uint32_t rxblocksize)
{
        struct sblock_mgr *sblock = NULL;
        volatile struct sblock_ring_header *ringhd = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        uint32_t hsize;
        int i, result;

        sblock = kzalloc(sizeof(struct sblock_mgr), GFP_KERNEL);
        if (!sblock) {
                return -ENOMEM;
        }

        sblock->state = SBLOCK_STATE_IDLE;
        sblock->dst = dst;
        sblock->channel = channel;
        sblock->txblksz = txblocksize;
        sblock->rxblksz = rxblocksize;
        sblock->txblknum = txblocknum;
        sblock->rxblknum = rxblocknum;

        /* allocate smem */
        hsize = sizeof(struct sblock_header);
        sblock->smem_size = hsize +                                             /* for header */
                txblocknum * txblocksize + rxblocknum * rxblocksize +           /* for blks */
                (txblocknum + rxblocknum) * sizeof(struct sblock_blks) +        /* for ring */
                (txblocknum + rxblocknum) * sizeof(struct sblock_blks);         /* for pool */

        sblock->smem_addr = smem_alloc(sblock->smem_size);
        if (!sblock->smem_addr) {
                printk(KERN_ERR "Failed to allocate smem for sblock\n");
                kfree(sblock);
                return -ENOMEM;
        }
        sblock->smem_virt = ioremap_nocache(sblock->smem_addr, sblock->smem_size);
        if (!sblock->smem_virt) {
                printk(KERN_ERR "Failed to map smem for sblock\n");
                smem_free(sblock->smem_addr, sblock->smem_size);
                kfree(sblock);
                return -EFAULT;
        }

        /* initialize ring and header */
        sblock->ring = kzalloc(sizeof(struct sblock_ring), GFP_KERNEL);
        if (!sblock->ring) {
                printk(KERN_ERR "Failed to allocate ring for sblock\n");
                iounmap(sblock->smem_virt);
                smem_free(sblock->smem_addr, sblock->smem_size);
                kfree(sblock);
                return -ENOMEM;
        }
        ringhd = (volatile struct sblock_ring_header *)(sblock->smem_virt);
        ringhd->txblk_addr = sblock->smem_addr + hsize;
        ringhd->txblk_count = txblocknum;
        ringhd->txblk_size = txblocksize;
        ringhd->txblk_rdptr = 0;
        ringhd->txblk_wrptr = 0;
        ringhd->txblk_blks = sblock->smem_addr + hsize +
                txblocknum * txblocksize + rxblocknum * rxblocksize;
        ringhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
        ringhd->rxblk_count = rxblocknum;
        ringhd->rxblk_size = rxblocksize;
        ringhd->rxblk_rdptr = 0;
        ringhd->rxblk_wrptr = 0;
        ringhd->rxblk_blks = ringhd->txblk_blks + txblocknum * sizeof(struct sblock_blks);

        poolhd = (volatile struct sblock_ring_header *)(sblock->smem_virt + sizeof(struct sblock_ring_header));
        poolhd->txblk_addr = sblock->smem_addr + hsize;
        poolhd->txblk_count = txblocknum;
        poolhd->txblk_size = txblocksize;
        poolhd->txblk_rdptr = 0;
        poolhd->txblk_wrptr = 0;
        poolhd->txblk_blks = ringhd->rxblk_blks + rxblocknum * sizeof(struct sblock_blks);
        poolhd->rxblk_addr = ringhd->txblk_addr + txblocknum * txblocksize;
        poolhd->rxblk_count = rxblocknum;
        poolhd->rxblk_size = rxblocksize;
        poolhd->rxblk_rdptr = 0;
        poolhd->rxblk_wrptr = 0;
        poolhd->rxblk_blks = poolhd->txblk_blks + txblocknum * sizeof(struct sblock_blks);

        sblock->ring->txrecord = kzalloc(sizeof(int) * txblocknum, GFP_KERNEL);
        if (!sblock->ring->txrecord) {
                printk(KERN_ERR "Failed to allocate memory for txrecord\n");
                iounmap(sblock->smem_virt);
                smem_free(sblock->smem_addr, sblock->smem_size);
                kfree(sblock->ring);
                kfree(sblock);
                return -ENOMEM;
        }

        sblock->ring->rxrecord = kzalloc(sizeof(int) * rxblocknum, GFP_KERNEL);
        if (!sblock->ring->rxrecord) {
                printk(KERN_ERR "Failed to allocate memory for rxrecord\n");
                iounmap(sblock->smem_virt);
                smem_free(sblock->smem_addr, sblock->smem_size);
                kfree(sblock->ring->txrecord);
                kfree(sblock->ring);
                kfree(sblock);
                return -ENOMEM;
        }

        sblock->ring->header = sblock->smem_virt;
        sblock->ring->txblk_virt = sblock->smem_virt +
                (ringhd->txblk_addr - sblock->smem_addr);
        sblock->ring->r_txblks = sblock->smem_virt +
                (ringhd->txblk_blks - sblock->smem_addr);
        sblock->ring->rxblk_virt = sblock->smem_virt +
                (ringhd->rxblk_addr - sblock->smem_addr);
        sblock->ring->r_rxblks = sblock->smem_virt +
                (ringhd->rxblk_blks - sblock->smem_addr);
        sblock->ring->p_txblks = sblock->smem_virt +
                (poolhd->txblk_blks - sblock->smem_addr);
        sblock->ring->p_rxblks = sblock->smem_virt +
                (poolhd->rxblk_blks - sblock->smem_addr);

        for (i = 0; i < txblocknum; i++) {
                sblock->ring->p_txblks[i].addr = poolhd->txblk_addr + i * txblocksize;
                sblock->ring->p_txblks[i].length = txblocksize;
                sblock->ring->txrecord[i] = SBLOCK_BLK_STATE_DONE;
                poolhd->txblk_wrptr++;
        }
        for (i = 0; i < rxblocknum; i++) {
                sblock->ring->p_rxblks[i].addr = poolhd->rxblk_addr + i * rxblocksize;
                sblock->ring->p_rxblks[i].length = rxblocksize;
                sblock->ring->rxrecord[i] = SBLOCK_BLK_STATE_DONE;
                poolhd->rxblk_wrptr++;
        }

        init_waitqueue_head(&sblock->ring->getwait);
        init_waitqueue_head(&sblock->ring->recvwait);
        spin_lock_init(&sblock->ring->r_txlock);
        spin_lock_init(&sblock->ring->r_rxlock);
        spin_lock_init(&sblock->ring->p_txlock);
        spin_lock_init(&sblock->ring->p_rxlock);

        sblock->thread = kthread_create(sblock_thread, sblock,
                        "sblock-%d-%d", dst, channel);
        if (IS_ERR(sblock->thread)) {
                printk(KERN_ERR "Failed to create kthread: sblock-%d-%d\n", dst, channel);
                iounmap(sblock->smem_virt);
                smem_free(sblock->smem_addr, sblock->smem_size);
                kfree(sblock->ring->txrecord);
                kfree(sblock->ring->rxrecord);
                kfree(sblock->ring);
                result = PTR_ERR(sblock->thread);
                kfree(sblock);
                return result;
        }

        sblocks[dst][channel] = sblock;
        wake_up_process(sblock->thread);

        return 0;
}

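/*
 * Tear down a channel: close the smsg channel, stop the worker thread
 * if it is still alive, wake any sleepers, then free the ring records
 * and unmap/free the shared memory.
 */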
void sblock_destroy(uint8_t dst, uint8_t channel)
{
        struct sblock_mgr *sblock = sblocks[dst][channel];

        if (sblock == NULL) {
                return;
        }

        sblock->state = SBLOCK_STATE_IDLE;
        smsg_ch_close(dst, channel, -1);

        /* stop sblock thread if it's created successfully and still alive */
        if (!IS_ERR_OR_NULL(sblock->thread)) {
                kthread_stop(sblock->thread);
        }

        if (sblock->ring) {
                wake_up_interruptible_all(&sblock->ring->recvwait);
                wake_up_interruptible_all(&sblock->ring->getwait);
                if (sblock->ring->txrecord) {
                        kfree(sblock->ring->txrecord);
                }
                if (sblock->ring->rxrecord) {
                        kfree(sblock->ring->rxrecord);
                }
                kfree(sblock->ring);
        }
        if (sblock->smem_virt) {
                iounmap(sblock->smem_virt);
        }
        smem_free(sblock->smem_addr, sblock->smem_size);
        kfree(sblock);

        sblocks[dst][channel] = NULL;
}

int sblock_register_notifier(uint8_t dst, uint8_t channel,
                void (*handler)(int event, void *data), void *data)
{
        struct sblock_mgr *sblock = sblocks[dst][channel];

        if (!sblock) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return -ENODEV;
        }
#ifndef CONFIG_SIPC_WCN
        if (sblock->handler) {
                printk(KERN_ERR "sblock handler already registered\n");
                return -EBUSY;
        }
#endif
        sblock->handler = handler;
        sblock->data = data;

        return 0;
}

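/*
 * Typical call sequence (a sketch only; error handling omitted, and
 * the handler name, buffer and block counts/sizes are illustrative):
 *
 *   sblock_create(dst, ch, 64, 1024, 64, 1024);
 *   sblock_register_notifier(dst, ch, my_handler, my_data);
 *
 *   TX side:                              RX side:
 *     sblock_get(dst, ch, &blk, -1);        sblock_receive(dst, ch, &blk, -1);
 *     memcpy(blk.addr, buf, len);           ... consume blk.addr/blk.length ...
 *     blk.length = len;                     sblock_release(dst, ch, &blk);
 *     sblock_send(dst, ch, &blk);
 */

/*
 * Take a free TX block from the pool.  timeout == 0 never sleeps,
 * timeout < 0 waits forever, timeout > 0 waits that many jiffies.
 */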
int sblock_get(uint8_t dst, uint8_t channel, struct sblock *blk, int timeout)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *ringhd = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        int txpos, index;
        int rval = 0;
        unsigned long flags;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return sblock ? -EIO : -ENODEV;
        }

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
        poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

        if (poolhd->txblk_rdptr == poolhd->txblk_wrptr) {
                if (timeout == 0) {
                        /* no wait */
                        printk(KERN_WARNING "sblock_get %d-%d is empty!\n",
                                dst, channel);
                        rval = -ENODATA;
                } else if (timeout < 0) {
                        /* wait forever */
                        rval = wait_event_interruptible(ring->getwait,
                                        poolhd->txblk_rdptr != poolhd->txblk_wrptr ||
                                        sblock->state == SBLOCK_STATE_IDLE);
                        if (rval < 0) {
                                printk(KERN_WARNING "sblock_get wait interrupted!\n");
                        }

                        if (sblock->state == SBLOCK_STATE_IDLE) {
                                printk(KERN_ERR "sblock_get sblock state is idle!\n");
                                rval = -EIO;
                        }
                } else {
                        /* wait timeout */
                        rval = wait_event_interruptible_timeout(ring->getwait,
                                        poolhd->txblk_rdptr != poolhd->txblk_wrptr ||
                                        sblock->state == SBLOCK_STATE_IDLE,
                                        timeout);
                        if (rval < 0) {
                                printk(KERN_WARNING "sblock_get wait interrupted!\n");
                        } else if (rval == 0) {
                                printk(KERN_WARNING "sblock_get wait timeout!\n");
                                rval = -ETIME;
                        }

                        if (sblock->state == SBLOCK_STATE_IDLE) {
                                printk(KERN_ERR "sblock_get sblock state is idle!\n");
                                rval = -EIO;
                        }
                }
        }

        if (rval < 0) {
                return rval;
        }

        /* multiple getters may race, so a get can still fail here */
        spin_lock_irqsave(&ring->p_txlock, flags);
        if (poolhd->txblk_rdptr != poolhd->txblk_wrptr &&
                        sblock->state == SBLOCK_STATE_READY) {
                txpos = sblock_get_ringpos(poolhd->txblk_rdptr, poolhd->txblk_count);
                blk->addr = sblock->smem_virt + (ring->p_txblks[txpos].addr - sblock->smem_addr);
                blk->length = poolhd->txblk_size;
                poolhd->txblk_rdptr = poolhd->txblk_rdptr + 1;
                index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
                ring->txrecord[index] = SBLOCK_BLK_STATE_PENDING;
        } else {
                rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
        }
        spin_unlock_irqrestore(&ring->p_txlock, flags);

        return rval;
}

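/*
 * Queue a filled TX block on the ring.  With 'yell' set (sblock_send)
 * an SMSG_EVENT_SBLOCK_SEND event is raised at once; with it clear
 * (sblock_send_prepare) the notification is left for a later
 * sblock_send_finish(), and ring->yell notes that the ring went
 * non-empty while events were suppressed.
 */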
static int sblock_send_ex(uint8_t dst, uint8_t channel, struct sblock *blk, bool yell)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring;
        volatile struct sblock_ring_header *ringhd;
        struct smsg mevt;
        int txpos, index;
        int rval = 0;
        unsigned long flags;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return sblock ? -EIO : -ENODEV;
        }

        pr_debug("sblock_send: dst=%d, channel=%d, addr=%p, len=%d\n",
                        dst, channel, blk->addr, blk->length);

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

        spin_lock_irqsave(&ring->r_txlock, flags);

        txpos = sblock_get_ringpos(ringhd->txblk_wrptr, ringhd->txblk_count);
        ring->r_txblks[txpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
        ring->r_txblks[txpos].length = blk->length;
        pr_debug("sblock_send: channel=%d, wrptr=%d, txpos=%d, addr=%x\n",
                        channel, ringhd->txblk_wrptr, txpos, ring->r_txblks[txpos].addr);
        ringhd->txblk_wrptr = ringhd->txblk_wrptr + 1;
        if (sblock->state == SBLOCK_STATE_READY) {
                if (yell) {
                        smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_SEND, 0);
                        rval = smsg_send(dst, &mevt, 0);
                } else if (!ring->yell) {
                        if ((int)(ringhd->txblk_wrptr - ringhd->txblk_rdptr) == 1 /*&&
                                ((int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr) == (sblock->txblknum - 1))*/) {
                                ring->yell = 1;
                        }
                }
        }
        index = sblock_get_index((blk->addr - ring->txblk_virt), sblock->txblksz);
        ring->txrecord[index] = SBLOCK_BLK_STATE_DONE;

        spin_unlock_irqrestore(&ring->r_txlock, flags);

        return rval;
}

int sblock_send(uint8_t dst, uint8_t channel, struct sblock *blk)
{
        return sblock_send_ex(dst, channel, blk, true);
}

int sblock_send_prepare(uint8_t dst, uint8_t channel, struct sblock *blk)
{
        return sblock_send_ex(dst, channel, blk, false);
}

int sblock_send_finish(uint8_t dst, uint8_t channel)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring;
        volatile struct sblock_ring_header *ringhd;
        struct smsg mevt;
        int rval = 0;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return sblock ? -EIO : -ENODEV;
        }

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

        if (ringhd->txblk_wrptr != ringhd->txblk_rdptr) {
                smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_SEND, 0);
                rval = smsg_send(dst, &mevt, 0);
        }

        return rval;
}

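/*
 * Fetch the next RX block from the ring, with the same timeout
 * convention as sblock_get().  The caller must hand the block back
 * via sblock_release() once it is done with it.
 */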
int sblock_receive(uint8_t dst, uint8_t channel, struct sblock *blk, int timeout)
{
        struct sblock_mgr *sblock = sblocks[dst][channel];
        struct sblock_ring *ring;
        volatile struct sblock_ring_header *ringhd;
        int rxpos, index, rval = 0;
        unsigned long flags;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return sblock ? -EIO : -ENODEV;
        }

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

        pr_debug("sblock_receive: dst=%d, channel=%d, timeout=%d\n",
                        dst, channel, timeout);
        pr_debug("sblock_receive: channel=%d, wrptr=%d, rdptr=%d\n",
                        channel, ringhd->rxblk_wrptr, ringhd->rxblk_rdptr);

        if (ringhd->rxblk_wrptr == ringhd->rxblk_rdptr) {
                if (timeout == 0) {
                        /* no wait */
                        pr_debug("sblock_receive %d-%d is empty!\n",
                                dst, channel);
                        rval = -ENODATA;
                } else if (timeout < 0) {
                        /* wait forever */
                        rval = wait_event_interruptible(ring->recvwait,
                                ringhd->rxblk_wrptr != ringhd->rxblk_rdptr);
                        if (rval < 0) {
                                printk(KERN_WARNING "sblock_receive wait interrupted!\n");
                        }

                        if (sblock->state == SBLOCK_STATE_IDLE) {
                                printk(KERN_ERR "sblock_receive sblock state is idle!\n");
                                rval = -EIO;
                        }
                } else {
                        /* wait timeout */
                        rval = wait_event_interruptible_timeout(ring->recvwait,
                                ringhd->rxblk_wrptr != ringhd->rxblk_rdptr, timeout);
                        if (rval < 0) {
                                printk(KERN_WARNING "sblock_receive wait interrupted!\n");
                        } else if (rval == 0) {
                                printk(KERN_WARNING "sblock_receive wait timeout!\n");
                                rval = -ETIME;
                        }

                        if (sblock->state == SBLOCK_STATE_IDLE) {
                                printk(KERN_ERR "sblock_receive sblock state is idle!\n");
                                rval = -EIO;
                        }
                }
        }

        if (rval < 0) {
                return rval;
        }

        /* multiple receivers may race, so a recv can still fail here */
        spin_lock_irqsave(&ring->r_rxlock, flags);

        if (ringhd->rxblk_wrptr != ringhd->rxblk_rdptr &&
                        sblock->state == SBLOCK_STATE_READY) {
                rxpos = sblock_get_ringpos(ringhd->rxblk_rdptr, ringhd->rxblk_count);
                blk->addr = ring->r_rxblks[rxpos].addr - sblock->smem_addr + sblock->smem_virt;
                blk->length = ring->r_rxblks[rxpos].length;
                ringhd->rxblk_rdptr = ringhd->rxblk_rdptr + 1;
                pr_debug("sblock_receive: channel=%d, rxpos=%d, addr=%p, len=%d\n",
                        channel, rxpos, blk->addr, blk->length);
                index = sblock_get_index((blk->addr - ring->rxblk_virt), sblock->rxblksz);
                ring->rxrecord[index] = SBLOCK_BLK_STATE_PENDING;
        } else {
                rval = sblock->state == SBLOCK_STATE_READY ? -EAGAIN : -EIO;
        }
        spin_unlock_irqrestore(&ring->r_rxlock, flags);

        return rval;
}

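/*
 * The two count helpers below read the pointer pairs under the
 * matching lock: "arrived" is RX ring entries not yet read, "free" is
 * TX pool entries not yet handed out.
 */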
int sblock_get_arrived_count(uint8_t dst, uint8_t channel)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *ringhd = NULL;
        int blk_count = 0;
        unsigned long flags;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return -ENODEV;
        }

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);

        spin_lock_irqsave(&ring->r_rxlock, flags);
        blk_count = (int)(ringhd->rxblk_wrptr - ringhd->rxblk_rdptr);
        spin_unlock_irqrestore(&ring->r_rxlock, flags);

        return blk_count;
}

int sblock_get_free_count(uint8_t dst, uint8_t channel)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        int blk_count = 0;
        unsigned long flags;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return -ENODEV;
        }

        ring = sblock->ring;
        poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

        spin_lock_irqsave(&ring->p_txlock, flags);
        blk_count = (int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr);
        spin_unlock_irqrestore(&ring->p_txlock, flags);

        return blk_count;
}

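/*
 * Hand a received block back to the RX pool so the peer can refill
 * it.  When the pool transitions from empty to one entry, an
 * SMSG_EVENT_SBLOCK_RELEASE event tells the peer that blocks are
 * available again.
 */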
int sblock_release(uint8_t dst, uint8_t channel, struct sblock *blk)
{
        struct sblock_mgr *sblock = (struct sblock_mgr *)sblocks[dst][channel];
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *ringhd = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        struct smsg mevt;
        unsigned long flags;
        int rxpos;
        int index;

        if (!sblock || sblock->state != SBLOCK_STATE_READY) {
                printk(KERN_ERR "sblock-%d-%d not ready!\n", dst, channel);
                return -ENODEV;
        }

        pr_debug("sblock_release: dst=%d, channel=%d, addr=%p, len=%d\n",
                        dst, channel, blk->addr, blk->length);

        ring = sblock->ring;
        ringhd = (volatile struct sblock_ring_header *)(&ring->header->ring);
        poolhd = (volatile struct sblock_ring_header *)(&ring->header->pool);

        spin_lock_irqsave(&ring->p_rxlock, flags);
        rxpos = sblock_get_ringpos(poolhd->rxblk_wrptr, poolhd->rxblk_count);
        ring->p_rxblks[rxpos].addr = blk->addr - sblock->smem_virt + sblock->smem_addr;
        ring->p_rxblks[rxpos].length = poolhd->rxblk_size;
        poolhd->rxblk_wrptr = poolhd->rxblk_wrptr + 1;
        pr_debug("sblock_release: addr=%x\n", ring->p_rxblks[rxpos].addr);

        if ((int)(poolhd->rxblk_wrptr - poolhd->rxblk_rdptr) == 1 &&
                        sblock->state == SBLOCK_STATE_READY) {
                /* send smsg to notify the peer side */
                smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBLOCK_RELEASE, 0);
                smsg_send(dst, &mevt, -1);
        }

        index = sblock_get_index((blk->addr - ring->rxblk_virt), sblock->rxblksz);
        ring->rxrecord[index] = SBLOCK_BLK_STATE_DONE;

        spin_unlock_irqrestore(&ring->p_rxlock, flags);

        return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int sblock_debug_show(struct seq_file *m, void *private)
{
        struct sblock_mgr *sblock = NULL;
        struct sblock_ring *ring = NULL;
        volatile struct sblock_ring_header *ringhd = NULL;
        volatile struct sblock_ring_header *poolhd = NULL;
        int i, j;

        for (i = 0; i < SIPC_ID_NR; i++) {
                for (j = 0; j < SMSG_CH_NR; j++) {
                        sblock = sblocks[i][j];
                        if (!sblock) {
                                continue;
                        }
                        ring = sblock->ring;
                        ringhd = (volatile struct sblock_ring_header *)(&sblock->ring->header->ring);
                        poolhd = (volatile struct sblock_ring_header *)(&sblock->ring->header->pool);

                        seq_printf(m, "sblock dst: 0x%0x, channel: 0x%0x, state: %d, smem_virt: 0x%lx, smem_addr: 0x%0x, smem_size: 0x%0x, txblksz: %d, rxblksz: %d\n",
                                sblock->dst, sblock->channel, sblock->state,
                                (size_t)sblock->smem_virt, sblock->smem_addr,
                                sblock->smem_size, sblock->txblksz, sblock->rxblksz);
                        seq_printf(m, "sblock ring: txblk_virt: 0x%lx, rxblk_virt: 0x%lx\n",
                                (size_t)ring->txblk_virt, (size_t)ring->rxblk_virt);
                        seq_printf(m, "sblock ring header: rxblk_addr: 0x%0x, rxblk_rdptr: 0x%0x, rxblk_wrptr: 0x%0x, rxblk_size: %d, rxblk_count: %d, rxblk_blks: 0x%0x\n",
                                ringhd->rxblk_addr, ringhd->rxblk_rdptr,
                                ringhd->rxblk_wrptr, ringhd->rxblk_size,
                                ringhd->rxblk_count, ringhd->rxblk_blks);
                        seq_printf(m, "sblock ring header: txblk_addr: 0x%0x, txblk_rdptr: 0x%0x, txblk_wrptr: 0x%0x, txblk_size: %d, txblk_count: %d, txblk_blks: 0x%0x\n",
                                ringhd->txblk_addr, ringhd->txblk_rdptr,
                                ringhd->txblk_wrptr, ringhd->txblk_size,
                                ringhd->txblk_count, ringhd->txblk_blks);
                        seq_printf(m, "sblock pool header: rxblk_addr: 0x%0x, rxblk_rdptr: 0x%0x, rxblk_wrptr: 0x%0x, rxblk_size: %d, rxpool_count: %d, rxblk_blks: 0x%0x\n",
                                poolhd->rxblk_addr, poolhd->rxblk_rdptr,
                                poolhd->rxblk_wrptr, poolhd->rxblk_size,
                                (int)(poolhd->rxblk_wrptr - poolhd->rxblk_rdptr),
                                poolhd->rxblk_blks);
                        seq_printf(m, "sblock pool header: txblk_addr: 0x%0x, txblk_rdptr: 0x%0x, txblk_wrptr: 0x%0x, txblk_size: %d, txpool_count: %d, txblk_blks: 0x%0x\n",
                                poolhd->txblk_addr, poolhd->txblk_rdptr,
                                poolhd->txblk_wrptr, poolhd->txblk_size,
                                (int)(poolhd->txblk_wrptr - poolhd->txblk_rdptr),
                                poolhd->txblk_blks);
                }
        }
        return 0;
}

static int sblock_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, sblock_debug_show, inode->i_private);
}

static const struct file_operations sblock_debug_fops = {
        .open = sblock_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

int sblock_init_debugfs(void *root)
{
        if (!root)
                return -ENXIO;
        debugfs_create_file("sblock", S_IRUGO, (struct dentry *)root, NULL, &sblock_debug_fops);
        return 0;
}

#endif /* CONFIG_DEBUG_FS */

EXPORT_SYMBOL(sblock_put);
EXPORT_SYMBOL(sblock_create);
EXPORT_SYMBOL(sblock_destroy);
EXPORT_SYMBOL(sblock_register_notifier);
EXPORT_SYMBOL(sblock_get);
EXPORT_SYMBOL(sblock_send);
EXPORT_SYMBOL(sblock_send_prepare);
EXPORT_SYMBOL(sblock_send_finish);
EXPORT_SYMBOL(sblock_receive);
EXPORT_SYMBOL(sblock_get_arrived_count);
EXPORT_SYMBOL(sblock_get_free_count);
EXPORT_SYMBOL(sblock_release);

MODULE_AUTHOR("Chen Gaopeng");
MODULE_DESCRIPTION("SIPC/SBLOCK driver");
MODULE_LICENSE("GPL");