Input: sprd_eic_keys: remove event log
[profile/mobile/platform/kernel/linux-3.10-sc7730.git] / drivers / sipc / sbuf.c
1 /*
2  * Copyright (C) 2012 Spreadtrum Communications Inc.
3  *
4  * This software is licensed under the terms of the GNU General Public
5  * License version 2, as published by the Free Software Foundation, and
6  * may be copied, distributed, and modified under those terms.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
11  * GNU General Public License for more details.
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/wait.h>
17 #include <linux/interrupt.h>
18 #include <linux/sched.h>
19 #include <linux/kthread.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/io.h>
23 #include <asm/uaccess.h>
24 #include <linux/debugfs.h>
25 #include <linux/seq_file.h>
26
27
28 #include <linux/sipc.h>
29 #include "sbuf.h"
30
31
32 static struct sbuf_mgr *sbufs[SIPC_ID_NR][SMSG_CH_NR];
33
34 static int sbuf_thread(void *data)
35 {
36         struct sbuf_mgr *sbuf = data;
37         struct smsg mcmd, mrecv;
38         int rval, bufid;
39         struct sched_param param = {.sched_priority = 90};
40
41         /*set the thread as a real time thread, and its priority is 90*/
42         sched_setscheduler(current, SCHED_RR, &param);
43
44         /* since the channel open may hang, we call it in the sbuf thread */
45         rval = smsg_ch_open(sbuf->dst, sbuf->channel, -1);
46         if (rval != 0) {
47                 printk(KERN_ERR "Failed to open channel %d\n", sbuf->channel);
48                 /* assign NULL to thread poniter as failed to open channel */
49                 sbuf->thread = NULL;
50                 return rval;
51         }
52
53         /* sbuf init done, handle the ring rx events */
54         while (!kthread_should_stop()) {
55                 /* monitor sbuf rdptr/wrptr update smsg */
56                 smsg_set(&mrecv, sbuf->channel, 0, 0, 0);
57                 rval = smsg_recv(sbuf->dst, &mrecv, -1);
58
59                 if (rval == -EIO) {
60                 /* channel state is free */
61                         msleep(5);
62                         continue;
63                 }
64
65                 pr_debug("sbuf thread recv msg: dst=%d, channel=%d, "
66                                 "type=%d, flag=0x%04x, value=0x%08x\n",
67                                 sbuf->dst, sbuf->channel,
68                                 mrecv.type, mrecv.flag, mrecv.value);
69
70                 switch (mrecv.type) {
71                 case SMSG_TYPE_OPEN:
72                         /* handle channel recovery */
73                         smsg_open_ack(sbuf->dst, sbuf->channel);
74                         break;
75                 case SMSG_TYPE_CLOSE:
76                         /* handle channel recovery */
77                         smsg_close_ack(sbuf->dst, sbuf->channel);
78                         sbuf->state = SBUF_STATE_IDLE;
79                         break;
80                 case SMSG_TYPE_CMD:
81                         /* respond cmd done for sbuf init */
82                         WARN_ON(mrecv.flag != SMSG_CMD_SBUF_INIT);
83                         smsg_set(&mcmd, sbuf->channel, SMSG_TYPE_DONE,
84                                         SMSG_DONE_SBUF_INIT, sbuf->smem_addr);
85                         smsg_send(sbuf->dst, &mcmd, -1);
86                         sbuf->state = SBUF_STATE_READY;
87                         break;
88                 case SMSG_TYPE_EVENT:
89                         bufid = mrecv.value;
90                         WARN_ON(bufid >= sbuf->ringnr);
91                         switch (mrecv.flag) {
92                         case SMSG_EVENT_SBUF_RDPTR:
93                                 wake_up_interruptible_all(&(sbuf->rings[bufid].txwait));
94                                 if (sbuf->rings[bufid].handler) {
95                                         sbuf->rings[bufid].handler(SBUF_NOTIFY_WRITE, sbuf->rings[bufid].data);
96                                 }
97                                 break;
98                         case SMSG_EVENT_SBUF_WRPTR:
99                                 wake_up_interruptible_all(&(sbuf->rings[bufid].rxwait));
100                                 if (sbuf->rings[bufid].handler) {
101                                         sbuf->rings[bufid].handler(SBUF_NOTIFY_READ, sbuf->rings[bufid].data);
102                                 }
103                                 break;
104                         default:
105                                 rval = 1;
106                                 break;
107                         }
108                         break;
109                 default:
110                         rval = 1;
111                         break;
112                 };
113                 if (rval) {
114                         printk(KERN_WARNING "non-handled sbuf msg: %d-%d, %d, %d, %d\n",
115                                         sbuf->dst, sbuf->channel,
116                                         mrecv.type, mrecv.flag, mrecv.value);
117                         rval = 0;
118                 }
119         }
120
121         return 0;
122 }
123
124 int sbuf_create(uint8_t dst, uint8_t channel, uint32_t bufnum,
125                 uint32_t txbufsize, uint32_t rxbufsize)
126 {
127         struct sbuf_mgr *sbuf;
128         volatile struct sbuf_smem_header *smem;
129         volatile struct sbuf_ring_header *ringhd;
130         int hsize, i, result;
131
132         sbuf = kzalloc(sizeof(struct sbuf_mgr), GFP_KERNEL);
133         if (!sbuf) {
134                 printk(KERN_ERR "Failed to allocate mgr for sbuf\n");
135                 return -ENOMEM;
136         }
137
138         sbuf->state = SBUF_STATE_IDLE;
139         sbuf->dst = dst;
140         sbuf->channel = channel;
141         sbuf->ringnr = bufnum;
142
143         /* allocate smem */
144         hsize = sizeof(struct sbuf_smem_header) + sizeof(struct sbuf_ring_header) * bufnum;
145         sbuf->smem_size = hsize + (txbufsize + rxbufsize) * bufnum;
146         sbuf->smem_addr = smem_alloc(sbuf->smem_size);
147         if (!sbuf->smem_addr) {
148                 printk(KERN_ERR "Failed to allocate smem for sbuf\n");
149                 kfree(sbuf);
150                 return -ENOMEM;
151         }
152         sbuf->smem_virt = ioremap_nocache(sbuf->smem_addr, sbuf->smem_size);
153         if (!sbuf->smem_virt) {
154                 printk(KERN_ERR "Failed to map smem for sbuf\n");
155                 smem_free(sbuf->smem_addr, sbuf->smem_size);
156                 kfree(sbuf);
157                 return -EFAULT;
158         }
159
160         /* allocate rings description */
161         sbuf->rings = kzalloc(sizeof(struct sbuf_ring) * bufnum, GFP_KERNEL);
162         if (!sbuf->rings) {
163                 printk(KERN_ERR "Failed to allocate rings for sbuf\n");
164                 iounmap(sbuf->smem_virt);
165                 smem_free(sbuf->smem_addr, sbuf->smem_size);
166                 kfree(sbuf);
167                 return -ENOMEM;
168         }
169
170         /* initialize all ring bufs */
171         smem = (volatile struct sbuf_smem_header *)sbuf->smem_virt;
172         smem->ringnr = bufnum;
173         for (i = 0; i < bufnum; i++) {
174                 ringhd = (volatile struct sbuf_ring_header *)&(smem->headers[i]);
175                 ringhd->txbuf_addr = sbuf->smem_addr + hsize +
176                                 (txbufsize + rxbufsize) * i;
177                 ringhd->txbuf_size = txbufsize;
178                 ringhd->txbuf_rdptr = 0;
179                 ringhd->txbuf_wrptr = 0;
180                 ringhd->rxbuf_addr = smem->headers[i].txbuf_addr + txbufsize;
181                 ringhd->rxbuf_size = rxbufsize;
182                 ringhd->rxbuf_rdptr = 0;
183                 ringhd->rxbuf_wrptr = 0;
184
185                 sbuf->rings[i].header = ringhd;
186                 sbuf->rings[i].txbuf_virt = sbuf->smem_virt + hsize +
187                                 (txbufsize + rxbufsize) * i;
188                 sbuf->rings[i].rxbuf_virt = sbuf->rings[i].txbuf_virt + txbufsize;
189                 init_waitqueue_head(&(sbuf->rings[i].txwait));
190                 init_waitqueue_head(&(sbuf->rings[i].rxwait));
191                 mutex_init(&(sbuf->rings[i].txlock));
192                 mutex_init(&(sbuf->rings[i].rxlock));
193         }
194
195         sbuf->thread = kthread_create(sbuf_thread, sbuf,
196                         "sbuf-%d-%d", dst, channel);
197         if (IS_ERR(sbuf->thread)) {
198                 printk(KERN_ERR "Failed to create kthread: sbuf-%d-%d\n", dst, channel);
199                 kfree(sbuf->rings);
200                 iounmap(sbuf->smem_virt);
201                 smem_free(sbuf->smem_addr, sbuf->smem_size);
202                 result = PTR_ERR(sbuf->thread);
203                 kfree(sbuf);
204
205                 return result;
206         }
207
208         sbufs[dst][channel] = sbuf;
209         wake_up_process(sbuf->thread);
210
211         return 0;
212 }
213
214 void sbuf_destroy(uint8_t dst, uint8_t channel)
215 {
216         struct sbuf_mgr *sbuf = sbufs[dst][channel];
217         int i;
218
219         if (sbuf == NULL) {
220                 return;
221         }
222
223         sbuf->state = SBUF_STATE_IDLE;
224         smsg_ch_close(dst, channel, -1);
225
226         /* stop sbuf thread if it's created successfully and still alive */
227         if (!IS_ERR_OR_NULL(sbuf->thread)) {
228                 kthread_stop(sbuf->thread);
229         }
230
231         if (sbuf->rings) {
232                 for (i = 0; i < sbuf->ringnr; i++) {
233                         wake_up_interruptible_all(&sbuf->rings[i].txwait);
234                         wake_up_interruptible_all(&sbuf->rings[i].rxwait);
235                 }
236                 kfree(sbuf->rings);
237         }
238
239         if (sbuf->smem_virt) {
240                 iounmap(sbuf->smem_virt);
241         }
242         smem_free(sbuf->smem_addr, sbuf->smem_size);
243         kfree(sbuf);
244
245         sbufs[dst][channel] = NULL;
246 }
247
/*
 * sbuf_write - copy up to @len bytes from @buf into tx ring @bufid of
 * channel (@dst, @channel) and notify the peer when the ring transitions
 * from empty.
 *
 * @buf may be a kernel or a user pointer; addresses above TASK_SIZE are
 * copied with unalign_memcpy, others with unalign_copy_from_user.
 * NOTE(review): this is an address-range heuristic — assumes kernel
 * buffers always live above TASK_SIZE; confirm against callers.
 * @timeout: 0 = non-blocking, <0 = wait forever, >0 = wait (jiffies).
 *
 * Returns the number of bytes actually written, or, when nothing was
 * written, a negative errno (-ENODEV, -EBUSY, -ETIME, -EIO, -EFAULT).
 */
int sbuf_write(uint8_t dst, uint8_t channel, uint32_t bufid,
                void *buf, uint32_t len, int timeout)
{
        struct sbuf_mgr *sbuf = sbufs[dst][channel];
        struct sbuf_ring *ring = NULL;
        volatile struct sbuf_ring_header *ringhd = NULL;
        struct smsg mevt;
        void *txpos;
        int rval, left, tail, txsize;

        if (!sbuf) {
                return -ENODEV;
        }
        ring = &(sbuf->rings[bufid]);
        ringhd = ring->header;
        if (sbuf->state != SBUF_STATE_READY) {
                printk(KERN_ERR "sbuf-%d-%d not ready to write!\n", dst, channel);
                return -ENODEV;
        }

        pr_debug("sbuf_write: dst=%d, channel=%d, bufid=%d, len=%d, timeout=%d\n",
                        dst, channel, bufid, len, timeout);
        pr_debug("sbuf_write: channel=%d, wrptr=%d, rdptr=%d",
                        channel, ringhd->txbuf_wrptr, ringhd->txbuf_rdptr);

        rval = 0;
        left = len;

        /* non-blocking callers must not sleep on the ring lock either */
        if (timeout) {
                mutex_lock(&ring->txlock);
        } else {
                if (!mutex_trylock(&(ring->txlock))) {
                        printk(KERN_INFO "sbuf_write busy!\n");
                        return -EBUSY;
                }
        }

        /* wait for free space in the tx ring according to @timeout;
         * wrptr/rdptr are free-running counters, so (wrptr - rdptr) is the
         * number of bytes in flight and "full" means it reached txbuf_size */
        if (timeout == 0) {
                /* no wait */
                if ((int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) >=
                                ringhd->txbuf_size) {
                        printk(KERN_WARNING "sbuf %d-%d ring %d txbuf is full!\n",
                                dst, channel, bufid);
                        rval = -EBUSY;
                }
        } else if (timeout < 0) {
                /* wait forever */
                rval = wait_event_interruptible(ring->txwait,
                        (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) <
                        ringhd->txbuf_size || sbuf->state == SBUF_STATE_IDLE);
                if (rval < 0) {
                        printk(KERN_WARNING "sbuf_write wait interrupted!\n");
                }

                /* IDLE means the channel went down while we slept */
                if (sbuf->state == SBUF_STATE_IDLE) {
                        printk(KERN_ERR "sbuf_write sbuf state is idle!\n");
                        rval = -EIO;
                }
        } else {
                /* wait timeout */
                rval = wait_event_interruptible_timeout(ring->txwait,
                        (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) <
                        ringhd->txbuf_size || sbuf->state == SBUF_STATE_IDLE,
                        timeout);
                if (rval < 0) {
                        printk(KERN_WARNING "sbuf_write wait interrupted!\n");
                } else if (rval == 0) {
                        printk(KERN_WARNING "sbuf_write wait timeout!\n");
                        rval = -ETIME;
                }

                if (sbuf->state == SBUF_STATE_IDLE) {
                        printk(KERN_ERR "sbuf_write sbuf state is idle!\n");
                        rval = -EIO;
                }

        }

        /* copy as much as fits; loop condition re-checks space and channel
         * state, so a failed/interrupted wait above simply copies nothing */
        while (left && (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr) < ringhd->txbuf_size &&
                        sbuf->state == SBUF_STATE_READY) {
                /* calc txpos & txsize */
                txpos = ring->txbuf_virt + ringhd->txbuf_wrptr % ringhd->txbuf_size;
                txsize = ringhd->txbuf_size - (int)(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr);
                txsize = min(txsize, left);

                /* tail > 0 means the chunk wraps past the end of the ring */
                tail = txpos + txsize - (ring->txbuf_virt + ringhd->txbuf_size);
                if (tail > 0) {
                        /* ring buffer is rounded */
                        if ((uintptr_t)buf > TASK_SIZE) {
                                unalign_memcpy(txpos, buf, txsize - tail);
                                unalign_memcpy(ring->txbuf_virt, buf + txsize - tail, tail);
                        } else {
                                if(unalign_copy_from_user(txpos, (void __user *)buf, txsize - tail) ||
                                    unalign_copy_from_user(ring->txbuf_virt,
                                    (void __user *)(buf + txsize - tail), tail)) {
                                        printk(KERN_ERR "sbuf_write: failed to copy from user!\n");
                                        rval = -EFAULT;
                                        break;
                                }
                        }
                } else {
                        if ((uintptr_t)buf > TASK_SIZE) {
                                unalign_memcpy(txpos, buf, txsize);
                        } else {
                                /* handle the user space address */
                                if(unalign_copy_from_user(txpos, (void __user *)buf, txsize)) {
                                        printk(KERN_ERR "sbuf_write: failed to copy from user!\n");
                                        rval = -EFAULT;
                                        break;
                                }
                        }
                }


                pr_debug("sbuf_write: channel=%d, txpos=%p, txsize=%d\n", channel, txpos, txsize);

                /* update tx wrptr */
                ringhd->txbuf_wrptr = ringhd->txbuf_wrptr + txsize;
                /* tx ringbuf is empty, so need to notify peer side */
                if(ringhd->txbuf_wrptr - ringhd->txbuf_rdptr == txsize) {
                        smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBUF_WRPTR, bufid);
                        smsg_send(dst, &mevt, -1);
                }

                left -= txsize;
                buf += txsize;
        }

        mutex_unlock(&ring->txlock);

        pr_debug("sbuf_write done: channel=%d, len=%d\n", channel, len - left);

        /* partial progress wins over any pending error code */
        if (len == left) {
                return rval;
        } else {
                return (len - left);
        }
}
386
/*
 * sbuf_read - copy up to @len bytes out of rx ring @bufid of channel
 * (@dst, @channel) into @buf and notify the peer when the ring transitions
 * from full.
 *
 * @buf may be a kernel or a user pointer; addresses above TASK_SIZE are
 * copied with unalign_memcpy, others with unalign_copy_to_user.
 * NOTE(review): this is an address-range heuristic — assumes kernel
 * buffers always live above TASK_SIZE; confirm against callers.
 * @timeout: 0 = non-blocking, <0 = wait forever, >0 = wait (jiffies).
 *
 * Returns the number of bytes actually read, or, when nothing was read,
 * a negative errno (-ENODEV, -ENODATA, -ETIME, -EIO, -EFAULT).
 */
int sbuf_read(uint8_t dst, uint8_t channel, uint32_t bufid,
                void *buf, uint32_t len, int timeout)
{
        struct sbuf_mgr *sbuf = sbufs[dst][channel];
        struct sbuf_ring *ring = NULL;
        volatile struct sbuf_ring_header *ringhd = NULL;
        struct smsg mevt;
        void *rxpos;
        int rval, left, tail, rxsize;

        if (!sbuf) {
                return -ENODEV;
        }
        ring = &(sbuf->rings[bufid]);
        ringhd = ring->header;

        if (sbuf->state != SBUF_STATE_READY) {
                printk(KERN_ERR "sbuf-%d-%d not ready to read!\n", dst, channel);
                return -ENODEV;
        }

        pr_debug("sbuf_read: dst=%d, channel=%d, bufid=%d, len=%d, timeout=%d\n",
                        dst, channel, bufid, len, timeout);
        pr_debug("sbuf_read: channel=%d, wrptr=%d, rdptr=%d",
                        channel, ringhd->rxbuf_wrptr, ringhd->rxbuf_rdptr);

        rval = 0;
        left = len;

        /* non-blocking callers must not sleep on the ring lock either */
        if (timeout) {
                mutex_lock(&ring->rxlock);
        } else {
                if (!mutex_trylock(&(ring->rxlock))) {
                        printk(KERN_INFO "sbuf_read busy!\n");
                        return -EBUSY;
                }
        }

        /* ring empty (wrptr == rdptr): wait for data per @timeout */
        if (ringhd->rxbuf_wrptr == ringhd->rxbuf_rdptr) {
                if (timeout == 0) {
                        /* no wait */
                        printk(KERN_WARNING "sbuf %d-%d ring %d rxbuf is empty!\n",
                                dst, channel, bufid);
                        rval = -ENODATA;
                } else if (timeout < 0) {
                        /* wait forever */
                        rval = wait_event_interruptible(ring->rxwait,
                                ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr ||
                                sbuf->state == SBUF_STATE_IDLE);
                        if (rval < 0) {
                                printk(KERN_WARNING "sbuf_read wait interrupted!\n");
                        }

                        /* IDLE means the channel went down while we slept */
                        if (sbuf->state == SBUF_STATE_IDLE) {
                                printk(KERN_ERR "sbuf_read sbuf state is idle!\n");
                                rval = -EIO;
                        }
                } else {
                        /* wait timeout */
                        rval = wait_event_interruptible_timeout(ring->rxwait,
                                ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr ||
                                sbuf->state == SBUF_STATE_IDLE, timeout);
                        if (rval < 0) {
                                printk(KERN_WARNING "sbuf_read wait interrupted!\n");
                        } else if (rval == 0) {
                                printk(KERN_WARNING "sbuf_read wait timeout!\n");
                                rval = -ETIME;
                        }

                        if (sbuf->state == SBUF_STATE_IDLE) {
                                printk(KERN_ERR "sbuf_read sbuf state is idle!\n");
                                rval = -EIO;
                        }
                }
        }

        /* drain as much as available; loop condition re-checks data and
         * channel state, so a failed/interrupted wait above reads nothing */
        while (left && (ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr) &&
                        sbuf->state == SBUF_STATE_READY) {
                /* calc rxpos & rxsize */
                rxpos = ring->rxbuf_virt + ringhd->rxbuf_rdptr % ringhd->rxbuf_size;
                rxsize = (int)(ringhd->rxbuf_wrptr - ringhd->rxbuf_rdptr);
                /* check overrun */
                WARN_ON(rxsize > ringhd->rxbuf_size);
                rxsize = min(rxsize, left);

                pr_debug("sbuf_read: channel=%d, buf=%p, rxpos=%p, rxsize=%d\n", channel, buf, rxpos, rxsize);

                /* tail > 0 means the chunk wraps past the end of the ring */
                tail = rxpos + rxsize - (ring->rxbuf_virt + ringhd->rxbuf_size);

                if (tail > 0) {
                        /* ring buffer is rounded */
                        if ((uintptr_t)buf > TASK_SIZE) {
                                unalign_memcpy(buf, rxpos, rxsize - tail);
                                unalign_memcpy(buf + rxsize - tail, ring->rxbuf_virt, tail);
                        } else {
                                /* handle the user space address */
                                if(unalign_copy_to_user((void __user *)buf, rxpos, rxsize - tail) ||
                                    unalign_copy_to_user((void __user *)(buf + rxsize - tail),
                                    ring->rxbuf_virt, tail)) {
                                        printk(KERN_ERR "sbuf_read: failed to copy to user!\n");
                                        rval = -EFAULT;
                                        break;
                                }
                        }
                } else {
                        if ((uintptr_t)buf > TASK_SIZE) {
                                unalign_memcpy(buf, rxpos, rxsize);
                        } else {
                                /* handle the user space address */
                                if (unalign_copy_to_user((void __user *)buf, rxpos, rxsize)) {
                                        printk(KERN_ERR "sbuf_read: failed to copy to user!\n");
                                        rval = -EFAULT;
                                        break;
                                }
                        }
                }

                /* update rx rdptr */
                ringhd->rxbuf_rdptr = ringhd->rxbuf_rdptr + rxsize;
                /* rx ringbuf is full ,so need to notify peer side */
                if(ringhd->rxbuf_wrptr - ringhd->rxbuf_rdptr == ringhd->rxbuf_size - rxsize) {
                        smsg_set(&mevt, channel, SMSG_TYPE_EVENT, SMSG_EVENT_SBUF_RDPTR, bufid);
                        smsg_send(dst, &mevt, -1);
                }

                left -= rxsize;
                buf += rxsize;
        }

        mutex_unlock(&ring->rxlock);

        pr_debug("sbuf_read done: channel=%d, len=%d", channel, len - left);

        /* partial progress wins over any pending error code */
        if (len == left) {
                return rval;
        } else {
                return (len - left);
        }
}
526
527 int sbuf_poll_wait(uint8_t dst, uint8_t channel, uint32_t bufid,
528                 struct file *filp, poll_table *wait)
529 {
530         struct sbuf_mgr *sbuf = sbufs[dst][channel];
531         struct sbuf_ring *ring = NULL;
532         volatile struct sbuf_ring_header *ringhd = NULL;
533         unsigned int mask = 0;
534
535         if (!sbuf) {
536                 return -ENODEV;
537         }
538         ring = &(sbuf->rings[bufid]);
539         ringhd = ring->header;
540         if (sbuf->state != SBUF_STATE_READY) {
541                 printk(KERN_ERR "sbuf-%d-%d not ready to poll !\n", dst, channel);
542                 return -ENODEV;
543         }
544
545         poll_wait(filp, &ring->txwait, wait);
546         poll_wait(filp, &ring->rxwait, wait);
547
548         if (ringhd->rxbuf_wrptr != ringhd->rxbuf_rdptr) {
549                 mask |= POLLIN | POLLRDNORM;
550         }
551
552         if (ringhd->txbuf_wrptr - ringhd->txbuf_rdptr < ringhd->txbuf_size) {
553                 mask |= POLLOUT | POLLWRNORM;
554         }
555
556         return mask;
557 }
558
559 int sbuf_status(uint8_t dst, uint8_t channel)
560 {
561         struct sbuf_mgr *sbuf = sbufs[dst][channel];
562
563         if (!sbuf) {
564                 return -ENODEV;
565         }
566         if (sbuf->state != SBUF_STATE_READY) {
567                 return -ENODEV;
568         }
569
570         return 0;
571 }
572
573 int sbuf_register_notifier(uint8_t dst, uint8_t channel, uint32_t bufid,
574                 void (*handler)(int event, void *data), void *data)
575 {
576         struct sbuf_mgr *sbuf = sbufs[dst][channel];
577         struct sbuf_ring *ring = NULL;
578
579         if (!sbuf) {
580                 return -ENODEV;
581         }
582         ring = &(sbuf->rings[bufid]);
583         ring->handler = handler;
584         ring->data = data;
585
586         return 0;
587 }
588
589 #if defined(CONFIG_DEBUG_FS)
590
591 static int sbuf_debug_show(struct seq_file *m, void *private)
592 {
593         struct sbuf_mgr *sbuf = NULL;
594         struct sbuf_ring        *rings = NULL;
595         volatile struct sbuf_ring_header  *ring = NULL;
596         int i, j, n;
597
598         for (i = 0; i < SIPC_ID_NR; i++) {
599                 for (j=0;  j< SMSG_CH_NR; j++) {
600                         sbuf = sbufs[i][j];
601                         if (!sbuf) {
602                                 continue;
603                         }
604                         seq_printf(m, "sbuf dst 0x%0x, channel: 0x%0x, state: %d, smem_virt: 0x%lx, smem_addr: 0x%0x, smem_size: 0x%0x, ringnr: %d \n",
605                                    sbuf->dst, sbuf->channel, sbuf->state, (size_t)sbuf->smem_virt, sbuf->smem_addr, sbuf->smem_size, sbuf->ringnr);
606
607                         for (n=0;  n < sbuf->ringnr;  n++) {
608                                 rings = &(sbuf->rings[n]);
609                                 ring = rings->header;
610                                 seq_printf(m, "sbuf ring[%d]: rxbuf_addr :0x%0x, rxbuf_rdptr :0x%0x, rxbuf_wrptr :0x%0x, rxbuf_size :0x%0x \n", n,  ring->rxbuf_addr, ring->rxbuf_rdptr, ring->rxbuf_wrptr, ring->rxbuf_size);
611                                 seq_printf(m, "sbuf ring[%d]: txbuf_addr :0x%0x, txbuf_rdptr :0x%0x, txbuf_wrptr :0x%0x, txbuf_size :0x%0x \n", n,  ring->txbuf_addr, ring->txbuf_rdptr, ring->txbuf_wrptr, ring->txbuf_size);
612                         }
613                 }
614         }
615         return 0;
616 }
617
/* debugfs open: bind sbuf_debug_show to this file via seq_file */
static int sbuf_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, sbuf_debug_show, inode->i_private);
}
622
/* read-only debugfs file operations; seq_file supplies read/llseek */
static const struct file_operations sbuf_debug_fops = {
        .open = sbuf_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
629
630 int  sbuf_init_debugfs( void *root )
631 {
632         if (!root)
633                 return -ENXIO;
634         debugfs_create_file("sbuf", S_IRUGO, (struct dentry *)root, NULL, &sbuf_debug_fops);
635         return 0;
636 }
637
638 #endif /* CONFIG_DEBUG_FS */
639
640
/* public SIPC shared-buffer API, used by other sipc/modem modules */
EXPORT_SYMBOL(sbuf_create);
EXPORT_SYMBOL(sbuf_destroy);
EXPORT_SYMBOL(sbuf_write);
EXPORT_SYMBOL(sbuf_read);
EXPORT_SYMBOL(sbuf_poll_wait);
EXPORT_SYMBOL(sbuf_status);
EXPORT_SYMBOL(sbuf_register_notifier);

MODULE_AUTHOR("Chen Gaopeng");
MODULE_DESCRIPTION("SIPC/SBUF driver");
MODULE_LICENSE("GPL");