[kernel/linux-2.6.36.git] drivers/net/wireless/bcm4330/src/shared/linux_osl.c
1 /*
2  * Linux OS Independent Layer
3  *
4  * Copyright (C) 1999-2011, Broadcom Corporation
5  * 
6  *         Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  * 
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions of
16  * the license of that module.  An independent module is a module which is not
17  * derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  * 
20  *      Notwithstanding the above, under no circumstances may you combine this
21  * software in any way with any other Broadcom software provided under a license
22  * other than the GPL, without Broadcom's express prior written consent.
23  *
24  * $Id: linux_osl.c,v 1.168.2.6 2010-12-22 23:47:31 $
25  */
26
27 #define LINUX_PORT
28
29 #include <typedefs.h>
30 #include <bcmendian.h>
31 #include <linuxver.h>
32 #include <bcmdefs.h>
33 #include <osl.h>
34 #include <bcmutils.h>
35 #include <linux/delay.h>
36 #include <pcicfg.h>
37
38 #ifdef DHD_DEBUG
39 #define OSL_MSG_ERROR(x) printk x
40 #define OSL_MSG_INFO(x)
41 #else
42 #define OSL_MSG_ERROR(x)
43 #define OSL_MSG_INFO(x)
44 #endif
45
46
47 #ifdef BCMASSERT_LOG
48 #include <bcm_assert_log.h>
49 #endif
50
51 #include <linux/fs.h>
52
53 #define PCI_CFG_RETRY           10
54
55 #define OS_HANDLE_MAGIC         0x1234abcd      /* Magic # to recognize osh */
56 #define BCM_MEM_FILENAME_LEN    24              /* Mem. filename length */
57
58 #ifdef DHD_USE_STATIC_BUF
59 #define DHD_SKB_HDRSIZE                 336
60 #define DHD_SKB_1PAGE_BUFSIZE   ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
61 #define DHD_SKB_2PAGE_BUFSIZE   ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
62 #define DHD_SKB_4PAGE_BUFSIZE   ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
63
64 #define MAX_STATIC_BUF_NUM 16
65 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
66 #define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE)
67 typedef struct bcm_static_buf {
68         struct semaphore static_sem;
69         unsigned char *buf_ptr;
70         unsigned char buf_use[MAX_STATIC_BUF_NUM];
71 } bcm_static_buf_t;
72
73 static bcm_static_buf_t *bcm_static_buf = 0;
74
75 #define MAX_STATIC_PKT_NUM 8
76 typedef struct bcm_static_pkt {
77         struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM];
78         struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM];
79         struct sk_buff *skb_16k;
80         struct semaphore osl_pkt_sem;
81         unsigned char pkt_use[MAX_STATIC_PKT_NUM*2+1];
82 } bcm_static_pkt_t;
83 static bcm_static_pkt_t *bcm_static_skb = 0;
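/*
 * Layout note (see osl_attach() below): dhd_os_prealloc(3, ...) returns one
 * region of STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN bytes.  The first
 * STATIC_BUF_SIZE bytes hold the bcm_static_buf_t bookkeeping (with the
 * bcm_static_pkt_t bookkeeping stashed at offset 2048), and buf_ptr then
 * points at the MAX_STATIC_BUF_NUM chunks of STATIC_BUF_SIZE bytes that
 * osl_malloc() hands out.  The 17 preallocated skb pointers are copied in
 * from dhd_os_prealloc(4, 0).
 */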
84 #endif /* DHD_USE_STATIC_BUF */
85
86 typedef struct bcm_mem_link {
87         struct bcm_mem_link *prev;
88         struct bcm_mem_link *next;
89         uint    size;
90         int     line;
91         char    file[BCM_MEM_FILENAME_LEN];
92 } bcm_mem_link_t;
93
94 #if defined(DSLCPE_DELAY_NOT_YET)
95 struct shared_osl {
96         int long_delay;
97         spinlock_t *lock;
98         void *wl;
99         unsigned long MIPS;
100 };
101 #endif
102
103 struct osl_info {
104         osl_pubinfo_t pub;
105 #ifdef CTFPOOL
106         ctfpool_t *ctfpool;
107 #endif /* CTFPOOL */
108         uint magic;
109         void *pdev;
110         atomic_t malloced;
111         uint failed;
112         uint bustype;
113         bcm_mem_link_t *dbgmem_list;
114 };
115
116 /* PCMCIA attribute space access macros */
117
118 /* Global ASSERT type flag */
119 uint32 g_assert_type = FALSE;
120
121 static int16 linuxbcmerrormap[] =
122 {       0,                      /* 0 */
123         -EINVAL,                /* BCME_ERROR */
124         -EINVAL,                /* BCME_BADARG */
125         -EINVAL,                /* BCME_BADOPTION */
126         -EINVAL,                /* BCME_NOTUP */
127         -EINVAL,                /* BCME_NOTDOWN */
128         -EINVAL,                /* BCME_NOTAP */
129         -EINVAL,                /* BCME_NOTSTA */
130         -EINVAL,                /* BCME_BADKEYIDX */
131         -EINVAL,                /* BCME_RADIOOFF */
132         -EINVAL,                /* BCME_NOTBANDLOCKED */
133         -EINVAL,                /* BCME_NOCLK */
134         -EINVAL,                /* BCME_BADRATESET */
135         -EINVAL,                /* BCME_BADBAND */
136         -E2BIG,                 /* BCME_BUFTOOSHORT */
137         -E2BIG,                 /* BCME_BUFTOOLONG */
138         -EBUSY,                 /* BCME_BUSY */
139         -EINVAL,                /* BCME_NOTASSOCIATED */
140         -EINVAL,                /* BCME_BADSSIDLEN */
141         -EINVAL,                /* BCME_OUTOFRANGECHAN */
142         -EINVAL,                /* BCME_BADCHAN */
143         -EFAULT,                /* BCME_BADADDR */
144         -ENOMEM,                /* BCME_NORESOURCE */
145         -EOPNOTSUPP,            /* BCME_UNSUPPORTED */
146         -EMSGSIZE,              /* BCME_BADLENGTH */
147         -EINVAL,                /* BCME_NOTREADY */
148         -EPERM,                 /* BCME_EPERM */
149         -ENOMEM,                /* BCME_NOMEM */
150         -EINVAL,                /* BCME_ASSOCIATED */
151         -ERANGE,                /* BCME_RANGE */
152         -EINVAL,                /* BCME_NOTFOUND */
153         -EINVAL,                /* BCME_WME_NOT_ENABLED */
154         -EINVAL,                /* BCME_TSPEC_NOTFOUND */
155         -EINVAL,                /* BCME_ACM_NOTSUPPORTED */
156         -EINVAL,                /* BCME_NOT_WME_ASSOCIATION */
157         -EIO,                   /* BCME_SDIO_ERROR */
158         -ENODEV,                /* BCME_DONGLE_DOWN */
159         -EINVAL,                /* BCME_VERSION */
160         -EIO,                   /* BCME_TXFAIL */
161         -EIO,                   /* BCME_RXFAIL */
162         -ENODEV,                /* BCME_NODEVICE */
163         -EINVAL,                /* BCME_NMODE_DISABLED */
164         -ENODATA,               /* BCME_NONRESIDENT */
165
/* When a new error code is added to bcmutils.h, add the corresponding
 * OS-specific error translation here as well.
 */
/* check if BCME_LAST changed since the last time this function was updated */
#if BCME_LAST != -42
#error "You need to add an OS error translation in the linuxbcmerrormap \
        for the new error code defined in bcmutils.h"
#endif
174 };
175
176 /* translate bcmerrors into linux errors */
177 int
178 osl_error(int bcmerror)
179 {
180         if (bcmerror > 0)
181                 bcmerror = 0;
182         else if (bcmerror < BCME_LAST)
183                 bcmerror = BCME_ERROR;
184
185         /* Array bounds covered by ASSERT in osl_attach */
186         return linuxbcmerrormap[-bcmerror];
187 }
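/*
 * Usage sketch (illustrative only; not a call site in this file): a caller
 * that gets a BCM error code back from the dongle would typically do
 *
 *     int bcmerror = some_dongle_ioctl(...);    // hypothetical caller
 *     if (bcmerror)
 *             return osl_error(bcmerror);       // e.g. BCME_NOMEM -> -ENOMEM
 *
 * so BCM error codes stay inside the driver and only the mapped negative
 * errno values reach the kernel and user space.
 */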
188
189 void * dhd_os_prealloc(int section, unsigned long size);
190 osl_t *
191 osl_attach(void *pdev, uint bustype, bool pkttag)
192 {
193         osl_t *osh;
194
        osh = kmalloc(sizeof(osl_t), GFP_KERNEL);
        ASSERT(osh);
        if (osh == NULL)
                return NULL;

        bzero(osh, sizeof(osl_t));
200
201         /* Check that error map has the right number of entries in it */
202         ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
203
204         osh->magic = OS_HANDLE_MAGIC;
205         atomic_set(&osh->malloced, 0);
206         osh->failed = 0;
207         osh->dbgmem_list = NULL;
208         osh->pdev = pdev;
209         osh->pub.pkttag = pkttag;
210         osh->bustype = bustype;
211
212         switch (bustype) {
213                 case PCI_BUS:
214                 case SI_BUS:
215                 case PCMCIA_BUS:
216                         osh->pub.mmbus = TRUE;
217                         break;
218                 case JTAG_BUS:
219                 case SDIO_BUS:
220                 case USB_BUS:
221                 case SPI_BUS:
222                 case RPC_BUS:
223                         osh->pub.mmbus = FALSE;
224                         break;
225                 default:
226                         ASSERT(FALSE);
227                         break;
228         }
229
230 #ifdef DHD_USE_STATIC_BUF
        if (!bcm_static_buf) {
                if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE +
                        STATIC_BUF_TOTAL_LEN))) {
                        OSL_MSG_ERROR(("can not alloc static buf!\n"));
                } else {
                        OSL_MSG_INFO(("alloc static buf at %x!\n", (unsigned int)bcm_static_buf));

                        /* Only touch the prealloc'd region if it was actually obtained */
                        init_MUTEX(&bcm_static_buf->static_sem);
                        bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
                }
        }
243
        /* The packet bookkeeping lives inside the prealloc'd region, so skip
         * this if the static buffer itself could not be obtained.
         */
        if (!bcm_static_skb && bcm_static_buf)
        {
246                 int i;
247                 void *skb_buff_ptr = 0;
248                 bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
249                 skb_buff_ptr = dhd_os_prealloc(4, 0);
250
251                 bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*17);
252                 for (i = 0; i < MAX_STATIC_PKT_NUM*2+1; i++)
253                         bcm_static_skb->pkt_use[i] = 0;
254
255                 init_MUTEX(&bcm_static_skb->osl_pkt_sem);
256         }
257 #endif /* DHD_USE_STATIC_BUF */
258
259         return osh;
260 }
261
262 void
263 osl_detach(osl_t *osh)
264 {
        if (osh == NULL)
                return;

#ifdef DHD_USE_STATIC_BUF
        /* Drop the references to the prealloc'd regions on teardown; the
         * memory itself belongs to dhd_os_prealloc() and is not freed here.
         */
        if (bcm_static_buf) {
                bcm_static_buf = 0;
        }
        if (bcm_static_skb) {
                bcm_static_skb = 0;
        }
#endif /* DHD_USE_STATIC_BUF */

        ASSERT(osh->magic == OS_HANDLE_MAGIC);
        kfree(osh);
}
271
272 #ifdef CTFPOOL
273 /*
274  * Allocate and add an object to packet pool.
275  */
276 void *
277 osl_ctfpool_add(osl_t *osh)
278 {
279         struct sk_buff *skb;
280
281         if ((osh == NULL) || (osh->ctfpool == NULL))
282                 return NULL;
283
284         spin_lock_bh(&osh->ctfpool->lock);
285         ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);
286
287         /* No need to allocate more objects */
288         if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
289                 spin_unlock_bh(&osh->ctfpool->lock);
290                 return NULL;
291         }
292
293         /* Allocate a new skb and add it to the ctfpool */
294         skb = dev_alloc_skb(osh->ctfpool->obj_size);
295         if (skb == NULL) {
296                 OSL_MSG_ERROR(("%s: skb alloc of len %d failed\n", __FUNCTION__,
297                        osh->ctfpool->obj_size));
298                 spin_unlock_bh(&osh->ctfpool->lock);
299                 return NULL;
300         }
301
302         /* Add to ctfpool */
303         skb->next = (struct sk_buff *)osh->ctfpool->head;
304         osh->ctfpool->head = skb;
305         osh->ctfpool->fast_frees++;
306         osh->ctfpool->curr_obj++;
307
308         /* Hijack a skb member to store ptr to ctfpool */
309         CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;
310
311         /* Use bit flag to indicate skb from fast ctfpool */
312         PKTFAST(osh, skb) = FASTBUF;
313
314         spin_unlock_bh(&osh->ctfpool->lock);
315
316         return skb;
317 }
318
319 /*
320  * Add new objects to the pool.
321  */
322 void
323 osl_ctfpool_replenish(osl_t *osh, uint thresh)
324 {
325         if ((osh == NULL) || (osh->ctfpool == NULL))
326                 return;
327
328         /* Do nothing if no refills are required */
329         while ((osh->ctfpool->refills > 0) && (thresh--)) {
330                 osl_ctfpool_add(osh);
331                 osh->ctfpool->refills--;
332         }
333 }
334
335 /*
336  * Initialize the packet pool with specified number of objects.
337  */
338 int32
339 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
340 {
341         osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
342         ASSERT(osh->ctfpool);
343         bzero(osh->ctfpool, sizeof(ctfpool_t));
344
345         osh->ctfpool->max_obj = numobj;
346         osh->ctfpool->obj_size = size;
347
348         spin_lock_init(&osh->ctfpool->lock);
349
350         while (numobj--) {
351                 if (!osl_ctfpool_add(osh))
352                         return -1;
353                 osh->ctfpool->fast_frees--;
354         }
355
356         return 0;
357 }
358
359 /*
360  * Cleanup the packet pool objects.
361  */
362 void
363 osl_ctfpool_cleanup(osl_t *osh)
364 {
365         struct sk_buff *skb, *nskb;
366
367         if ((osh == NULL) || (osh->ctfpool == NULL))
368                 return;
369
370         spin_lock_bh(&osh->ctfpool->lock);
371
372         skb = osh->ctfpool->head;
373
374         while (skb != NULL) {
375                 nskb = skb->next;
376                 dev_kfree_skb(skb);
377                 skb = nskb;
378                 osh->ctfpool->curr_obj--;
379         }
380
381         ASSERT(osh->ctfpool->curr_obj == 0);
382         osh->ctfpool->head = NULL;
383         spin_unlock_bh(&osh->ctfpool->lock);
384
385         kfree(osh->ctfpool);
386         osh->ctfpool = NULL;
387 }
388
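/* Dump the ctfpool counters into the caller-supplied bcmstrbuf. */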
389 void
390 osl_ctfpool_stats(osl_t *osh, void *b)
391 {
392         struct bcmstrbuf *bb;
393
394         if ((osh == NULL) || (osh->ctfpool == NULL))
395                 return;
396
406         bb = b;
407
408         ASSERT((osh != NULL) && (bb != NULL));
409
410         bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
411                     osh->ctfpool->max_obj, osh->ctfpool->obj_size,
412                     osh->ctfpool->curr_obj, osh->ctfpool->refills);
413         bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
414                     osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
415                     osh->ctfpool->slow_allocs);
416 }
417
418 static inline struct sk_buff *
419 osl_pktfastget(osl_t *osh, uint len)
420 {
421         struct sk_buff *skb;
422
423         /* Try to do fast allocate. Return null if ctfpool is not in use
424          * or if there are no items in the ctfpool.
425          */
426         if (osh->ctfpool == NULL)
427                 return NULL;
428
429         spin_lock_bh(&osh->ctfpool->lock);
430         if (osh->ctfpool->head == NULL) {
431                 ASSERT(osh->ctfpool->curr_obj == 0);
432                 osh->ctfpool->slow_allocs++;
433                 spin_unlock_bh(&osh->ctfpool->lock);
434                 return NULL;
435         }
436
437         ASSERT(len <= osh->ctfpool->obj_size);
438
439         /* Get an object from ctfpool */
440         skb = (struct sk_buff *)osh->ctfpool->head;
441         osh->ctfpool->head = (void *)skb->next;
442
443         osh->ctfpool->fast_allocs++;
444         osh->ctfpool->curr_obj--;
445         ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
446         spin_unlock_bh(&osh->ctfpool->lock);
447
448         /* Init skb struct */
449         skb->next = skb->prev = NULL;
450         skb->data = skb->head + 16;
451         skb->tail = skb->head + 16;
452
453         skb->len = 0;
454         skb->cloned = 0;
455 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
456         skb->list = NULL;
457 #endif
458         atomic_set(&skb->users, 1);
459
460         return skb;
461 }
462 #endif /* CTFPOOL */
463
464 /* Return a new packet. zero out pkttag */
465 void * BCMFASTPATH
466 osl_pktget(osl_t *osh, uint len)
467 {
468         struct sk_buff *skb;
469
470 #ifdef CTFPOOL
471         /* Allocate from local pool */
472         skb = osl_pktfastget(osh, len);
473         if ((skb != NULL) || ((skb = dev_alloc_skb(len)) != NULL)) {
474 #else /* CTFPOOL */
475         if ((skb = dev_alloc_skb(len))) {
476 #endif /* CTFPOOL */
477                 skb_put(skb, len);
478                 skb->priority = 0;
479
480
481                 osh->pub.pktalloced++;
482         }
483
484         return ((void*) skb);
485 }
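/*
 * Usage sketch (illustrative only; not a call site in this file): packets
 * obtained here are released through osl_pktfree(), usually via the PKTGET /
 * PKTFREE macros in the OSL header:
 *
 *     void *p = osl_pktget(osh, 2048);
 *     if (p != NULL) {
 *             ...fill the skb data...
 *             osl_pktfree(osh, p, FALSE);       // FALSE: not a tx completion
 *     }
 *
 * Note that osl_pktget() already skb_put()s the full length, so callers get a
 * buffer of exactly len bytes rather than growing the skb afterwards.
 */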
486
487 #ifdef CTFPOOL
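/*
 * Return a ctfpool-allocated skb to its pool instead of freeing it, resetting
 * the few fields the driver may have touched so it can be handed out again.
 */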
488 static inline void
489 osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
490 {
491         ctfpool_t *ctfpool;
492
493         ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
494         ASSERT(ctfpool != NULL);
495
496         /* Add object to the ctfpool */
497         spin_lock_bh(&ctfpool->lock);
498         skb->next = (struct sk_buff *)ctfpool->head;
499         ctfpool->head = (void *)skb;
500
501         ctfpool->fast_frees++;
502         ctfpool->curr_obj++;
503
504         ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
505         spin_unlock_bh(&ctfpool->lock);
506
507 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
508         skb->tstamp.tv.sec = 0;
509 #else
510         skb->stamp.tv_sec = 0;
511 #endif
512
513         /* We only need to init the fields that we change */
514         skb->dev = NULL;
515         skb->dst = NULL;
516         memset(skb->cb, 0, sizeof(skb->cb));
517         skb->ip_summed = 0;
518         skb->destructor = NULL;
519 }
520 #endif /* CTFPOOL */
521
522 /* Free the driver packet. Free the tag if present */
523 void BCMFASTPATH
524 osl_pktfree(osl_t *osh, void *p, bool send)
525 {
526         struct sk_buff *skb, *nskb;
527
528         skb = (struct sk_buff*) p;
529
530         if (send && osh->pub.tx_fn)
531                 osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
532
533         /* perversion: we use skb->next to chain multi-skb packets */
534         while (skb) {
535                 nskb = skb->next;
536                 skb->next = NULL;
537
538
539 #ifdef CTFPOOL
540                 if (PKTISFAST(osh, skb))
541                         osl_pktfastfree(osh, skb);
542                 else {
543 #else /* CTFPOOL */
544                 {
545 #endif /* CTFPOOL */
546
547                         if (skb->destructor)
548                                 /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
549                                  * destructor exists
550                                  */
551                                 dev_kfree_skb_any(skb);
552                         else
553                                 /* can free immediately (even in_irq()) if destructor
554                                  * does not exist
555                                  */
556                                 dev_kfree_skb(skb);
557                 }
558
559                 osh->pub.pktalloced--;
560
561                 skb = nskb;
562         }
563 }
564
565 #ifdef DHD_USE_STATIC_BUF
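/*
 * Hand out one of the preallocated skbs (4K/8K/16K classes) when possible and
 * fall back to osl_pktget() for oversized requests or when every static
 * packet is already in use.
 */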
566 void*
567 osl_pktget_static(osl_t *osh, uint len)
568 {
569         int i = 0;
570         struct sk_buff *skb;
571
572         if (len > DHD_SKB_4PAGE_BUFSIZE)
573         {
574                 OSL_MSG_ERROR(("osl_pktget_static: Do we really need this big skb?? len=%d\n", len));
575                 return osl_pktget(osh, len);
576         }
577
578         down(&bcm_static_skb->osl_pkt_sem);
579         if (len <= DHD_SKB_1PAGE_BUFSIZE)
580         {
581                 for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
582                 {
583                         if (bcm_static_skb->pkt_use[i] == 0)
584                                 break;
585                 }
586
587                 if (i != MAX_STATIC_PKT_NUM)
588                 {
589                         bcm_static_skb->pkt_use[i] = 1;
590                         up(&bcm_static_skb->osl_pkt_sem);
591
592                         skb = bcm_static_skb->skb_4k[i];
593                         skb->tail = skb->data + len;
594                         skb->len = len;
595
596                         return skb;
597                 }
598         }
599
        if (len <= DHD_SKB_2PAGE_BUFSIZE)
        {
                for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
                {
                        if (bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] == 0)
                                break;
                }

                if (i != MAX_STATIC_PKT_NUM)
                {
                        bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] = 1;
                        up(&bcm_static_skb->osl_pkt_sem);

                        skb = bcm_static_skb->skb_8k[i];
                        skb->tail = skb->data + len;
                        skb->len = len;

                        return skb;
                }
        }
620
621         if (bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] == 0) 
622         {
623                 bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] = 1;
624                 up(&bcm_static_skb->osl_pkt_sem);
625
626                 skb = bcm_static_skb->skb_16k;
627                 skb->tail = skb->data + len;
628                 skb->len = len;
629
630                 return skb;
631         }
632
633         up(&bcm_static_skb->osl_pkt_sem);
634         OSL_MSG_ERROR(("osl_pktget_static: all static pkt in use!\n"));
635         return osl_pktget(osh, len);
636 }
637
638
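/*
 * Release a packet obtained from osl_pktget_static(): clear its in-use flag
 * if it is one of the preallocated skbs, otherwise hand it to osl_pktfree().
 */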
639 void
640 osl_pktfree_static(osl_t *osh, void *p, bool send)
641 {
        int i;

        for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
                if (p == bcm_static_skb->skb_4k[i]) {
                        down(&bcm_static_skb->osl_pkt_sem);
                        bcm_static_skb->pkt_use[i] = 0;
                        up(&bcm_static_skb->osl_pkt_sem);
                        return;
                }
                if (p == bcm_static_skb->skb_8k[i]) {
                        down(&bcm_static_skb->osl_pkt_sem);
                        bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] = 0;
                        up(&bcm_static_skb->osl_pkt_sem);
                        return;
                }
        }

        if (p == bcm_static_skb->skb_16k) {
                down(&bcm_static_skb->osl_pkt_sem);
                bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM * 2] = 0;
                up(&bcm_static_skb->osl_pkt_sem);
                return;
        }

        osl_pktfree(osh, p, send);
}
657 #endif /* DHD_USE_STATIC_BUF */
658 uint32
659 osl_pci_read_config(osl_t *osh, uint offset, uint size)
660 {
661         uint val = 0;
662         uint retry = PCI_CFG_RETRY;
663
664         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
665
666         /* only 4byte access supported */
667         ASSERT(size == 4);
668
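        /* A read of all-ones usually means the device did not respond (e.g. it
         * has not finished powering up), so retry a bounded number of times.
         */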
669         do {
670                 pci_read_config_dword(osh->pdev, offset, &val);
671                 if (val != 0xffffffff)
672                         break;
673         } while (retry--);
674
675
676         return (val);
677 }
678
679 void
680 osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
681 {
682         uint retry = PCI_CFG_RETRY;
683
684         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
685
686         /* only 4byte access supported */
687         ASSERT(size == 4);
688
689         do {
690                 pci_write_config_dword(osh->pdev, offset, val);
691                 if (offset != PCI_BAR0_WIN)
692                         break;
693                 if (osl_pci_read_config(osh, offset, size) == val)
694                         break;
695         } while (retry--);
696
697 }
698
699 /* return bus # for the pci device pointed by osh->pdev */
700 uint
701 osl_pci_bus(osl_t *osh)
702 {
703         ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
704
705         return ((struct pci_dev *)osh->pdev)->bus->number;
706 }
707
708 /* return slot # for the pci device pointed by osh->pdev */
709 uint
710 osl_pci_slot(osl_t *osh)
711 {
712         ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
713
714         return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
715 }
716
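/* PCMCIA attribute-space access is unused on this target; the helpers below
 * are empty stubs kept so the OSL API stays uniform across bus types.
 */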
717 static void
718 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
719 {
720 }
721
722 void
723 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
724 {
725         osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
726 }
727
728 void
729 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
730 {
731         osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
732 }
733
734 void *
735 osl_malloc(osl_t *osh, uint size)
736 {
737         void *addr;
738
739         /* only ASSERT if osh is defined */
740         if (osh)
741                 ASSERT(osh->magic == OS_HANDLE_MAGIC);
742
743 #ifdef DHD_USE_STATIC_BUF
744         if (bcm_static_buf)
745         {
746                 int i = 0;
747                 if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
748                 {
749                         down(&bcm_static_buf->static_sem);
750                         
751                         for (i = 0; i < MAX_STATIC_BUF_NUM; i++)
752                         {
753                                 if (bcm_static_buf->buf_use[i] == 0)
754                                         break;
755                         }
756                         
757                         if (i == MAX_STATIC_BUF_NUM)
758                         {
759                                 up(&bcm_static_buf->static_sem);
760                                 OSL_MSG_INFO(("osl_malloc: all static buff in use!\n"));
761                                 goto original;
762                         }
763                         
764                         bcm_static_buf->buf_use[i] = 1;
765                         up(&bcm_static_buf->static_sem);
766
767                         bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);
768                         if (osh)
769                                 atomic_add(size, &osh->malloced);
770
771                         return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));
772                 }
773         }
774 original:
#endif /* DHD_USE_STATIC_BUF */
776
        if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
                OSL_MSG_ERROR(("osl_malloc: GFP_ATOMIC failed, trying GFP_KERNEL\n"));
                if ((addr = kmalloc(size, GFP_KERNEL)) == NULL) {
                        OSL_MSG_ERROR(("osl_malloc: GFP_KERNEL failed also\n"));
                        if (osh)
                                osh->failed++;
                        return (NULL);
                }
        }
786         if (osh)
787                 atomic_add(size, &osh->malloced);
788
789         return (addr);
790 }
791
792 void
793 osl_mfree(osl_t *osh, void *addr, uint size)
794 {
795 #ifdef DHD_USE_STATIC_BUF
796         if (bcm_static_buf)
797         {
798                 if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
799                         <= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
800                 {
801                         int buf_idx = 0;
802                         
803                         buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;
804                         
805                         down(&bcm_static_buf->static_sem);
806                         bcm_static_buf->buf_use[buf_idx] = 0;
807                         up(&bcm_static_buf->static_sem);
808
809                         if (osh) {
810                                 ASSERT(osh->magic == OS_HANDLE_MAGIC);
811                                 atomic_sub(size, &osh->malloced);
812                         }
813                         return;
814                 }
815         }
#endif /* DHD_USE_STATIC_BUF */
817         if (osh) {
818                 ASSERT(osh->magic == OS_HANDLE_MAGIC);
819                 atomic_sub(size, &osh->malloced);
820         }
821         kfree(addr);
822 }
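/*
 * Usage sketch (illustrative only): osl_malloc()/osl_mfree() are size-tracked,
 * so callers (normally via the MALLOC/MFREE macros) must pass the same size to
 * both calls for the osh->malloced accounting to stay balanced:
 *
 *     struct foo *fp = osl_malloc(osh, sizeof(*fp));   // "struct foo" is hypothetical
 *     if (fp != NULL) {
 *             ...
 *             osl_mfree(osh, fp, sizeof(*fp));
 *     }
 */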
823
824 uint
825 osl_malloced(osl_t *osh)
826 {
827         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
828         return (atomic_read(&osh->malloced));
829 }
830
831 uint
832 osl_malloc_failed(osl_t *osh)
833 {
834         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
835         return (osh->failed);
836 }
837
838
839
840 uint
841 osl_dma_consistent_align(void)
842 {
843         return (PAGE_SIZE);
844 }
845
846 void*
847 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
848 {
849         uint16 align = (1 << align_bits);
850         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
851
852         if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
853                 size += align;
854         *alloced = size;
855
856         return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
857 }
858
859 void
860 osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
861 {
862         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
863
864         pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
865 }
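/*
 * Usage sketch (illustrative only): *alloced reports the (possibly padded)
 * size that was actually requested, and that same size and physical address
 * are handed back when freeing:
 *
 *     uint alloced;
 *     ulong pa;
 *     void *va = osl_dma_alloc_consistent(osh, ringsz, 4, &alloced, &pa);
 *     ...                                       // "ringsz" is hypothetical
 *     osl_dma_free_consistent(osh, va, alloced, pa);
 */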
866
867 uint BCMFASTPATH
868 osl_dma_map(osl_t *osh, void *va, uint size, int direction)
869 {
870         int dir;
871
872         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
873         dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
874         return (pci_map_single(osh->pdev, va, size, dir));
875 }
876
877 void BCMFASTPATH
878 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
879 {
880         int dir;
881
882         ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
883         dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
884         pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
885 }
886
887 #if defined(BCMASSERT_LOG)
888 void
889 osl_assert(char *exp, char *file, int line)
890 {
891         char tempbuf[256];
892         char *basename;
893
894         basename = strrchr(file, '/');
895         /* skip the '/' */
896         if (basename)
897                 basename++;
898
899         if (!basename)
900                 basename = file;
901
902 #ifdef BCMASSERT_LOG
        snprintf(tempbuf, sizeof(tempbuf), "\"%s\": file \"%s\", line %d\n",
                exp, basename, line);
905
906         bcm_assert_log(tempbuf);
907 #endif /* BCMASSERT_LOG */
908
909
910 #ifdef __COVERITY__
911         /* Inform Coverity that execution will not continue past this point */
912         __coverity_panic__();
913 #endif
914 }
915 #endif 
916
917 void
918 osl_delay(uint usec)
919 {
920         uint d;
921
922         while (usec > 0) {
923                 d = MIN(usec, 1000);
924                 udelay(d);
925                 usec -= d;
926         }
927 }
928
929
930 /* Clone a packet.
931  * The pkttag contents are NOT cloned.
932  */
933 void *
934 osl_pktdup(osl_t *osh, void *skb)
935 {
936         void * p;
937
938         if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
939                 return NULL;
940
941 #ifdef CTFPOOL
942         if (PKTISFAST(osh, skb)) {
943                 ctfpool_t *ctfpool;
944
945                 /* if the buffer allocated from ctfpool is cloned then
946                  * we can't be sure when it will be freed. since there
947                  * is a chance that we will be losing a buffer
948                  * from our pool, we increment the refill count for the
949                  * object to be alloced later.
950                  */
951                 ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
952                 ASSERT(ctfpool != NULL);
953                 PKTCLRFAST(osh, p);
954                 PKTCLRFAST(osh, skb);
955                 ctfpool->refills++;
956         }
957 #endif /* CTFPOOL */
958
959         /* skb_clone copies skb->cb.. we don't want that */
960         if (osh->pub.pkttag)
961                 bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
962
963         /* Increment the packet counter */
964         osh->pub.pktalloced++;
965         return (p);
966 }
967
968
969 /*
970  * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
971  */
972
973 /*
974  * BINOSL selects the slightly slower function-call-based binary compatible osl.
975  */
976
977 /* Linux Kernel: File Operations: start */
978 void *
979 osl_os_open_image(char *filename)
980 {
981         struct file *fp;
982
983         fp = filp_open(filename, O_RDONLY, 0);
984         /*
985          * 2.6.11 (FC4) supports filp_open() but later revs don't?
986          * Alternative:
987          * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
988          * ???
989          */
        if (IS_ERR(fp))
                fp = NULL;

        return fp;
994 }
995
996 int
997 osl_os_get_image_block(char *buf, int len, void *image)
998 {
999         struct file *fp = (struct file *)image;
1000         int rdlen;
1001
1002         if (!image)
1003                 return 0;
1004
1005         rdlen = kernel_read(fp, fp->f_pos, buf, len);
1006         if (rdlen > 0)
1007                 fp->f_pos += rdlen;
1008
1009         return rdlen;
1010 }
1011
1012 void
1013 osl_os_close_image(void *image)
1014 {
1015         if (image)
1016                 filp_close((struct file *)image, NULL);
1017 }
1018 /* Linux Kernel: File Operations: end */
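/*
 * Usage sketch (illustrative only; the firmware/nvram download paths use these
 * helpers in roughly this way): open the image, pull it in block by block,
 * then close it.
 *
 *     void *img = osl_os_open_image("/path/to/firmware.bin");   // hypothetical path
 *     char buf[2048];
 *     int len;
 *
 *     if (img != NULL) {
 *             while ((len = osl_os_get_image_block(buf, sizeof(buf), img)) > 0)
 *                     ...consume len bytes of buf...
 *             osl_os_close_image(img);
 *     }
 */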