2 * Linux OS Independent Layer
4 * Copyright (C) 1999-2011, Broadcom Corporation
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
20 * Notwithstanding the above, under no circumstances may you combine this
21 * software in any way with any other Broadcom software provided under a license
22 * other than the GPL, without Broadcom's express prior written consent.
24 * $Id: linux_osl.c,v 1.168.2.6 2010-12-22 23:47:31 $
30 #include <bcmendian.h>
35 #include <linux/delay.h>
39 #define OSL_MSG_ERROR(x) printk x
40 #define OSL_MSG_INFO(x)
42 #define OSL_MSG_ERROR(x)
43 #define OSL_MSG_INFO(x)
48 #include <bcm_assert_log.h>
53 #define PCI_CFG_RETRY 10
55 #define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */
56 #define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
58 #ifdef DHD_USE_STATIC_BUF
59 #define DHD_SKB_HDRSIZE 336
60 #define DHD_SKB_1PAGE_BUFSIZE ((PAGE_SIZE*1)-DHD_SKB_HDRSIZE)
61 #define DHD_SKB_2PAGE_BUFSIZE ((PAGE_SIZE*2)-DHD_SKB_HDRSIZE)
62 #define DHD_SKB_4PAGE_BUFSIZE ((PAGE_SIZE*4)-DHD_SKB_HDRSIZE)
64 #define MAX_STATIC_BUF_NUM 16
65 #define STATIC_BUF_SIZE (PAGE_SIZE*2)
66 #define STATIC_BUF_TOTAL_LEN (MAX_STATIC_BUF_NUM*STATIC_BUF_SIZE)
/* Bookkeeping for the DHD preallocated ("static") buffer pool used by
 * osl_malloc()/osl_mfree() when DHD_USE_STATIC_BUF is set.
 * NOTE(review): the closing "} bcm_static_buf_t;" line is missing from this view.
 */
typedef struct bcm_static_buf {
	struct semaphore static_sem;			/* serializes updates to buf_use[] */
	unsigned char *buf_ptr;				/* base of the buffer area (after bookkeeping page) */
	unsigned char buf_use[MAX_STATIC_BUF_NUM];	/* per-slot in-use flags: 0 = free, 1 = taken */

/* Singleton pool instance; populated once in osl_attach() from dhd_os_prealloc(3, ...) */
static bcm_static_buf_t *bcm_static_buf = 0;
75 #define MAX_STATIC_PKT_NUM 8
/* Bookkeeping for preallocated skbs handed out by osl_pktget_static().
 * The pkt_use[] layout is: [0..MAX_STATIC_PKT_NUM) for the 4K skbs,
 * [MAX_STATIC_PKT_NUM..2*MAX_STATIC_PKT_NUM) for the 8K skbs, and the
 * final entry for the single 16K skb.
 * NOTE(review): the closing "} bcm_static_pkt_t;" line is missing from this view.
 */
typedef struct bcm_static_pkt {
	struct sk_buff *skb_4k[MAX_STATIC_PKT_NUM];	/* 1-page (DHD_SKB_1PAGE_BUFSIZE) skbs */
	struct sk_buff *skb_8k[MAX_STATIC_PKT_NUM];	/* 2-page skbs */
	struct sk_buff *skb_16k;			/* single 4-page skb */
	struct semaphore osl_pkt_sem;			/* serializes access to pkt_use[] */
	unsigned char pkt_use[MAX_STATIC_PKT_NUM*2+1];	/* in-use flags (0 = free) */

/* Singleton; populated in osl_attach() from dhd_os_prealloc(4, 0) */
static bcm_static_pkt_t *bcm_static_skb = 0;
84 #endif /* DHD_USE_STATIC_BUF */
86 typedef struct bcm_mem_link {
87 struct bcm_mem_link *prev;
88 struct bcm_mem_link *next;
91 char file[BCM_MEM_FILENAME_LEN];
94 #if defined(DSLCPE_DELAY_NOT_YET)
113 bcm_mem_link_t *dbgmem_list;
116 /* PCMCIA attribute space access macros */
118 /* Global ASSERT type flag */
119 uint32 g_assert_type = FALSE;
121 static int16 linuxbcmerrormap[] =
123 -EINVAL, /* BCME_ERROR */
124 -EINVAL, /* BCME_BADARG */
125 -EINVAL, /* BCME_BADOPTION */
126 -EINVAL, /* BCME_NOTUP */
127 -EINVAL, /* BCME_NOTDOWN */
128 -EINVAL, /* BCME_NOTAP */
129 -EINVAL, /* BCME_NOTSTA */
130 -EINVAL, /* BCME_BADKEYIDX */
131 -EINVAL, /* BCME_RADIOOFF */
132 -EINVAL, /* BCME_NOTBANDLOCKED */
133 -EINVAL, /* BCME_NOCLK */
134 -EINVAL, /* BCME_BADRATESET */
135 -EINVAL, /* BCME_BADBAND */
136 -E2BIG, /* BCME_BUFTOOSHORT */
137 -E2BIG, /* BCME_BUFTOOLONG */
138 -EBUSY, /* BCME_BUSY */
139 -EINVAL, /* BCME_NOTASSOCIATED */
140 -EINVAL, /* BCME_BADSSIDLEN */
141 -EINVAL, /* BCME_OUTOFRANGECHAN */
142 -EINVAL, /* BCME_BADCHAN */
143 -EFAULT, /* BCME_BADADDR */
144 -ENOMEM, /* BCME_NORESOURCE */
145 -EOPNOTSUPP, /* BCME_UNSUPPORTED */
146 -EMSGSIZE, /* BCME_BADLENGTH */
147 -EINVAL, /* BCME_NOTREADY */
148 -EPERM, /* BCME_EPERM */
149 -ENOMEM, /* BCME_NOMEM */
150 -EINVAL, /* BCME_ASSOCIATED */
151 -ERANGE, /* BCME_RANGE */
152 -EINVAL, /* BCME_NOTFOUND */
153 -EINVAL, /* BCME_WME_NOT_ENABLED */
154 -EINVAL, /* BCME_TSPEC_NOTFOUND */
155 -EINVAL, /* BCME_ACM_NOTSUPPORTED */
156 -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
157 -EIO, /* BCME_SDIO_ERROR */
158 -ENODEV, /* BCME_DONGLE_DOWN */
159 -EINVAL, /* BCME_VERSION */
160 -EIO, /* BCME_TXFAIL */
161 -EIO, /* BCME_RXFAIL */
162 -ENODEV, /* BCME_NODEVICE */
163 -EINVAL, /* BCME_NMODE_DISABLED */
164 -ENODATA, /* BCME_NONRESIDENT */
166 /* When an new error code is added to bcmutils.h, add os
167 * specific error translation here as well
169 /* check if BCME_LAST changed since the last time this function was updated */
171 #error "You need to add a OS error translation in the linuxbcmerrormap \
172 for new error code defined in bcmutils.h"
176 /* translate bcmerrors into linux errors */
178 osl_error(int bcmerror)
182 else if (bcmerror < BCME_LAST)
183 bcmerror = BCME_ERROR;
185 /* Array bounds covered by ASSERT in osl_attach */
186 return linuxbcmerrormap[-bcmerror];
/* DHD supplies preallocated memory sections; section/size semantics live in the DHD layer. */
void * dhd_os_prealloc(int section, unsigned long size);

/* Allocate and initialize the OS abstraction handle for a bus device.
 * pdev:    bus device handle (e.g. struct pci_dev *)
 * bustype: bus identifier stored on the handle
 * pkttag:  whether callers use the packet-tag area in skb->cb
 * NOTE(review): several lines of this function are missing from this view;
 * comments describe only the visible statements.
 */
osl_attach(void *pdev, uint bustype, bool pkttag)
	// osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
	osh = kmalloc(sizeof(osl_t), GFP_KERNEL);	/* attach time: sleeping alloc is fine */
	bzero(osh, sizeof(osl_t));	/* NOTE(review): no visible NULL check on the kmalloc result */

	/* Check that error map has the right number of entries in it */
	ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));

	osh->magic = OS_HANDLE_MAGIC;		/* stamp so later entry points can validate the handle */
	atomic_set(&osh->malloced, 0);		/* running byte count of osl_malloc allocations */
	osh->dbgmem_list = NULL;
	osh->pub.pkttag = pkttag;
	osh->bustype = bustype;
	osh->pub.mmbus = TRUE;			/* memory-mapped register access (one #ifdef branch) */
	osh->pub.mmbus = FALSE;			/* indirect register access (the alternate branch) */
#ifdef DHD_USE_STATIC_BUF
	if (!bcm_static_buf) {
		/* Section 3 of DHD prealloc backs the static-buffer pool (bookkeeping + buffers) */
		if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(3, STATIC_BUF_SIZE+
			STATIC_BUF_TOTAL_LEN))) {
			OSL_MSG_ERROR(("can not alloc static buf!\n"));
		OSL_MSG_INFO(("alloc static buf at %x!\n", (unsigned int)bcm_static_buf));

		init_MUTEX(&bcm_static_buf->static_sem);	/* protects buf_use[] */

		/* Buffers start one STATIC_BUF_SIZE past the bookkeeping header */
		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
			void *skb_buff_ptr = 0;
			/* Static-skb bookkeeping lives 2048 bytes into the same region */
			bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
			skb_buff_ptr = dhd_os_prealloc(4, 0);	/* section 4: preallocated skb pointer array */

			/* 17 pointers = 8 x 4K skbs + 8 x 8K skbs + 1 x 16K skb */
			bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*17);
			for (i = 0; i < MAX_STATIC_PKT_NUM*2+1; i++)
				bcm_static_skb->pkt_use[i] = 0;	/* all slots initially free */

			init_MUTEX(&bcm_static_skb->osl_pkt_sem);	/* protects pkt_use[] */
#endif /* DHD_USE_STATIC_BUF */
/* Tear down an OSL handle created by osl_attach().
 * NOTE(review): the body is mostly missing from this view; only the
 * handle-validation assert is visible.
 */
osl_detach(osl_t *osh)
	ASSERT(osh->magic == OS_HANDLE_MAGIC);	/* reject handles not produced by osl_attach */
/*
 * Allocate and add an object to packet pool.
 * Allocates one skb of the pool's fixed obj_size, pushes it on the pool's
 * free list, and tags it as pool-owned. NOTE(review): return statements and
 * some braces are missing from this view.
 */
osl_ctfpool_add(osl_t *osh)
	/* Nothing to do without an initialized pool */
	if ((osh == NULL) || (osh->ctfpool == NULL))

	spin_lock_bh(&osh->ctfpool->lock);	/* _bh: pool is also touched from softirq context */
	ASSERT(osh->ctfpool->curr_obj <= osh->ctfpool->max_obj);

	/* No need to allocate more objects */
	if (osh->ctfpool->curr_obj == osh->ctfpool->max_obj) {
		spin_unlock_bh(&osh->ctfpool->lock);

	/* Allocate a new skb and add it to the ctfpool */
	skb = dev_alloc_skb(osh->ctfpool->obj_size);
		OSL_MSG_ERROR(("%s: skb alloc of len %d failed\n", __FUNCTION__,
			osh->ctfpool->obj_size));
		spin_unlock_bh(&osh->ctfpool->lock);

	/* Push onto the singly-linked free list, chained through skb->next */
	skb->next = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = skb;
	osh->ctfpool->fast_frees++;
	osh->ctfpool->curr_obj++;

	/* Hijack a skb member to store ptr to ctfpool */
	CTFPOOLPTR(osh, skb) = (void *)osh->ctfpool;

	/* Use bit flag to indicate skb from fast ctfpool */
	PKTFAST(osh, skb) = FASTBUF;

	spin_unlock_bh(&osh->ctfpool->lock);
320 * Add new objects to the pool.
323 osl_ctfpool_replenish(osl_t *osh, uint thresh)
325 if ((osh == NULL) || (osh->ctfpool == NULL))
328 /* Do nothing if no refills are required */
329 while ((osh->ctfpool->refills > 0) && (thresh--)) {
330 osl_ctfpool_add(osh);
331 osh->ctfpool->refills--;
336 * Initialize the packet pool with specified number of objects.
339 osl_ctfpool_init(osl_t *osh, uint numobj, uint size)
341 osh->ctfpool = kmalloc(sizeof(ctfpool_t), GFP_ATOMIC);
342 ASSERT(osh->ctfpool);
343 bzero(osh->ctfpool, sizeof(ctfpool_t));
345 osh->ctfpool->max_obj = numobj;
346 osh->ctfpool->obj_size = size;
348 spin_lock_init(&osh->ctfpool->lock);
351 if (!osl_ctfpool_add(osh))
353 osh->ctfpool->fast_frees--;
/*
 * Cleanup the packet pool objects.
 * Drains the pool's free list; afterwards every object must have been
 * returned (ASSERT below). NOTE(review): the per-skb free call and the
 * kfree of the pool struct are missing from this view.
 */
osl_ctfpool_cleanup(osl_t *osh)
	struct sk_buff *skb, *nskb;	/* nskb: next pointer saved before each skb is released */

	if ((osh == NULL) || (osh->ctfpool == NULL))

	spin_lock_bh(&osh->ctfpool->lock);

	skb = osh->ctfpool->head;

	while (skb != NULL) {
		osh->ctfpool->curr_obj--;

	ASSERT(osh->ctfpool->curr_obj == 0);	/* all pooled objects accounted for */
	osh->ctfpool->head = NULL;
	spin_unlock_bh(&osh->ctfpool->lock);
/* Dump ctfpool counters into the caller-provided bcmstrbuf (b).
 * NOTE(review): the static-buf release fragments and the assignment of
 * 'bb' from 'b' are partially missing from this view.
 */
osl_ctfpool_stats(osl_t *osh, void *b)
	struct bcmstrbuf *bb;

	if ((osh == NULL) || (osh->ctfpool == NULL))

#ifdef DHD_USE_STATIC_BUF
	/* Static pools are released here so their counters reset with the pool */
	if (bcm_static_buf) {
	if (bcm_static_skb) {
#endif /* DHD_USE_STATIC_BUF */

	ASSERT((osh != NULL) && (bb != NULL));

	bcm_bprintf(bb, "max_obj %d obj_size %d curr_obj %d refills %d\n",
		osh->ctfpool->max_obj, osh->ctfpool->obj_size,
		osh->ctfpool->curr_obj, osh->ctfpool->refills);
	bcm_bprintf(bb, "fast_allocs %d fast_frees %d slow_allocs %d\n",
		osh->ctfpool->fast_allocs, osh->ctfpool->fast_frees,
		osh->ctfpool->slow_allocs);
/* Pop one skb off the ctfpool free list, or NULL so the caller falls back
 * to dev_alloc_skb(). NOTE(review): return statements and some braces are
 * missing from this view.
 */
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)

	spin_lock_bh(&osh->ctfpool->lock);
	if (osh->ctfpool->head == NULL) {
		/* Pool exhausted: count it and let the caller take the slow path */
		ASSERT(osh->ctfpool->curr_obj == 0);
		osh->ctfpool->slow_allocs++;
		spin_unlock_bh(&osh->ctfpool->lock);

	ASSERT(len <= osh->ctfpool->obj_size);	/* pooled skbs are fixed-size */

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	spin_unlock_bh(&osh->ctfpool->lock);

	/* Init skb struct */
	skb->next = skb->prev = NULL;
	skb->data = skb->head + 16;	/* 16 bytes of headroom — assumed to match alloc-time layout, TODO confirm */
	skb->tail = skb->head + 16;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	atomic_set(&skb->users, 1);	/* reset refcount on the recycled skb */
/* Return a new packet. zero out pkttag */
osl_pktget(osl_t *osh, uint len)
	/* Allocate from local pool */
	skb = osl_pktfastget(osh, len);	/* CTFPOOL build: try the fast pool first */
	if ((skb != NULL) || ((skb = dev_alloc_skb(len)) != NULL)) {
	if ((skb = dev_alloc_skb(len))) {	/* non-CTFPOOL build: alternate #ifdef branch */
		osh->pub.pktalloced++;	/* every successful allocation is counted; freed in osl_pktfree */
	return ((void*) skb);
/* Return a pool-owned skb to its ctfpool free list instead of the kernel
 * allocator, then scrub the fields osl code may have modified.
 */
osl_pktfastfree(osl_t *osh, struct sk_buff *skb)
	/* The owning pool pointer was stashed in the skb by osl_ctfpool_add() */
	ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
	ASSERT(ctfpool != NULL);

	/* Add object to the ctfpool */
	spin_lock_bh(&ctfpool->lock);
	skb->next = (struct sk_buff *)ctfpool->head;
	ctfpool->head = (void *)skb;

	ctfpool->fast_frees++;

	ASSERT(ctfpool->curr_obj <= ctfpool->max_obj);
	spin_unlock_bh(&ctfpool->lock);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
	skb->tstamp.tv.sec = 0;
	skb->stamp.tv_sec = 0;	/* pre-2.6.14 field name (alternate #if branch) */

	/* We only need to init the fields that we change */
	memset(skb->cb, 0, sizeof(skb->cb));

	skb->destructor = NULL;
/* Free the driver packet. Free the tag if present */
osl_pktfree(osl_t *osh, void *p, bool send)
	struct sk_buff *skb, *nskb;	/* nskb walks skb->next chains (multi-skb packets) */

	skb = (struct sk_buff*) p;

	/* Run the registered tx-done hook before the buffer is released */
	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	/* perversion: we use skb->next to chain multi-skb packets */
	if (PKTISFAST(osh, skb))
		osl_pktfastfree(osh, skb);	/* pool-owned: recycle into the ctfpool */
	/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) — _any variant is IRQ-safe */
		dev_kfree_skb_any(skb);
	/* can free immediately (even in_irq()) if destructor is not set (alternate branch) */
		osh->pub.pktalloced--;	/* balance the counter incremented in osl_pktget */
#ifdef DHD_USE_STATIC_BUF
/* Hand out a preallocated skb sized for the request (4K, 8K or 16K class);
 * falls back to osl_pktget() when the request is oversized or all static
 * slots are taken. NOTE(review): returns/braces partially missing from view.
 */
osl_pktget_static(osl_t *osh, uint len)
	/* Requests larger than the biggest static skb go to the normal allocator */
	if (len > DHD_SKB_4PAGE_BUFSIZE)
		OSL_MSG_ERROR(("osl_pktget_static: Do we really need this big skb?? len=%d\n", len));
		return osl_pktget(osh, len);

	down(&bcm_static_skb->osl_pkt_sem);	/* protects pkt_use[] while scanning */
	if (len <= DHD_SKB_1PAGE_BUFSIZE)
		/* Find a free 4K slot */
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
			if (bcm_static_skb->pkt_use[i] == 0)

		if (i != MAX_STATIC_PKT_NUM)
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;	/* shrink the recycled skb to the request */

	if (len <= DHD_SKB_2PAGE_BUFSIZE)
		/* Find a free 8K slot; its pkt_use index is offset by MAX_STATIC_PKT_NUM */
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++)
			if (bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] == 0)

		if (i != MAX_STATIC_PKT_NUM)
			bcm_static_skb->pkt_use[i+MAX_STATIC_PKT_NUM] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_8k[i];
			skb->tail = skb->data + len;

	/* Last resort inside the static pool: the single 16K skb */
	if (bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] == 0)
		bcm_static_skb->pkt_use[MAX_STATIC_PKT_NUM*2] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

		skb = bcm_static_skb->skb_16k;
		skb->tail = skb->data + len;

	up(&bcm_static_skb->osl_pkt_sem);
	OSL_MSG_ERROR(("osl_pktget_static: all static pkt in use!\n"));
	return osl_pktget(osh, len);
640 osl_pktfree_static(osl_t *osh, void *p, bool send)
644 for (i = 0; i < MAX_STATIC_PKT_NUM*2+1; i++)
646 if (p == bcm_static_skb->skb_4k[i])
648 down(&bcm_static_skb->osl_pkt_sem);
649 bcm_static_skb->pkt_use[i] = 0;
650 up(&bcm_static_skb->osl_pkt_sem);
655 return osl_pktfree(osh, p, send);
657 #endif /* DHD_USE_STATIC_BUF */
/* Read a 32-bit value from PCI config space at 'offset'.
 * 0xffffffff reads indicate the device is not responding, so the access is
 * retried up to PCI_CFG_RETRY times. NOTE(review): the retry loop body and
 * return statement are missing from this view.
 */
osl_pci_read_config(osl_t *osh, uint offset, uint size)
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	pci_read_config_dword(osh->pdev, offset, &val);
	if (val != 0xffffffff)	/* all-ones means "device not ready" — retry otherwise */
/* Write a 32-bit value to PCI config space at 'offset'.
 * Writes to PCI_BAR0_WIN are read back and retried (up to PCI_CFG_RETRY)
 * until they stick. NOTE(review): the retry loop structure is missing
 * from this view.
 */
osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val)
	uint retry = PCI_CFG_RETRY;

	ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));

	/* only 4byte access supported */
	pci_write_config_dword(osh->pdev, offset, val);
	if (offset != PCI_BAR0_WIN)	/* only BAR0 window writes need verification */
	if (osl_pci_read_config(osh, offset, size) == val)	/* verify the write landed */
699 /* return bus # for the pci device pointed by osh->pdev */
701 osl_pci_bus(osl_t *osh)
703 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
705 return ((struct pci_dev *)osh->pdev)->bus->number;
708 /* return slot # for the pci device pointed by osh->pdev */
710 osl_pci_slot(osl_t *osh)
712 ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev);
714 return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn);
718 osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write)
723 osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size)
725 osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE);
729 osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size)
731 osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE);
/* Allocate 'size' bytes for the driver. Page-to-two-page sized requests are
 * served from the DHD static pool when available; everything else (and pool
 * exhaustion) falls through to kmalloc, trying GFP_ATOMIC first.
 * NOTE(review): braces/returns and the osh NULL guard are partially missing
 * from this view.
 */
osl_malloc(osl_t *osh, uint size)
	/* only ASSERT if osh is defined */
	ASSERT(osh->magic == OS_HANDLE_MAGIC);

#ifdef DHD_USE_STATIC_BUF
	/* Static pool serves only [PAGE_SIZE, STATIC_BUF_SIZE] requests */
	if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE))
		down(&bcm_static_buf->static_sem);	/* protects buf_use[] */

		for (i = 0; i < MAX_STATIC_BUF_NUM; i++)
			if (bcm_static_buf->buf_use[i] == 0)

		if (i == MAX_STATIC_BUF_NUM)
			/* no free slot: fall through to kmalloc below */
			up(&bcm_static_buf->static_sem);
			OSL_MSG_INFO(("osl_malloc: all static buff in use!\n"));

		bcm_static_buf->buf_use[i] = 1;
		up(&bcm_static_buf->static_sem);

		bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size);	/* callers expect zeroed memory here */

		atomic_add(size, &osh->malloced);

		return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i));

	/* Atomic first (callable from non-sleeping contexts), then retry sleeping */
	if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
		OSL_MSG_ERROR(("osl_malloc: GFP_ATOMIC failed, trying GFP_KERNEL\n"));
		if ((addr = kmalloc(size, GFP_KERNEL)) == NULL) {
			OSL_MSG_ERROR(("osl_malloc: GFP_KERNEL failed also\n"));

	atomic_add(size, &osh->malloced);	/* track outstanding bytes for osl_malloced() */
/* Free memory obtained from osl_malloc(). Addresses inside the static pool
 * region are returned to the pool by clearing their buf_use[] slot; all
 * other addresses take the (not visible here) kfree path.
 */
osl_mfree(osl_t *osh, void *addr, uint size)
#ifdef DHD_USE_STATIC_BUF
	/* Pointer-range test: does addr fall inside the static pool region? */
	if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr
		<= ((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN)))
		/* Recover the slot index from the offset within the buffer area */
		buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE;

		down(&bcm_static_buf->static_sem);
		bcm_static_buf->buf_use[buf_idx] = 0;	/* mark slot free */
		up(&bcm_static_buf->static_sem);

		ASSERT(osh->magic == OS_HANDLE_MAGIC);
		atomic_sub(size, &osh->malloced);	/* static-pool return path */
	ASSERT(osh->magic == OS_HANDLE_MAGIC);
	atomic_sub(size, &osh->malloced);	/* kfree path (non-static addresses) */
825 osl_malloced(osl_t *osh)
827 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
828 return (atomic_read(&osh->malloced));
832 osl_malloc_failed(osl_t *osh)
834 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
835 return (osh->failed);
841 osl_dma_consistent_align(void)
847 osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap)
849 uint16 align = (1 << align_bits);
850 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
852 if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align))
856 return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
860 osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
862 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
864 pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
868 osl_dma_map(osl_t *osh, void *va, uint size, int direction)
872 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
873 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
874 return (pci_map_single(osh->pdev, va, size, dir));
878 osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction)
882 ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
883 dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
884 pci_unmap_single(osh->pdev, (uint32)pa, size, dir);
#if defined(BCMASSERT_LOG)
/* Assertion failure hook: format "<expr>: file <basename>, line N" and feed
 * it to the assert log, then tell Coverity the path terminates.
 * NOTE(review): the strrchr result is used without a visible NULL/advance
 * guard here — the handling lines appear to be missing from this view;
 * confirm basename is adjusted when file contains no '/'.
 */
osl_assert(char *exp, char *file, int line)
	/* Strip the directory part of the path for a compact message */
	basename = strrchr(file, '/');
	snprintf(tempbuf, 64, "\"%s\": file \"%s\", line %d\n",
		exp, basename, line);

	bcm_assert_log(tempbuf);
#endif /* BCMASSERT_LOG */

	/* Inform Coverity that execution will not continue past this point */
	__coverity_panic__();
/* Clone a packet (shared data, new skb header).
 * The pkttag contents are NOT cloned.
 * NOTE(review): return statements and the refill increment on the pool are
 * missing from this view.
 */
osl_pktdup(osl_t *osh, void *skb)
	if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)

	if (PKTISFAST(osh, skb)) {
		/* if the buffer allocated from ctfpool is cloned then
		 * we can't be sure when it will be freed. since there
		 * is a chance that we will be losing a buffer
		 * from our pool, we increment the refill count for the
		 * object to be alloced later.
		 */
		ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
		ASSERT(ctfpool != NULL);
	PKTCLRFAST(osh, skb);	/* original also loses its fast-pool marking once cloned */

	/* skb_clone copies skb->cb.. we don't want that */
	bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);

	/* Increment the packet counter */
	osh->pub.pktalloced++;
970 * OSLREGOPS specifies the use of osl_XXX routines to be used for register access
974 * BINOSL selects the slightly slower function-call-based binary compatible osl.
/* Linux Kernel: File Operations: start */
/* Open a firmware/image file read-only from kernel context.
 * Returns the struct file * handle (error handling not visible in this view).
 */
osl_os_open_image(char *filename)
	fp = filp_open(filename, O_RDONLY, 0);
	/*
	 * 2.6.11 (FC4) supports filp_open() but later revs don't?
	 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
	 */
/* Read up to 'len' bytes from an image opened with osl_os_open_image()
 * into 'buf', advancing the file position. Returns bytes read.
 * NOTE(review): this is the old 4-argument kernel_read(fp, pos, buf, len)
 * signature — kernels >= 4.14 changed it; gate on LINUX_VERSION_CODE if
 * this code must build against newer kernels.
 */
osl_os_get_image_block(char *buf, int len, void *image)
	struct file *fp = (struct file *)image;

	rdlen = kernel_read(fp, fp->f_pos, buf, len);
/* Close an image handle returned by osl_os_open_image(). */
osl_os_close_image(void *image)
	filp_close((struct file *)image, NULL);

/* Linux Kernel: File Operations: end */