#include "max_buf.h"
#include "debug_scsc.h"
-/**
- * mbuf_panic
- * @mbuf: buffer
- * @sz: size
- * @addr: address
- * @msg: mbuf_over_panic or mbuf_under_panic
- *
- * Out-of-line support for mbuf_put() and mbuf_push().
- * Called via the wrapper mbuf_over_panic() or mbuf_under_panic().
- */
-static void mbuf_panic(struct max_buff *mbuf, unsigned int sz, void *addr, const char msg[])
-{
- SLSI_WARN_NODEV("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx \n", msg, addr, mbuf->len, sz, mbuf->head, mbuf->data, (unsigned long)mbuf->tail, (unsigned long)mbuf->end);
-}
-
-static void mbuf_over_panic(struct max_buff *mbuf, unsigned int sz, void *addr)
-{
- mbuf_panic(mbuf, sz, addr, __func__);
-}
-
-static void mbuf_under_panic(struct max_buff *mbuf, unsigned int sz, void *addr)
-{
- mbuf_panic(mbuf, sz, addr, __func__);
-}
-
+/* This function resets all the members of the max_buff */
void mbuf_reset(struct max_buff *mbuf)
{
	/* Save the buffer boundaries before clearing the structure; the
	 * previous code read "head" uninitialized and referenced an
	 * undeclared "end". */
	unsigned char *head = mbuf->head;
	unsigned char *end = mbuf->end;
	memset(mbuf, 0, sizeof(struct max_buff));
	/* An empty buffer has data == tail == head; end keeps its old value */
	mbuf->head = mbuf->data = mbuf->tail = head;
	mbuf->end = end;
-	mbuf->mac_header = (typeof(mbuf->mac_header)) ~ 0U;
}
-/**
- * __alloc_mbuf - allocate a maxwell buffer
- * @size: size to allocate
- *
- * Allocate a new &max_buff. The returned buffer has no headroom and a
- * tail room of at least size bytes with reference count of one.
- *
- */
-struct max_buff *__alloc_mbuf(unsigned int size)
+/* This function allocates a max_buff. This function returns NULL if the memory allocation fails. */
+struct max_buff *alloc_mbuf(unsigned int size)
{
	struct max_buff *mbuf = NULL;
	u8 *data;
-	/* Get the HEAD */
+	/* Allocate the max_buff structure */
	mbuf = kmm_malloc(sizeof(struct max_buff));
	if (!mbuf) {
		goto out;
	}
+	/* Allocate the data for max_buff */
	data = kmm_malloc(size);
-
	if (!data) {
-		goto nodata;
+		kmm_free(mbuf);
+		mbuf = NULL;
+		goto out;
	}
	memset(mbuf, 0, sizeof(struct max_buff));
+	/* head must point at the start of the allocated buffer; leaving it
+	 * NULL (from the memset) breaks mbuf_headroom() and mbuf_push(). */
+	mbuf->head = data;
	mbuf->data = data;
	mbuf->tail = mbuf->data;
	mbuf->end = mbuf->tail + size;
-	mbuf->mac_header = (typeof(mbuf->mac_header)) ~ 0U;
-
out:
	return mbuf;
-nodata:
-	kmm_free(mbuf);
-	mbuf = NULL;
-	goto out;
}
-/**
- * kfree_mbuf - free an max_buff
- * @mbuf: buffer to free
- *
- * Drop a reference to the buffer and free it if the usage count has
- * hit zero.
- */
-void kfree_mbuf(struct max_buff *mbuf)
+/* This function releases memory allocated for the max_buff */
+void free_mbuf(struct max_buff *mbuf)
{
if (!mbuf) {
return;
}
/**
- * mbuf_put - add data to a buffer
- * @mbuf: buffer pointer
- * @len: lenght of data to add
- *
- * This function writes the data at the end of the data and updates length appropriately.
- * If it exceeds the total buffer size then panic will be raised. A pointer to the
- * first byte of the extra data is returned.
+ * This function moves the tail pointer and increases the length of the max_buff
+ * by given number of bytes.
+ * If it exceeds the total buffer size then error message will be printed.
+ * A pointer to the first byte of the extra data is returned.
*/
unsigned char *mbuf_put(struct max_buff *mbuf, unsigned int len)
{
unsigned char *tmp = mbuf->tail;
mbuf->tail += len;
- mbuf->len += len;
+ mbuf->len += len;
if (mbuf->tail > mbuf->end) {
- mbuf_over_panic(mbuf, len, __builtin_return_address(0));
+ SLSI_WARN_NODEV("%s: over_panic: mbuf->len:%d len:%d head:%p data:%p tail:%#lx end:%#lx\n",
+ mbuf->len, len, mbuf->head, mbuf->data,
+ (unsigned long)mbuf->tail, (unsigned long)mbuf->end);
}
+
return tmp;
}
/**
- * mbuf_push - add data at the start of a buffer
- * @mbuf: buffer pointer
- * @len: length of data to add
- *
- * This function moves the data area of the buffer at the buffer
- * start. If this would exceed the total headroom then panic will be raised.
- * A pointer to the first byte of the extra data is returned.
+ * This function moves the data area of the buffer at the start of the buffer.
+ * If it exceeds the total headroom then error message will be printed.
+ * A pointer to the first byte of the extra data is returned.
*/
unsigned char *mbuf_push(struct max_buff *mbuf, unsigned int len)
{
mbuf->data -= len;
- mbuf->len += len;
+ mbuf->len += len;
if (mbuf->data < mbuf->head) {
- mbuf_under_panic(mbuf, len, __builtin_return_address(0));
+ SLSI_WARN_NODEV("%s: under_panic: mbuf->len:%d len:%d head:%p data:%p tail:%#lx end:%#lx\n",
+ mbuf->len, len, mbuf->head, mbuf->data,
+ (unsigned long)mbuf->tail, (unsigned long)mbuf->end);
}
+
return mbuf->data;
}
/**
- * mbuf_pull - remove data from the start of a buffer
- * @mbuf: buffer pointer
- * @len: amount of data to remove
- *
- * This function removes data from the start of a buffer, returning
- * the memory to the headroom. A pointer to the next data in the buffer
- * is returned. Once the data has been pulled future pushes will overwrite
- * the old data.
+ * This function removes data from the start of a buffer.
+ * A pointer to the next data in the buffer is returned.
*/
unsigned char *mbuf_pull(struct max_buff *mbuf, unsigned int len)
{
-	return mbuf_pull_inline(mbuf, len);
+	/* Nothing to pull if the request exceeds the stored data length */
+	if (len > mbuf->len) {
+		return NULL;
+	}
+
+	mbuf->len -= len;
+
+	/* Advance the data pointer past the pulled bytes and return it */
+	return mbuf->data += len;
}
/**
- * mbuf_copy - create private copy of an max_buff
- * @mbuf: buffer to copy
- * @gfp_mask: allocation priority
- *
- * Make a copy of both an &max_buff and its data. This is used when the
- * caller wishes to modify the data and needs a private copy of the
- * data to alter. Returns %NULL on failure or the pointer to the buffer
- * on success. The returned buffer has a reference count of 1.
- *
- * As by-product this function converts non-linear &max_buff to linear
- * one, so that &max_buff becomes completely private and caller is allowed
- * to modify all the data of returned buffer. This means that this
- * function is not recommended for use in circumstances when only
- * header is going to be modified. Use pmbuf_copy() instead.
+ * This function copies the max_buff header and its data.
+ * Returns NULL if memory allocation fails else pointer to new max_buff
*/
struct max_buff *mbuf_copy(const struct max_buff *mbuf)
{
- int headerlen = mbuf_headroom(mbuf);
- unsigned int size = mbuf_end_offset(mbuf) + mbuf->data_len;
- struct max_buff *n = __alloc_mbuf(size);
+ unsigned int mbuf_size = mbuf->end - mbuf->head;
+ struct max_buff *new_mbuf = alloc_mbuf(mbuf_size);
- if (!n) {
+ if (!new_mbuf) {
return NULL;
}
/* Set the data pointer */
- mbuf_reserve(n, headerlen);
- /* Set the tail pointer and length */
- mbuf_put(n, mbuf->len);
-
- return n;
-}
+ mbuf_reserve(new_mbuf, mbuf_headroom(mbuf));
-/**
- * __mbuf_queue_after - queue a buffer at the list head
- * @list: list to use
- * @prev: place after this buffer
- * @newsk: buffer to queue
- *
- * Queue a buffer int the middle of a list. This function takes no locks
- * and you must therefore hold required locks before calling it.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-static inline void __mbuf_queue_after(struct max_buff_head *list, struct max_buff *prev, struct max_buff *newsk)
-{
- __mbuf_insert(newsk, prev, prev->next, list);
-}
+ /* Set the tail pointer and length */
+ mbuf_put(new_mbuf, mbuf->len);
-static inline void __mbuf_queue_before(struct max_buff_head *list, struct max_buff *next, struct max_buff *newsk)
-{
- __mbuf_insert(newsk, next->prev, next, list);
-}
+ /* Copy the max_buff header */
+ memcpy(&new_mbuf->cb, &mbuf->cb, sizeof(struct slsi_mbuf_cb));
+ new_mbuf->queue_mapping = mbuf->queue_mapping;
+ new_mbuf->priority = mbuf->priority;
+ new_mbuf->protocol = mbuf->protocol;
+ new_mbuf->mac_header = mbuf->mac_header;
-static inline struct max_buff *__mbuf_dequeue(struct max_buff_head *list)
-{
- struct max_buff *mbuf = mbuf_peek(list);
+ /* Copy the mbuf->data */
+ memcpy(new_mbuf->data, mbuf->data, mbuf->len);
- if (mbuf) {
- __mbuf_unlink(mbuf, list);
- }
- return mbuf;
+ return new_mbuf;
}
/**
- * mbuf_dequeue - remove from the head of the queue
- * @list: list to dequeue from
- *
- * Remove the head of the list. The list lock is taken so the function
- * may be used safely with other locking list functions. The head item is
- * returned or %NULL if the list is empty.
+ * This function returns the mbuf that is at the head of the list. This operation
+ * is protected by list->lock. This function returns NULL if the list is empty.
*/
struct max_buff *mbuf_dequeue(struct max_buff_head *list)
{
- struct max_buff *result;
+ struct max_buff *mbuf;
SLSI_MUTEX_LOCK(list->lock);
- result = __mbuf_dequeue(list);
- SLSI_MUTEX_UNLOCK(list->lock);
- return result;
-}
-
-/**
- * __mbuf_dequeue_tail - remove from the tail of the queue
- * @list: list to dequeue from
- *
- * Remove the tail of the list. This function does not take any locks
- * so must be used with appropriate locks held only. The tail item is
- * returned or %NULL if the list is empty.
- */
-static inline struct max_buff *__mbuf_dequeue_tail(struct max_buff_head *list)
-{
- struct max_buff *mbuf = mbuf_peek_tail(list);
+ mbuf = list->next;
+ if (mbuf == (struct max_buff *)list)
+ mbuf = NULL;
if (mbuf) {
- __mbuf_unlink(mbuf, list);
+ mbuf_unlink(mbuf, list);
}
- return mbuf;
-}
-
-/**
- * mbuf_dequeue_tail - remove from the tail of the queue
- * @list: list to dequeue from
- *
- * Remove the tail of the list. The list lock is taken so the function
- * may be used safely with other locking list functions. The tail item is
- * returned or %NULL if the list is empty.
- */
-struct max_buff *mbuf_dequeue_tail(struct max_buff_head *list)
-{
- struct max_buff *result;
-
- SLSI_MUTEX_LOCK(list->lock);
- result = __mbuf_dequeue_tail(list);
SLSI_MUTEX_UNLOCK(list->lock);
- return result;
+
+ return mbuf;
}
-/**
- * mbuf_queue_purge - empty a list
- * @list: list to empty
- *
- * Delete all buffers on an &max_buff list. Each buffer is removed from
- * the list and one reference dropped. This function takes the list
- * lock and is atomic with respect to other list locking functions.
- */
+/* Delete all buffers on an max_buff_head list. This operation is protected by list->lock */
void mbuf_queue_purge(struct max_buff_head *list)
{
struct max_buff *mbuf;
- while ((mbuf = mbuf_dequeue(list)) != NULL) {
- kfree_mbuf(mbuf);
+ mbuf = mbuf_dequeue(list);
+ while (mbuf != NULL) {
+ free_mbuf(mbuf);
+ mbuf = mbuf_dequeue(list);
}
}
-/**
- * mbuf_queue_head - queue a buffer at the list head
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the start of the list. This function takes the
- * list lock and can be used safely with other locking &max_buff functions
- * safely.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-void mbuf_queue_head(struct max_buff_head *list, struct max_buff *newsk)
+/* Queue a buffer at the start of the list */
+void mbuf_queue_head(struct max_buff_head *list, struct max_buff *mbuf)
{
SLSI_MUTEX_LOCK(list->lock);
- __mbuf_queue_after(list, (struct max_buff *)list, newsk);
+ mbuf_insert(mbuf, (struct max_buff *)list, list->next, list);
SLSI_MUTEX_UNLOCK(list->lock);
}
-/**
- * mbuf_queue_tail - queue a buffer at the list tail
- * @list: list to use
- * @newsk: buffer to queue
- *
- * Queue a buffer at the tail of the list. This function takes the
- * list lock and can be used safely with other locking &max_buff functions
- * safely.
- *
- * A buffer cannot be placed on two lists at the same time.
- */
-void mbuf_queue_tail(struct max_buff_head *list, struct max_buff *newsk)
+/* Queue a buffer at the tail of the list. */
+void mbuf_queue_tail(struct max_buff_head *list, struct max_buff *mbuf)
{
SLSI_MUTEX_LOCK(list->lock);
- __mbuf_queue_before(list, (struct max_buff *)list, newsk);
+ mbuf_insert(mbuf, list->prev, (struct max_buff *)list, list);
SLSI_MUTEX_UNLOCK(list->lock);
}
struct max_buff *next;
struct max_buff *prev;
- __u32 qlen;
+ unsigned int queue_len;
pthread_mutex_t lock;
};
-struct max_buff;
-
/* Maximum number of packets to store in RX data mbuf queue */
#define SCSC_MAX_RX_MBUF_QUEUE_LEN (32)
#define SLSI_HIP_WORK_QID LPWORK
/**
- * struct max_buff - socket buffer
- * @next: Next maxwell buffer in list
- * @prev: Previous maxwell buffer in list
- * @cb: Control buffer. FAPI signal details are placed in this
- * @len: Length of actual data
- * @data_len: Data length
- * @queue_mapping: Queue mapping for multiqueue devices
- * @priority: Packet queueing priority
- * @protocol: Packet protocol from driver
- * @mac_header: Link layer header
- * @tail: Tail pointer of data
- * @end: End pointer of max_buff
- * @head: Head of max_buff buffer
- * @data: Data head pointer
+ * struct max_buff
+ * next: Next max_buff in the list
+ * prev: Previous max_buff in the list
+ * head: Head of max_buff buffer
+ * data: Data pointer of max_buff
+ * tail: Tail pointer of max_buff
+ * end: End pointer of max_buff
+ * len: Length of actual data in max_buff
+ * cb: FAPI signal details are placed in this
+ * queue_mapping: Queue mapping for the max_buff
+ * mac_header: Starting offset of the MAC header
+ * priority: TID priority of max_buff
+ * protocol: Ether protocol of the max_buff
*/
-
struct max_buff {
- struct max_buff *next;
- struct max_buff *prev;
+ struct max_buff *next;
+ struct max_buff *prev;
+ unsigned char *head;
+ unsigned char *data;
+ unsigned char *tail;
+ unsigned char *end;
+ unsigned int len;
struct slsi_mbuf_cb cb;
- unsigned int len, data_len;
- __u16 queue_mapping;
- __u16 priority;
- __be16 protocol;
- __u16 mac_header;
- unsigned char *head;
- unsigned char *data;
- unsigned char *tail;
- unsigned char *end;
+ unsigned short queue_mapping;
+ unsigned short mac_header;
+ unsigned short priority;
+ unsigned short protocol;
};
struct slsi_mbuf_work {
unsigned char *mbuf_put(struct max_buff *mbuf, unsigned int len);
unsigned char *mbuf_push(struct max_buff *mbuf, unsigned int len);
unsigned char *mbuf_pull(struct max_buff *mbuf, unsigned int len);
-static inline unsigned int mbuf_headroom(const struct max_buff *mbuf);
-struct max_buff *__alloc_mbuf(unsigned int size);
+struct max_buff *alloc_mbuf(unsigned int size);
struct max_buff *mbuf_dequeue(struct max_buff_head *list);
-void mbuf_queue_head(struct max_buff_head *list, struct max_buff *newsk);
+void mbuf_queue_head(struct max_buff_head *list, struct max_buff *mbuf);
void mbuf_queue_purge(struct max_buff_head *list);
-void kfree_mbuf(struct max_buff *mbuf);
-void mbuf_queue_tail(struct max_buff_head *list, struct max_buff *newsk);
+void free_mbuf(struct max_buff *mbuf);
+void mbuf_queue_tail(struct max_buff_head *list, struct max_buff *mbuf);
void mbuf_reset(struct max_buff *mbuf);
static inline struct slsi_mbuf_cb *slsi_mbuf_cb_get(struct max_buff *mbuf)
return slsi_mbuf_cb_get(mbuf);
}
-/*
- * Insert an max_buff on a list.
- *
- */
-static inline void __mbuf_insert(struct max_buff *newsk, struct max_buff *prev, struct max_buff *next, struct max_buff_head *list)
+/* Insert an max_buff on a list */
+static inline void mbuf_insert(struct max_buff *mbuf, struct max_buff *prev,
+ struct max_buff *next, struct max_buff_head *list)
{
- newsk->next = next;
- newsk->prev = prev;
- next->prev = prev->next = newsk;
- list->qlen++;
+ mbuf->next = next;
+ mbuf->prev = prev;
+ next->prev = prev->next = mbuf;
+ list->queue_len++;
}
/**
- * mbuf_reserve - adjust headroom
- * @mbuf: buffer to alter
- * @len: bytes to move
- *
- * Increase the headroom of an empty &max_buff by reducing the tail
- * room. This is only allowed for an empty buffer.
+ * Increase the headroom of an empty max_buff by moving the data and tail pointers.
+ * This is only allowed for an empty buffer.
*/
static inline void mbuf_reserve(struct max_buff *mbuf, int len)
{
	/* An empty buffer has data == tail: advance BOTH to create headroom.
	 * Moving only tail (as before) left data at head, so a later
	 * mbuf_push() on a reserved buffer falsely hit the under-panic path. */
	mbuf->data += len;
	mbuf->tail += len;
}
-static inline struct max_buff *alloc_mbuf(unsigned int size)
-{
- return __alloc_mbuf(size);
-}
-
-static inline unsigned char *__mbuf_pull(struct max_buff *mbuf, unsigned int len)
-{
- mbuf->len -= len;
- WARN_ON(mbuf->len < mbuf->data_len);
- return mbuf->data += len;
-}
-
-static inline unsigned char *mbuf_pull_inline(struct max_buff *mbuf, unsigned int len)
-{
- return (len > mbuf->len) ? NULL : __mbuf_pull(mbuf, len);
-}
-
-/**
- * __mbuf_queue_head_init - initialize max_buff_head
- * @list: queue to initialize
- *
- * This initializes only the list and queue length aspects of
- * an max_buff_head object. It can also be used for on-stack max_buff_head
- * objects where the spinlock is known to not be used.
- */
-static inline void __mbuf_queue_head_init(struct max_buff_head *list)
-{
- list->prev = list->next = (struct max_buff *)list;
- list->qlen = 0;
-}
-
-/*
- * This function initializes qhead and related locks
- */
+/* This function initializes max_buff_head and the list lock */
static inline void mbuf_queue_head_init(struct max_buff_head *list)
{
SLSI_MUTEX_INIT(list->lock);
- __mbuf_queue_head_init(list);
+ list->prev = list->next = (struct max_buff *)list;
+ list->queue_len = 0;
}
-/**
- * mbuf_headroom - bytes at buffer head
- * @mbuf: buffer to check
- *
- * Return the number of bytes of free space at the head of an &max_buff.
- */
+/* This function returns the number of bytes of free space at the head of an max_buff */
static inline unsigned int mbuf_headroom(const struct max_buff *mbuf)
{
return mbuf->data - mbuf->head;
}
-static inline bool mbuf_is_nonlinear(const struct max_buff *mbuf)
-{
- return mbuf->data_len;
-}
-
-/**
- * mbuf_tailroom - bytes at buffer end
- * @mbuf: buffer to check
- *
- * Return the number of bytes of free space at the tail of an max_buff
- */
+/* This function returns the number of bytes of free space at the tail of an max_buff */
static inline int mbuf_tailroom(const struct max_buff *mbuf)
{
- return mbuf_is_nonlinear(mbuf) ? 0 : mbuf->end - mbuf->tail;
+ return mbuf->end - mbuf->tail;
}
-/**
- * mbuf_queue_len - get queue length
- * @list_: list to measure
- *
- * Return the length of an &max_buff queue.
- */
-static inline __u32 mbuf_queue_len(const struct max_buff_head *list_)
+/* Returns the length of a max_buff_head queue */
+static inline __u32 mbuf_queue_len(const struct max_buff_head *list)
{
- return list_->qlen;
+ return list->queue_len;
}
-static inline void mbuf_set_tail_pointer(struct max_buff *mbuf, unsigned int offset)
-{
- mbuf->tail = mbuf->data + offset;
-}
-
-static inline unsigned int mbuf_end_offset(const struct max_buff *mbuf)
-{
- return mbuf->end - mbuf->head;
-}
-
-/*
- * remove max_buff from list. _Must_ be called atomically, and with
- * the list known..
- */
-static inline void __mbuf_unlink(struct max_buff *mbuf, struct max_buff_head *list)
+/* Removes max_buff from list. _Must_ be called atomically */
+static inline void mbuf_unlink(struct max_buff *mbuf, struct max_buff_head *list)
{
	struct max_buff *next, *prev;
-	list->qlen--;
+	list->queue_len--;
	next = mbuf->next;
	prev = mbuf->prev;
	mbuf->next = mbuf->prev = NULL;
	prev->next = next;
+	/* Fix the backward link as well; leaving next->prev pointing at the
+	 * removed node corrupts the doubly linked list. */
+	next->prev = prev;
}
-/**
- * mbuf_peek - peek at the head of an &max_buff_head
- * @list_: list to peek at
- *
- * Peek an &max_buff. Unlike most other operations you _MUST_
- * be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. You must hold
- * the appropriate locks or have a private queue to do this.
- *
- * Returns %NULL for an empty list or a pointer to the head element.
- * The reference count is not incremented and the reference is therefore
- * volatile. Use with caution.
- */
-static inline struct max_buff *mbuf_peek(const struct max_buff_head *list_)
-{
- struct max_buff *mbuf = list_->next;
-
- if (mbuf == (struct max_buff *)list_) {
- mbuf = NULL;
- }
- return mbuf;
-}
-
-/**
- * mbuf_peek_tail - peek at the tail of an &max_buff_head
- * @list_: list to peek at
- *
- * Peek an &max_buff. Unlike most other operations you _MUST_
- * be careful with this one. A peek leaves the buffer on the
- * list and someone else may run off with it. You must hold
- * the appropriate locks or have a private queue to do this.
- *
- * Returns %NULL for an empty list or a pointer to the tail element.
- * The reference count is not incremented and the reference is therefore
- * volatile. Use with caution.
- */
-static inline struct max_buff *mbuf_peek_tail(const struct max_buff_head *list_)
-{
- struct max_buff *mbuf = list_->prev;
-
- if (mbuf == (struct max_buff *)list_) {
- mbuf = NULL;
- }
- return mbuf;
-}
-
static inline unsigned char *mbuf_mac_header(const struct max_buff *mbuf)
{
return mbuf->head + mbuf->mac_header;
mbuf->mac_header += offset;
}
-static inline struct max_buff *slsi_dev_alloc_mbuf_f(unsigned int length, const char *file, int line)
-{
- struct max_buff *mbuf = __alloc_mbuf(SLSI_NETIF_MBUF_HEADROOM + SLSI_NETIF_MBUF_TAILROOM + length);
-
- SLSI_UNUSED_PARAMETER(file);
- SLSI_UNUSED_PARAMETER(line);
-
- if (mbuf) {
- mbuf_reserve(mbuf, SLSI_NETIF_MBUF_HEADROOM - SLSI_MBUF_GET_ALIGNMENT_OFFSET(mbuf));
- }
- return mbuf;
-}
-
static inline struct max_buff *slsi_alloc_mbuf_f(unsigned int size, const char *file, int line)
{
struct max_buff *mbuf = alloc_mbuf(SLSI_NETIF_MBUF_HEADROOM + SLSI_NETIF_MBUF_TAILROOM + size);
#define slsi_alloc_mbuf(size_) slsi_alloc_mbuf_f(size_, __FILE__, __LINE__)
#define slsi_mbuf_copy(mbuf_) mbuf_copy(mbuf_)
-#define slsi_kfree_mbuf(mbuf_) kfree_mbuf(mbuf_)
+#define slsi_kfree_mbuf(mbuf_) free_mbuf(mbuf_)
#define slsi_mbuf_queue_tail(list_, mbuf_) mbuf_queue_tail(list_, mbuf_)
#define slsi_mbuf_queue_head(list_, mbuf_) mbuf_queue_head(list_, mbuf_)
#define slsi_mbuf_dequeue(list_) mbuf_dequeue(list_)
return;
}
- if (work->queue.qlen == 0) {
+ if (work->queue.queue_len == 0) {
mbuf_queue_tail(&work->queue, mbuf);
slsi_mbuf_schedule_work(work, func);
} else {
do {
spin_lock_irqsave(&intr->spinlock, flags);
while ((irq_reg[0] = intr->mif->irq_get(intr->mif)) != 0) {
- for_each_set_bit(bit, (unsigned long int *)irq_reg, MIFINTRBIT_NUM_INT) {
- if (intr->mifintrbit_irq_handler[bit] != mifintrbit_default_handler) {
+ for (bit = 0; bit < MIFINTRBIT_NUM_INT; bit++) {
+ if ((irq_reg[0] & (1 << bit)) && (intr->mifintrbit_irq_handler[bit] != mifintrbit_default_handler)) {
intr->mifintrbit_irq_handler[bit](bit, intr->irq_data[bit]);
}
}
#else
spin_lock_irqsave(&intr->spinlock, flags);
irq_reg[0] = intr->mif->irq_get(intr->mif);
- for_each_set_bit(bit, (unsigned long int *)irq_reg, MIFINTRBIT_NUM_INT) {
- if (intr->mifintrbit_irq_handler[bit] != mifintrbit_default_handler) {
+ for (bit = 0; bit < MIFINTRBIT_NUM_INT; bit++) {
+ if ((irq_reg[0] & (1 << bit)) && (intr->mifintrbit_irq_handler[bit] != mifintrbit_default_handler)) {
intr->mifintrbit_irq_handler[bit](bit, intr->irq_data[bit]);
}
}
-/*****************************************************************************
+/******************************************************************************
*
- * Copyright 2017 Samsung Electronics All Rights Reserved.
+ * Copyright (c) 2012 - 2016 Samsung Electronics Co., Ltd. All rights reserved
*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
- * either express or implied. See the License for the specific
- * language governing permissions and limitations under the License.
- *
- ****************************************************************************/
+ *****************************************************************************/
#ifndef SLSI_UTILS_MISC_H__
#define SLSI_UTILS_MISC_H__
#include <tinyara/kmalloc.h>
#include "utils_scsc.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
#define __bitwise
#define __force __attribute__((force))
#define __aligned(x) __attribute__((aligned(x)))
#define SSID_MAX_LEN 32
-struct firmware {
- size_t size;
- const u8 *data;
- //struct page **pages;
-
- /* firmware loader private fields */
- void *priv;
-};
-
static inline void *ERR_PTR(long error)
{
return (void *)error;
return 1 & (addr[BIT_BYTE(nr)] >> (nr & (BITS_PER_BYTE - 1)));
}
-/*We can use ndbg,nlldbg,... defined under CONFIG_DEBUG_NET.
- * face compiler errors when we enable the CONFIG.*/
-
#define pr_err(_msg, args ...) \
lldbg(_msg, ## args)
#define pr_warn(_msg, args ...) \
}
#define __round_mask(x, y) ((__typeof__(x))((y) - 1))
#define round_up(x, y) ((((x) - 1) | __round_mask(x, y)) + 1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-/**
- * get_first_bit - find first set bit in word
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long get_first_bit(unsigned long elem)
-{
- int num = 0;
-
-#if BITS_PER_LONG == 64
- if ((elem & 0xffffffff) == 0) {
- elem >>= 32;
- num += 32;
- }
-#endif
- if ((elem & 0xffff) == 0) {
- elem >>= 16;
- num += 16;
- }
- if ((elem & 0xff) == 0) {
- elem >>= 8;
- num += 8;
- }
- if ((elem & 0xf) == 0) {
- elem >>= 4;
- num += 4;
- }
- if ((elem & 0x3) == 0) {
- elem >>= 2;
- num += 2;
- }
- if ((elem & 0x1) == 0) {
- num += 1;
- }
-
- return num;
-}
-
-#define min(x, y) ({ \
- typeof(x) _x = (x); \
- typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x < _y ? _x : _y; })
-
-#define max(x, y) ({ \
- typeof(x) _x = (x); \
- typeof(y) _y = (y); \
- (void) (&_x == &_y); \
- _x > _y ? _x : _y; })
-
-static unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
-{
- unsigned long idx;
- for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
- if (addr[idx]) {
- return min(idx * BITS_PER_LONG + get_first_bit(addr[idx]), size);
- }
- }
-
- return size;
-}
-#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
-/*
- * This is a common helper function for find_next_bit and
- * find_next_zero_bit. The difference is the "invert" argument, which
- * is XORed with each fetched word before searching it for one bits.
- */
-static unsigned long _find_next_bit(const unsigned long *addr, unsigned long nbits, unsigned long start, unsigned long invert)
-{
- unsigned long tmp;
-
- if (!nbits || start >= nbits) {
- return nbits;
- }
-
- tmp = addr[start / BITS_PER_LONG] ^ invert;
-
- /* Handle 1st word. */
- tmp &= BITMAP_FIRST_WORD_MASK(start);
- start = round_down(start, BITS_PER_LONG);
-
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= nbits) {
- return nbits;
- }
-
- tmp = addr[start / BITS_PER_LONG] ^ invert;
- }
-
- return min(start + get_first_bit(tmp), nbits);
-}
-
-/*
- * Find the next set bit in a memory region.
- */
-static unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
- return _find_next_bit(addr, size, offset, 0UL);
-}
-
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-#define ffz(x) get_first_bit(~(x))
+/* This API finds the first ZERO bit in the given address. This API checks only one word (addr[0]) */
static inline unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
-	unsigned long idx;
+	unsigned long i;
-	for (idx = 0; idx * BITS_PER_LONG < size; idx++) {
-		if (addr[idx] != ~0UL) {
-			return min(idx * BITS_PER_LONG + ffz(addr[idx]), size);
-		}
+	/* 1UL keeps the shift well-defined for i >= 31; an int "i" would also
+	 * trigger a signed/unsigned comparison against "size". */
+	for (i = 0; i < size; i++) {
+		if (!(addr[0] & (1UL << i))) {
+			return i;
+		}
	}
	return size;
}
-#ifdef __cplusplus
-}
-#endif
/*
* SLSI doubly linked llist implementation.
*
#define container_of(ptr, type, member) ({ \
const typeof(((type*)0)->member) *__mptr = (ptr); \
- (type *)((char *)__mptr - offsetof(type, member)); })
+ (type *)((char *)__mptr - offsetof(type, member));})
/**
* dlist_entry - get the struct for this entry
} while (0)
#endif /* SLSI_UTILS_MISC_H__ */
+
+
goto exit_with_lock;
}
- num_scan_ind = ndev_vif->scan[scan_id].scan_results.qlen;
+ num_scan_ind = ndev_vif->scan[scan_id].scan_results.queue_len;
SLSI_NET_DBG3(dev, SLSI_T20_80211, "slsi_get_scan_results(interface:%d, scan_id:%d, num_scan_ind: %d)\n", ndev_vif->ifnum, scan_id, num_scan_ind);
if (num_scan_ind == 0) {