This is a new generic kernel FIFO implementation.
The current kernel fifo API is not very widely used, because it has too
many constraints. Only 17 files in the current 2.6.31-rc5 kernel use it.
FIFOs are, like lists, a very basic thing, and a kfifo API which handles
the most common use cases would save a lot of development time and memory
resources.
I think these are the reasons why kfifo is not in wider use:
- The API is too simple; important functions are missing
- A fifo can only be allocated dynamically
- A spinlock is required whether you need it or not
- There is no support for data records inside a fifo
So I decided to extend the kfifo in a more generic way without blowing up
the API too much. The new API has the following benefits:
- Generic usage: for kernel-internal use and/or device drivers.
- Provides an API for the most common use cases.
- Slim API: the whole API provides 25 functions.
- Follows Linux kernel style conventions.
- DECLARE_KFIFO, DEFINE_KFIFO and INIT_KFIFO macros
- Direct copy_to_user from the fifo and copy_from_user into the fifo.
- The kfifo itself is an in-place member of the using data structure; this saves an
indirection and avoids a separate trip through the kernel allocator (see the
usage sketch after this list).
- Lockless access: if only one reader and one writer are active on the fifo,
which is the common use case, no additional locking is necessary.
- Removed spinlock - gives the user the freedom to choose what kind of locking to use,
if one is required at all.
- Ability to handle records. Three types of records are supported:
- Variable length records between 0-255 bytes, with a record size
field of 1 byte.
- Variable length records between 0-65535 bytes, with a record size
field of 2 bytes.
- Fixed size records, which need no record size field.
- Preserves memory resources.
- Performance!
- Easy to use!
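As a minimal usage sketch (the structure and function names here are
hypothetical; only interfaces touched by this patch are used, and passing
NULL for the lock matches the nozomi conversion below):

	struct my_dev {				/* hypothetical example object */
		struct kfifo fifo;		/* in-place member, no separate allocation */
	};

	static int my_dev_setup(struct my_dev *dev)
	{
		/* kfifo_alloc() now fills a caller-provided struct kfifo and
		 * allocates only the internal buffer; NULL means "no spinlock" */
		if (kfifo_alloc(&dev->fifo, 128, GFP_KERNEL, NULL))
			return -ENOMEM;
		return 0;
	}

	static void my_dev_push(struct my_dev *dev, const unsigned char *buf,
				unsigned int len)
	{
		/* a single producer and a single consumer need no extra locking */
		__kfifo_put(&dev->fifo, buf, len);
	}

	static void my_dev_teardown(struct my_dev *dev)
	{
		kfifo_free(&dev->fifo);		/* frees the internal buffer only */
	}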
This patch:
Since most users want to have the kfifo as part of another object,
reorganize the code to allow including struct kfifo in another data
structure. This requires changing the kfifo_alloc and kfifo_init
prototypes so that we pass an existing kfifo pointer into them. This
patch changes the implementation and all existing users.
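For reference, the resulting kfifo_alloc() prototype change (both
declarations can be seen in the include/linux/kfifo.h hunk further down):

	/* old API: kfifo_alloc() allocated and returned the fifo object itself */
	struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
				  spinlock_t *lock);

	/* new API: the caller passes an existing, usually embedded, struct kfifo;
	 * only the internal buffer is allocated, and an error code is returned */
	int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask,
			spinlock_t *lock);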
[akpm@linux-foundation.org: fix warning]
Signed-off-by: Stefani Seibold <stefani@seibold.net>
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Acked-by: Mauro Carvalho Chehab <mchehab@redhat.com>
Acked-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
u8 update_flow_control;
struct ctrl_ul ctrl_ul;
struct ctrl_dl ctrl_dl;
- struct kfifo *fifo_ul;
+ struct kfifo fifo_ul;
void __iomem *dl_addr[2];
u32 dl_size[2];
u8 toggle_dl;
dump_table(dc);
for (i = PORT_MDM; i < MAX_PORT; i++) {
- dc->port[i].fifo_ul =
- kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
+ kfifo_alloc(&dc->port[i].fifo_ul,
+ FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL);
memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl));
memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul));
}
struct tty_struct *tty = tty_port_tty_get(&port->port);
/* Get data from tty and place in buf for now */
- size = __kfifo_get(port->fifo_ul, dc->send_buf,
+ size = __kfifo_get(&port->fifo_ul, dc->send_buf,
ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX);
if (size == 0) {
} else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) {
- if (__kfifo_len(dc->port[port].fifo_ul)) {
+ if (__kfifo_len(&dc->port[port].fifo_ul)) {
DBG1("Enable interrupt (0x%04X) on port: %d",
enable_ier, port);
DBG1("Data in buffer [%d], enable transmit! ",
- __kfifo_len(dc->port[port].fifo_ul));
+ __kfifo_len(&dc->port[port].fifo_ul));
enable_transmit_ul(port, dc);
} else {
DBG1("No data in buffer...");
free_irq(pdev->irq, dc);
for (i = 0; i < MAX_PORT; i++)
- if (dc->port[i].fifo_ul)
- kfifo_free(dc->port[i].fifo_ul);
+ kfifo_free(&dc->port[i].fifo_ul);
kfree(dc->send_buf);
goto exit;
}
- rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count);
+ rval = __kfifo_put(&port->fifo_ul, (unsigned char *)buffer, count);
/* notify card */
if (unlikely(dc == NULL)) {
if (!port->port.count)
goto exit;
- room = port->fifo_ul->size - __kfifo_len(port->fifo_ul);
+ room = port->fifo_ul.size - __kfifo_len(&port->fifo_ul);
exit:
mutex_unlock(&port->tty_sem);
goto exit_in_buffer;
}
- rval = __kfifo_len(port->fifo_ul);
+ rval = __kfifo_len(&port->fifo_ul);
exit_in_buffer:
return rval;
int camera_power;
int bluetooth_power;
struct mutex lock;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
struct fasync_struct *fifo_async;
struct input_dev *input_jog_dev;
struct input_dev *input_key_dev;
struct work_struct input_work;
- struct kfifo *input_fifo;
+ struct kfifo input_fifo;
spinlock_t input_fifo_lock;
} sonypi_device;
{
struct sonypi_keypress kp;
- while (kfifo_get(sonypi_device.input_fifo, (unsigned char *)&kp,
+ while (kfifo_get(&sonypi_device.input_fifo, (unsigned char *)&kp,
sizeof(kp)) == sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
if (kp.dev) {
input_report_key(kp.dev, kp.key, 1);
input_sync(kp.dev);
- kfifo_put(sonypi_device.input_fifo,
+ kfifo_put(&sonypi_device.input_fifo,
(unsigned char *)&kp, sizeof(kp));
schedule_work(&sonypi_device.input_work);
}
acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
#endif
- kfifo_put(sonypi_device.fifo, (unsigned char *)&event, sizeof(event));
+ kfifo_put(&sonypi_device.fifo, (unsigned char *)&event, sizeof(event));
kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_device.fifo_proc_list);
mutex_lock(&sonypi_device.lock);
/* Flush input queue on first open */
if (!sonypi_device.open_count)
- kfifo_reset(sonypi_device.fifo);
+ kfifo_reset(&sonypi_device.fifo);
sonypi_device.open_count++;
mutex_unlock(&sonypi_device.lock);
unlock_kernel();
ssize_t ret;
unsigned char c;
- if ((kfifo_len(sonypi_device.fifo) == 0) &&
+ if ((kfifo_len(&sonypi_device.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_device.fifo_proc_list,
- kfifo_len(sonypi_device.fifo) != 0);
+ kfifo_len(&sonypi_device.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
- (kfifo_get(sonypi_device.fifo, &c, sizeof(c)) == sizeof(c))) {
+ (kfifo_get(&sonypi_device.fifo, &c, sizeof(c)) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_device.fifo_proc_list, wait);
- if (kfifo_len(sonypi_device.fifo))
+ if (kfifo_len(&sonypi_device.fifo))
return POLLIN | POLLRDNORM;
return 0;
}
"http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n");
spin_lock_init(&sonypi_device.fifo_lock);
- sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
+ error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL,
&sonypi_device.fifo_lock);
- if (IS_ERR(sonypi_device.fifo)) {
+ if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- return PTR_ERR(sonypi_device.fifo);
+ return error;
}
init_waitqueue_head(&sonypi_device.fifo_proc_list);
}
spin_lock_init(&sonypi_device.input_fifo_lock);
- sonypi_device.input_fifo =
- kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL,
- &sonypi_device.input_fifo_lock);
- if (IS_ERR(sonypi_device.input_fifo)) {
+ error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE,
+ GFP_KERNEL, &sonypi_device.input_fifo_lock);
+ if (error) {
printk(KERN_ERR "sonypi: kfifo_alloc failed\n");
- error = PTR_ERR(sonypi_device.input_fifo);
goto err_inpdev_unregister;
}
pci_disable_device(pcidev);
err_put_pcidev:
pci_dev_put(pcidev);
- kfifo_free(sonypi_device.fifo);
+ kfifo_free(&sonypi_device.fifo);
return error;
}
if (useinput) {
input_unregister_device(sonypi_device.input_key_dev);
input_unregister_device(sonypi_device.input_jog_dev);
- kfifo_free(sonypi_device.input_fifo);
+ kfifo_free(&sonypi_device.input_fifo);
}
misc_deregister(&sonypi_misc_device);
pci_dev_put(sonypi_device.dev);
}
- kfifo_free(sonypi_device.fifo);
+ kfifo_free(&sonypi_device.fifo);
return 0;
}
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/kfifo.h>
#include "t3_cpl.h"
#include "t3cdev.h"
};
struct cxio_hal_resource {
- struct kfifo *tpt_fifo;
+ struct kfifo tpt_fifo;
spinlock_t tpt_fifo_lock;
- struct kfifo *qpid_fifo;
+ struct kfifo qpid_fifo;
spinlock_t qpid_fifo_lock;
- struct kfifo *cqid_fifo;
+ struct kfifo cqid_fifo;
spinlock_t cqid_fifo_lock;
- struct kfifo *pdid_fifo;
+ struct kfifo pdid_fifo;
spinlock_t pdid_fifo_lock;
};
#include "cxio_resource.h"
#include "cxio_hal.h"
-static struct kfifo *rhdl_fifo;
+static struct kfifo rhdl_fifo;
static spinlock_t rhdl_fifo_lock;
#define RANDOM_SIZE 16
-static int __cxio_init_resource_fifo(struct kfifo **fifo,
+static int __cxio_init_resource_fifo(struct kfifo *fifo,
spinlock_t *fifo_lock,
u32 nr, u32 skip_low,
u32 skip_high,
u32 rarray[16];
spin_lock_init(fifo_lock);
- *fifo = kfifo_alloc(nr * sizeof(u32), GFP_KERNEL, fifo_lock);
- if (IS_ERR(*fifo))
+ if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL, fifo_lock))
return -ENOMEM;
for (i = 0; i < skip_low + skip_high; i++)
- __kfifo_put(*fifo, (unsigned char *) &entry, sizeof(u32));
+ __kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32));
if (random) {
j = 0;
random_bytes = random32();
random_bytes = random32();
}
idx = (random_bytes >> (j * 2)) & 0xF;
- __kfifo_put(*fifo,
+ __kfifo_put(fifo,
(unsigned char *) &rarray[idx],
sizeof(u32));
rarray[idx] = i;
j++;
}
for (i = 0; i < RANDOM_SIZE; i++)
- __kfifo_put(*fifo,
+ __kfifo_put(fifo,
(unsigned char *) &rarray[i],
sizeof(u32));
} else
for (i = skip_low; i < nr - skip_high; i++)
- __kfifo_put(*fifo, (unsigned char *) &i, sizeof(u32));
+ __kfifo_put(fifo, (unsigned char *) &i, sizeof(u32));
for (i = 0; i < skip_low + skip_high; i++)
- kfifo_get(*fifo, (unsigned char *) &entry, sizeof(u32));
+ kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32));
return 0;
}
-static int cxio_init_resource_fifo(struct kfifo **fifo, spinlock_t * fifo_lock,
+static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
skip_high, 0));
}
-static int cxio_init_resource_fifo_random(struct kfifo **fifo,
+static int cxio_init_resource_fifo_random(struct kfifo *fifo,
spinlock_t * fifo_lock,
u32 nr, u32 skip_low, u32 skip_high)
{
spin_lock_init(&rdev_p->rscp->qpid_fifo_lock);
- rdev_p->rscp->qpid_fifo = kfifo_alloc(T3_MAX_NUM_QP * sizeof(u32),
+ if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32),
GFP_KERNEL,
- &rdev_p->rscp->qpid_fifo_lock);
- if (IS_ERR(rdev_p->rscp->qpid_fifo))
+ &rdev_p->rscp->qpid_fifo_lock))
return -ENOMEM;
for (i = 16; i < T3_MAX_NUM_QP; i++)
if (!(i & rdev_p->qpmask))
- __kfifo_put(rdev_p->rscp->qpid_fifo,
+ __kfifo_put(&rdev_p->rscp->qpid_fifo,
(unsigned char *) &i, sizeof(u32));
return 0;
}
void cxio_hal_destroy_rhdl_resource(void)
{
- kfifo_free(rhdl_fifo);
+ kfifo_free(&rhdl_fifo);
}
/* nr_* must be power of 2 */
goto pdid_err;
return 0;
pdid_err:
- kfifo_free(rscp->cqid_fifo);
+ kfifo_free(&rscp->cqid_fifo);
cqid_err:
- kfifo_free(rscp->qpid_fifo);
+ kfifo_free(&rscp->qpid_fifo);
qpid_err:
- kfifo_free(rscp->tpt_fifo);
+ kfifo_free(&rscp->tpt_fifo);
tpt_err:
return -ENOMEM;
}
u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->tpt_fifo);
+ return cxio_hal_get_resource(&rscp->tpt_fifo);
}
void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag)
{
- cxio_hal_put_resource(rscp->tpt_fifo, stag);
+ cxio_hal_put_resource(&rscp->tpt_fifo, stag);
}
u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp)
{
- u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo);
+ u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo);
PDBG("%s qpid 0x%x\n", __func__, qpid);
return qpid;
}
void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid)
{
PDBG("%s qpid 0x%x\n", __func__, qpid);
- cxio_hal_put_resource(rscp->qpid_fifo, qpid);
+ cxio_hal_put_resource(&rscp->qpid_fifo, qpid);
}
u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->cqid_fifo);
+ return cxio_hal_get_resource(&rscp->cqid_fifo);
}
void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid)
{
- cxio_hal_put_resource(rscp->cqid_fifo, cqid);
+ cxio_hal_put_resource(&rscp->cqid_fifo, cqid);
}
u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp)
{
- return cxio_hal_get_resource(rscp->pdid_fifo);
+ return cxio_hal_get_resource(&rscp->pdid_fifo);
}
void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid)
{
- cxio_hal_put_resource(rscp->pdid_fifo, pdid);
+ cxio_hal_put_resource(&rscp->pdid_fifo, pdid);
}
void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp)
{
- kfifo_free(rscp->tpt_fifo);
- kfifo_free(rscp->cqid_fifo);
- kfifo_free(rscp->qpid_fifo);
- kfifo_free(rscp->pdid_fifo);
+ kfifo_free(&rscp->tpt_fifo);
+ kfifo_free(&rscp->cqid_fifo);
+ kfifo_free(&rscp->qpid_fifo);
+ kfifo_free(&rscp->pdid_fifo);
kfree(rscp);
}
return IRQ_HANDLED;
if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) {
- if (kfifo_get(meye.grabq, (unsigned char *)&reqnr,
+ if (kfifo_get(&meye.grabq, (unsigned char *)&reqnr,
sizeof(int)) != sizeof(int)) {
mchip_free_frame();
return IRQ_HANDLED;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
- kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int));
+ kfifo_put(&meye.doneq, (unsigned char *)&reqnr, sizeof(int));
wake_up_interruptible(&meye.proc_list);
} else {
int size;
mchip_free_frame();
goto again;
}
- if (kfifo_get(meye.grabq, (unsigned char *)&reqnr,
+ if (kfifo_get(&meye.grabq, (unsigned char *)&reqnr,
sizeof(int)) != sizeof(int)) {
mchip_free_frame();
goto again;
meye.grab_buffer[reqnr].state = MEYE_BUF_DONE;
do_gettimeofday(&meye.grab_buffer[reqnr].timestamp);
meye.grab_buffer[reqnr].sequence = sequence++;
- kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int));
+ kfifo_put(&meye.doneq, (unsigned char *)&reqnr, sizeof(int));
wake_up_interruptible(&meye.proc_list);
}
mchip_free_frame();
for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
- kfifo_reset(meye.grabq);
- kfifo_reset(meye.doneq);
+ kfifo_reset(&meye.grabq);
+ kfifo_reset(&meye.doneq);
return 0;
}
mchip_cont_compression_start();
meye.grab_buffer[*nb].state = MEYE_BUF_USING;
- kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int));
+ kfifo_put(&meye.grabq, (unsigned char *)nb, sizeof(int));
mutex_unlock(&meye.lock);
return 0;
/* fall through */
case MEYE_BUF_DONE:
meye.grab_buffer[*i].state = MEYE_BUF_UNUSED;
- kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int));
+ kfifo_get(&meye.doneq, (unsigned char *)&unused, sizeof(int));
}
*i = meye.grab_buffer[*i].size;
mutex_unlock(&meye.lock);
buf->flags |= V4L2_BUF_FLAG_QUEUED;
buf->flags &= ~V4L2_BUF_FLAG_DONE;
meye.grab_buffer[buf->index].state = MEYE_BUF_USING;
- kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int));
+ kfifo_put(&meye.grabq, (unsigned char *)&buf->index, sizeof(int));
mutex_unlock(&meye.lock);
return 0;
mutex_lock(&meye.lock);
- if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
+ if (kfifo_len(&meye.doneq) == 0 && file->f_flags & O_NONBLOCK) {
mutex_unlock(&meye.lock);
return -EAGAIN;
}
if (wait_event_interruptible(meye.proc_list,
- kfifo_len(meye.doneq) != 0) < 0) {
+ kfifo_len(&meye.doneq) != 0) < 0) {
mutex_unlock(&meye.lock);
return -EINTR;
}
- if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr,
+ if (!kfifo_get(&meye.doneq, (unsigned char *)&reqnr,
sizeof(int))) {
mutex_unlock(&meye.lock);
return -EBUSY;
{
mutex_lock(&meye.lock);
mchip_hic_stop();
- kfifo_reset(meye.grabq);
- kfifo_reset(meye.doneq);
+ kfifo_reset(&meye.grabq);
+ kfifo_reset(&meye.doneq);
for (i = 0; i < MEYE_MAX_BUFNBRS; i++)
meye.grab_buffer[i].state = MEYE_BUF_UNUSED;
mutex_lock(&meye.lock);
poll_wait(file, &meye.proc_list, wait);
- if (kfifo_len(meye.doneq))
+ if (kfifo_len(&meye.doneq))
res = POLLIN | POLLRDNORM;
mutex_unlock(&meye.lock);
return res;
}
spin_lock_init(&meye.grabq_lock);
- meye.grabq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
- &meye.grabq_lock);
- if (IS_ERR(meye.grabq)) {
+ if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
+ &meye.grabq_lock)) {
printk(KERN_ERR "meye: fifo allocation failed\n");
goto outkfifoalloc1;
}
spin_lock_init(&meye.doneq_lock);
- meye.doneq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
- &meye.doneq_lock);
- if (IS_ERR(meye.doneq)) {
+ if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL,
+ &meye.doneq_lock)) {
printk(KERN_ERR "meye: fifo allocation failed\n");
goto outkfifoalloc2;
}
outenabledev:
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
outsonypienable:
- kfifo_free(meye.doneq);
+ kfifo_free(&meye.doneq);
outkfifoalloc2:
- kfifo_free(meye.grabq);
+ kfifo_free(&meye.grabq);
outkfifoalloc1:
vfree(meye.grab_temp);
outvmalloc:
sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0);
- kfifo_free(meye.doneq);
- kfifo_free(meye.grabq);
+ kfifo_free(&meye.doneq);
+ kfifo_free(&meye.grabq);
vfree(meye.grab_temp);
struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS];
int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */
struct mutex lock; /* mutex for open/mmap... */
- struct kfifo *grabq; /* queue for buffers to be grabbed */
+ struct kfifo grabq; /* queue for buffers to be grabbed */
spinlock_t grabq_lock; /* lock protecting the queue */
- struct kfifo *doneq; /* queue for grabbed buffers */
+ struct kfifo doneq; /* queue for grabbed buffers */
spinlock_t doneq_lock; /* lock protecting the queue */
wait_queue_head_t proc_list; /* wait queue */
struct video_device *video_dev; /* video device parameters */
priv->dnld_sent = DNLD_RES_RECEIVED;
/* If nothing to do, go back to sleep (?) */
- if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx])
+ if (!__kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx])
priv->psstate = PS_STATE_SLEEP;
spin_unlock_irqrestore(&priv->driver_lock, flags);
}
/* Pending events or command responses? */
- if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
+ if (__kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
allowed = 0;
lbs_deb_host("pending events or command responses\n");
}
#include "scan.h"
#include "assoc.h"
-
+#include <linux/kfifo.h>
/** sleep_params */
struct sleep_params {
u32 resp_len[2];
/* Events sent from hardware to driver */
- struct kfifo *event_fifo;
+ struct kfifo event_fifo;
/** thread to service interrupts */
struct task_struct *main_thread;
else if (!list_empty(&priv->cmdpendingq) &&
!(priv->wakeup_dev_required))
shouldsleep = 0; /* We have a command to send */
- else if (__kfifo_len(priv->event_fifo))
+ else if (__kfifo_len(&priv->event_fifo))
shouldsleep = 0; /* We have an event to process */
else
shouldsleep = 1; /* No command */
/* Process hardware events, e.g. card removed, link lost */
spin_lock_irq(&priv->driver_lock);
- while (__kfifo_len(priv->event_fifo)) {
+ while (__kfifo_len(&priv->event_fifo)) {
u32 event;
- __kfifo_get(priv->event_fifo, (unsigned char *) &event,
+ __kfifo_get(&priv->event_fifo, (unsigned char *) &event,
sizeof(event));
spin_unlock_irq(&priv->driver_lock);
lbs_process_event(priv, event);
priv->resp_len[0] = priv->resp_len[1] = 0;
/* Create the event FIFO */
- priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL);
- if (IS_ERR(priv->event_fifo)) {
+ ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL, NULL);
+ if (ret) {
lbs_pr_err("Out of memory allocating event FIFO buffer\n");
- ret = -ENOMEM;
goto out;
}
lbs_deb_enter(LBS_DEB_MAIN);
lbs_free_cmd_buffer(priv);
- if (priv->event_fifo)
- kfifo_free(priv->event_fifo);
+ kfifo_free(&priv->event_fifo);
del_timer(&priv->command_timer);
del_timer(&priv->auto_deepsleep_timer);
kfree(priv->networks);
if (priv->psstate == PS_STATE_SLEEP)
priv->psstate = PS_STATE_AWAKE;
- __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32));
+ __kfifo_put(&priv->event_fifo, (unsigned char *) &event, sizeof(u32));
wake_up_interruptible(&priv->waitq);
struct input_dev *input;
char phys[32];
struct platform_device *pf_device;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
int rfkill_supported;
int rfkill_state;
/* kfifo */
spin_lock_init(&fujitsu_hotkey->fifo_lock);
- fujitsu_hotkey->fifo =
- kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL,
- &fujitsu_hotkey->fifo_lock);
- if (IS_ERR(fujitsu_hotkey->fifo)) {
+ error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
+ GFP_KERNEL, &fujitsu_hotkey->fifo_lock);
+ if (error) {
printk(KERN_ERR "kfifo_alloc failed\n");
- error = PTR_ERR(fujitsu_hotkey->fifo);
goto err_stop;
}
err_free_input_dev:
input_free_device(input);
err_free_fifo:
- kfifo_free(fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_hotkey->fifo);
err_stop:
return result;
}
input_free_device(input);
- kfifo_free(fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_hotkey->fifo);
fujitsu_hotkey->acpi_handle = NULL;
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"Push keycode into ringbuffer [%d]\n",
keycode);
- status = kfifo_put(fujitsu_hotkey->fifo,
+ status = kfifo_put(&fujitsu_hotkey->fifo,
(unsigned char *)&keycode,
sizeof(keycode));
if (status != sizeof(keycode)) {
} else if (keycode == 0) {
while ((status =
kfifo_get
- (fujitsu_hotkey->fifo, (unsigned char *)
+ (&fujitsu_hotkey->fifo, (unsigned char *)
&keycode_r,
sizeof
(keycode_r))) == sizeof(keycode_r)) {
atomic_t users;
struct input_dev *jog_dev;
struct input_dev *key_dev;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
struct workqueue_struct *wq;
};
{
struct sony_laptop_keypress kp;
- while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp,
+ while (kfifo_get(&sony_laptop_input.fifo, (unsigned char *)&kp,
sizeof(kp)) == sizeof(kp)) {
msleep(10);
input_report_key(kp.dev, kp.key, 0);
/* we emit the scancode so we can always remap the key */
input_event(kp.dev, EV_MSC, MSC_SCAN, event);
input_sync(kp.dev);
- kfifo_put(sony_laptop_input.fifo,
+ kfifo_put(&sony_laptop_input.fifo,
(unsigned char *)&kp, sizeof(kp));
if (!work_pending(&sony_laptop_release_key_work))
/* kfifo */
spin_lock_init(&sony_laptop_input.fifo_lock);
- sony_laptop_input.fifo =
- kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
+ error =
+ kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
&sony_laptop_input.fifo_lock);
- if (IS_ERR(sony_laptop_input.fifo)) {
+ if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
- error = PTR_ERR(sony_laptop_input.fifo);
goto err_dec_users;
}
destroy_workqueue(sony_laptop_input.wq);
err_free_kfifo:
- kfifo_free(sony_laptop_input.fifo);
+ kfifo_free(&sony_laptop_input.fifo);
err_dec_users:
atomic_dec(&sony_laptop_input.users);
}
destroy_workqueue(sony_laptop_input.wq);
- kfifo_free(sony_laptop_input.fifo);
+ kfifo_free(&sony_laptop_input.fifo);
}
/*********** Platform Device ***********/
struct sonypi_compat_s {
struct fasync_struct *fifo_async;
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t fifo_lock;
wait_queue_head_t fifo_proc_list;
atomic_t open_count;
/* Flush input queue on first open */
unsigned long flags;
- spin_lock_irqsave(sonypi_compat.fifo->lock, flags);
+ spin_lock_irqsave(&sonypi_compat.fifo_lock, flags);
if (atomic_inc_return(&sonypi_compat.open_count) == 1)
- __kfifo_reset(sonypi_compat.fifo);
+ __kfifo_reset(&sonypi_compat.fifo);
- spin_unlock_irqrestore(sonypi_compat.fifo->lock, flags);
+ spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags);
return 0;
}
ssize_t ret;
unsigned char c;
- if ((kfifo_len(sonypi_compat.fifo) == 0) &&
+ if ((kfifo_len(&sonypi_compat.fifo) == 0) &&
(file->f_flags & O_NONBLOCK))
return -EAGAIN;
ret = wait_event_interruptible(sonypi_compat.fifo_proc_list,
- kfifo_len(sonypi_compat.fifo) != 0);
+ kfifo_len(&sonypi_compat.fifo) != 0);
if (ret)
return ret;
while (ret < count &&
- (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) {
+ (kfifo_get(&sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) {
if (put_user(c, buf++))
return -EFAULT;
ret++;
static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait)
{
poll_wait(file, &sonypi_compat.fifo_proc_list, wait);
- if (kfifo_len(sonypi_compat.fifo))
+ if (kfifo_len(&sonypi_compat.fifo))
return POLLIN | POLLRDNORM;
return 0;
}
static void sonypi_compat_report_event(u8 event)
{
- kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event));
+ kfifo_put(&sonypi_compat.fifo, (unsigned char *)&event, sizeof(event));
kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN);
wake_up_interruptible(&sonypi_compat.fifo_proc_list);
}
int error;
spin_lock_init(&sonypi_compat.fifo_lock);
- sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
+ error =
+ kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL,
&sonypi_compat.fifo_lock);
- if (IS_ERR(sonypi_compat.fifo)) {
+ if (error) {
printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n");
- return PTR_ERR(sonypi_compat.fifo);
+ return error;
}
init_waitqueue_head(&sonypi_compat.fifo_proc_list);
return 0;
err_free_kfifo:
- kfifo_free(sonypi_compat.fifo);
+ kfifo_free(&sonypi_compat.fifo);
return error;
}
static void sonypi_compat_exit(void)
{
misc_deregister(&sonypi_misc_device);
- kfifo_free(sonypi_compat.fifo);
+ kfifo_free(&sonypi_compat.fifo);
}
#else
static int sonypi_compat_init(void) { return 0; }
if (conn->login_task == task)
return;
- __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
+ __kfifo_put(&session->cmdpool.queue, (void*)&task, sizeof(void*));
if (sc) {
task->sc = NULL;
BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
- if (!__kfifo_get(session->cmdpool.queue,
+ if (!__kfifo_get(&session->cmdpool.queue,
(void*)&task, sizeof(void*)))
return NULL;
}
{
struct iscsi_task *task;
- if (!__kfifo_get(conn->session->cmdpool.queue,
+ if (!__kfifo_get(&conn->session->cmdpool.queue,
(void *) &task, sizeof(void *)))
return NULL;
if (q->pool == NULL)
return -ENOMEM;
- q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
- GFP_KERNEL, NULL);
- if (IS_ERR(q->queue)) {
- q->queue = NULL;
- goto enomem;
- }
+ kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*), NULL);
for (i = 0; i < max; i++) {
q->pool[i] = kzalloc(item_size, GFP_KERNEL);
q->max = i;
goto enomem;
}
- __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
+ __kfifo_put(&q->queue, (void*)&q->pool[i], sizeof(void*));
}
if (items) {
for (i = 0; i < q->max; i++)
kfree(q->pool[i]);
kfree(q->pool);
- kfree(q->queue);
}
EXPORT_SYMBOL_GPL(iscsi_pool_free);
/* allocate login_task used for the login/text sequences */
spin_lock_bh(&session->lock);
- if (!__kfifo_get(session->cmdpool.queue,
+ if (!__kfifo_get(&session->cmdpool.queue,
(void*)&conn->login_task,
sizeof(void*))) {
spin_unlock_bh(&session->lock);
return cls_conn;
login_task_data_alloc_fail:
- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
+ __kfifo_put(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
login_task_alloc_fail:
iscsi_destroy_conn(cls_conn);
free_pages((unsigned long) conn->data,
get_order(ISCSI_DEF_MAX_RECV_SEG_LEN));
kfree(conn->persistent_address);
- __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task,
+ __kfifo_put(&session->cmdpool.queue, (void*)&conn->login_task,
sizeof(void*));
if (session->leadconn == conn)
session->leadconn = NULL;
return;
/* flush task's r2t queues */
- while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ while (__kfifo_get(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) {
+ __kfifo_put(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n");
}
r2t = tcp_task->r2t;
if (r2t != NULL) {
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ __kfifo_put(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
tcp_task->r2t = NULL;
}
return 0;
}
- rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
+ rc = __kfifo_get(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*));
if (!rc) {
iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. "
"Target has sent more R2Ts than it "
if (r2t->data_length == 0) {
iscsi_conn_printk(KERN_ERR, conn,
"invalid R2T with zero data len\n");
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ __kfifo_put(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
"invalid R2T with data len %u at offset %u "
"and total length %d\n", r2t->data_length,
r2t->data_offset, scsi_out(task->sc)->length);
- __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t,
+ __kfifo_put(&tcp_task->r2tpool.queue, (void*)&r2t,
sizeof(void*));
return ISCSI_ERR_DATALEN;
}
r2t->sent = 0;
tcp_task->exp_datasn = r2tsn + 1;
- __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
+ __kfifo_put(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*));
conn->r2t_pdus_cnt++;
iscsi_requeue_task(task);
return conn->session->tt->init_pdu(task, 0, task->data_count);
}
- BUG_ON(__kfifo_len(tcp_task->r2tqueue));
+ BUG_ON(__kfifo_len(&tcp_task->r2tqueue));
tcp_task->exp_datasn = 0;
/* Prepare PDU, optionally w/ immediate data */
if (r2t->data_length <= r2t->sent) {
ISCSI_DBG_TCP(task->conn,
" done with r2t %p\n", r2t);
- __kfifo_put(tcp_task->r2tpool.queue,
+ __kfifo_put(&tcp_task->r2tpool.queue,
(void *)&tcp_task->r2t,
sizeof(void *));
tcp_task->r2t = r2t = NULL;
}
if (r2t == NULL) {
- __kfifo_get(tcp_task->r2tqueue,
+ __kfifo_get(&tcp_task->r2tqueue,
(void *)&tcp_task->r2t, sizeof(void *));
r2t = tcp_task->r2t;
}
}
/* R2T xmit queue */
- tcp_task->r2tqueue = kfifo_alloc(
- session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
- if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) {
+ if (kfifo_alloc(&tcp_task->r2tqueue,
+ session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL)) {
iscsi_pool_free(&tcp_task->r2tpool);
goto r2t_alloc_fail;
}
struct iscsi_task *task = session->cmds[i];
struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_task->r2tqueue);
+ kfifo_free(&tcp_task->r2tqueue);
iscsi_pool_free(&tcp_task->r2tpool);
}
return -ENOMEM;
struct iscsi_task *task = session->cmds[i];
struct iscsi_tcp_task *tcp_task = task->dd_data;
- kfifo_free(tcp_task->r2tqueue);
+ kfifo_free(&tcp_task->r2tqueue);
iscsi_pool_free(&tcp_task->r2tpool);
}
}
goto free_pool;
spin_lock_init(&q->lock);
- q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
- GFP_KERNEL, &q->lock);
- if (IS_ERR(q->queue))
- goto free_item;
+ kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *),
+ &q->lock);
for (i = 0, iue = q->items; i < max; i++) {
- __kfifo_put(q->queue, (void *) &iue, sizeof(void *));
+ __kfifo_put(&q->queue, (void *) &iue, sizeof(void *));
iue->sbuf = ring[i];
iue++;
}
return 0;
-free_item:
kfree(q->items);
free_pool:
kfree(q->pool);
{
struct iu_entry *iue = NULL;
- kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *));
+ kfifo_get(&target->iu_queue.queue, (void *) &iue, sizeof(void *));
if (!iue)
return iue;
iue->target = target;
void srp_iu_put(struct iu_entry *iue)
{
- kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
+ kfifo_put(&iue->target->iu_queue.queue, (void *) &iue, sizeof(void *));
}
EXPORT_SYMBOL_GPL(srp_iu_put);
pkt->info = 0;
pkt->priv_data = NULL;
- cq_put(usb->ep0->empty_frame_Q, pkt);
+ cq_put(&usb->ep0->empty_frame_Q, pkt);
}
/* confirm submitted packet */
if ((td->data + td->actual_len) && trans_len)
memcpy(td->data + td->actual_len, pkt->data,
trans_len);
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
}
recycle_frame(usb, pkt);
}
/* update frame object fields before transmitting */
- pkt = cq_get(usb->ep0->empty_frame_Q);
+ pkt = cq_get(&usb->ep0->empty_frame_Q);
if (!pkt) {
fhci_dbg(usb->fhci, "there is no empty frame\n");
return -1;
pkt->info = 0;
if (data == NULL) {
- data = cq_get(usb->ep0->dummy_packets_Q);
+ data = cq_get(&usb->ep0->dummy_packets_Q);
BUG_ON(!data);
pkt->info = PKT_DUMMY_PACKET;
}
list_del_init(&td->frame_lh);
td->status = USB_TD_OK;
if (pkt->info & PKT_DUMMY_PACKET)
- cq_put(usb->ep0->dummy_packets_Q, pkt->data);
+ cq_put(&usb->ep0->dummy_packets_Q, pkt->data);
recycle_frame(usb, pkt);
usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD);
fhci_err(usb->fhci, "host transaction failed\n");
cpm_muram_free(cpm_muram_offset(ep->td_base));
if (ep->conf_frame_Q) {
- size = cq_howmany(ep->conf_frame_Q);
+ size = cq_howmany(&ep->conf_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->conf_frame_Q);
+ struct packet *pkt = cq_get(&ep->conf_frame_Q);
kfree(pkt);
}
- cq_delete(ep->conf_frame_Q);
+ cq_delete(&ep->conf_frame_Q);
}
if (ep->empty_frame_Q) {
- size = cq_howmany(ep->empty_frame_Q);
+ size = cq_howmany(&ep->empty_frame_Q);
for (; size; size--) {
- struct packet *pkt = cq_get(ep->empty_frame_Q);
+ struct packet *pkt = cq_get(&ep->empty_frame_Q);
kfree(pkt);
}
- cq_delete(ep->empty_frame_Q);
+ cq_delete(&ep->empty_frame_Q);
}
if (ep->dummy_packets_Q) {
- size = cq_howmany(ep->dummy_packets_Q);
+ size = cq_howmany(&ep->dummy_packets_Q);
for (; size; size--) {
- u8 *buff = cq_get(ep->dummy_packets_Q);
+ u8 *buff = cq_get(&ep->dummy_packets_Q);
kfree(buff);
}
- cq_delete(ep->dummy_packets_Q);
+ cq_delete(&ep->dummy_packets_Q);
}
kfree(ep);
ep->td_base = cpm_muram_addr(ep_offset);
/* zero all queue pointers */
- ep->conf_frame_Q = cq_new(ring_len + 2);
- ep->empty_frame_Q = cq_new(ring_len + 2);
- ep->dummy_packets_Q = cq_new(ring_len + 2);
- if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) {
+ if (cq_new(&ep->conf_frame_Q, ring_len + 2) ||
+ cq_new(&ep->empty_frame_Q, ring_len + 2) ||
+ cq_new(&ep->dummy_packets_Q, ring_len + 2)) {
err_for = "frame_queues";
goto err;
}
err_for = "buffer";
goto err;
}
- cq_put(ep->empty_frame_Q, pkt);
- cq_put(ep->dummy_packets_Q, buff);
+ cq_put(&ep->empty_frame_Q, pkt);
+ cq_put(&ep->dummy_packets_Q, buff);
}
/* we put the endpoint parameter RAM right behind the TD ring */
if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W))
continue;
- pkt = cq_get(ep->conf_frame_Q);
+ pkt = cq_get(&ep->conf_frame_Q);
if (!pkt)
fhci_err(usb->fhci, "no frame to confirm\n");
out_be16(&td->length, pkt->len);
/* put the frame to the confirmation queue */
- cq_put(ep->conf_frame_Q, pkt);
+ cq_put(&ep->conf_frame_Q, pkt);
- if (cq_howmany(ep->conf_frame_Q) == 1)
+ if (cq_howmany(&ep->conf_frame_Q) == 1)
out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO);
return 0;
struct usb_td __iomem *td_base; /* first TD in the ring */
struct usb_td __iomem *conf_td; /* next TD for confirm after transac */
struct usb_td __iomem *empty_td;/* next TD for new transaction req. */
- struct kfifo *empty_frame_Q; /* Empty frames list to use */
- struct kfifo *conf_frame_Q; /* frames passed to TDs,waiting for tx */
- struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overun */
+ struct kfifo empty_frame_Q; /* Empty frames list to use */
+ struct kfifo conf_frame_Q; /* frames passed to TDs,waiting for tx */
+ struct kfifo dummy_packets_Q;/* dummy packets for the CRC overun */
bool already_pushed_dummy_bd;
};
}
/* fifo of pointers */
-static inline struct kfifo *cq_new(int size)
+static inline int cq_new(struct kfifo *fifo, int size)
{
- return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL);
+ return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL, NULL);
}
static inline void cq_delete(struct kfifo *kfifo)
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL,
- &port->lock);
- if (IS_ERR(port->write_fifo))
+ if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL,
+ &port->lock))
goto probe_error;
buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
port->bulk_out_size = buffer_size;
/*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
*
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
* Copyright (C) 2004 Stelian Pop <stelian@popies.net>
*
* This program is free software; you can redistribute it and/or modify
spinlock_t *lock; /* protects concurrent modifications */
};
-extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- gfp_t gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
- spinlock_t *lock);
+extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+ unsigned int size, spinlock_t *lock);
+extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size,
+ gfp_t gfp_mask, spinlock_t *lock);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int __kfifo_put(struct kfifo *fifo,
const unsigned char *buffer, unsigned int len);
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+#include <linux/kfifo.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>
};
struct iscsi_pool {
- struct kfifo *queue; /* FIFO Queue */
+ struct kfifo queue; /* FIFO Queue */
void **pool; /* Pool of elements */
int max; /* Max number of elements */
};
int data_offset;
struct iscsi_r2t_info *r2t; /* in progress solict R2T */
struct iscsi_pool r2tpool;
- struct kfifo *r2tqueue;
+ struct kfifo r2tqueue;
void *dd_data;
};
struct srp_queue {
void *pool;
void *items;
- struct kfifo *queue;
+ struct kfifo queue;
spinlock_t lock;
};
/*
- * A simple kernel FIFO implementation.
+ * A generic kernel FIFO implementation.
*
+ * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net>
* Copyright (C) 2004 Stelian Pop <stelian@popies.net>
*
* This program is free software; you can redistribute it and/or modify
#include <linux/kfifo.h>
#include <linux/log2.h>
+static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+ unsigned int size, spinlock_t *lock)
+{
+ fifo->buffer = buffer;
+ fifo->size = size;
+ fifo->lock = lock;
+
+ kfifo_reset(fifo);
+}
+
/**
- * kfifo_init - allocates a new FIFO using a preallocated buffer
+ * kfifo_init - initialize a FIFO using a preallocated buffer
+ * @fifo: the fifo to assign the buffer
* @buffer: the preallocated buffer to be used.
* @size: the size of the internal buffer, this have to be a power of 2.
- * @gfp_mask: get_free_pages mask, passed to kmalloc()
* @lock: the lock to be used to protect the fifo buffer
*
- * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
- * &struct kfifo with kfree().
*/
-struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- gfp_t gfp_mask, spinlock_t *lock)
+void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size,
+ spinlock_t *lock)
{
- struct kfifo *fifo;
-
/* size must be a power of 2 */
BUG_ON(!is_power_of_2(size));
- fifo = kmalloc(sizeof(struct kfifo), gfp_mask);
- if (!fifo)
- return ERR_PTR(-ENOMEM);
-
- fifo->buffer = buffer;
- fifo->size = size;
- fifo->in = fifo->out = 0;
- fifo->lock = lock;
-
- return fifo;
+ _kfifo_init(fifo, buffer, size, lock);
}
EXPORT_SYMBOL(kfifo_init);
/**
- * kfifo_alloc - allocates a new FIFO and its internal buffer
- * @size: the size of the internal buffer to be allocated.
+ * kfifo_alloc - allocates a new FIFO internal buffer
+ * @fifo: the fifo to assign the new buffer
+ * @size: the size of the buffer to be allocated, this has to be a power of 2.
* @gfp_mask: get_free_pages mask, passed to kmalloc()
* @lock: the lock to be used to protect the fifo buffer
*
+ * This function dynamically allocates a new fifo internal buffer
+ *
* The size will be rounded-up to a power of 2.
+ * The buffer will be released with kfifo_free().
+ * Return 0 if no error, otherwise an error code.
*/
-struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
+int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask,
+ spinlock_t *lock)
{
unsigned char *buffer;
- struct kfifo *ret;
/*
* round up to the next power of 2, since our 'let the indices
}
buffer = kmalloc(size, gfp_mask);
- if (!buffer)
- return ERR_PTR(-ENOMEM);
-
- ret = kfifo_init(buffer, size, gfp_mask, lock);
+ if (!buffer) {
+ _kfifo_init(fifo, 0, 0, NULL);
+ return -ENOMEM;
+ }
- if (IS_ERR(ret))
- kfree(buffer);
+ _kfifo_init(fifo, buffer, size, lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(kfifo_alloc);
/**
- * kfifo_free - frees the FIFO
+ * kfifo_free - frees the FIFO internal buffer
* @fifo: the fifo to be freed.
*/
void kfifo_free(struct kfifo *fifo)
{
kfree(fifo->buffer);
- kfree(fifo);
}
EXPORT_SYMBOL(kfifo_free);
static const char procname[] = "dccpprobe";
static struct {
- struct kfifo *fifo;
+ struct kfifo fifo;
spinlock_t lock;
wait_queue_head_t wait;
struct timespec tstart;
len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
va_end(args);
- kfifo_put(dccpw.fifo, tbuf, len);
+ kfifo_put(&dccpw.fifo, tbuf, len);
wake_up(&dccpw.wait);
}
static int dccpprobe_open(struct inode *inode, struct file *file)
{
- kfifo_reset(dccpw.fifo);
+ kfifo_reset(&dccpw.fifo);
getnstimeofday(&dccpw.tstart);
return 0;
}
return -ENOMEM;
error = wait_event_interruptible(dccpw.wait,
- __kfifo_len(dccpw.fifo) != 0);
+ __kfifo_len(&dccpw.fifo) != 0);
if (error)
goto out_free;
- cnt = kfifo_get(dccpw.fifo, tbuf, len);
+ cnt = kfifo_get(&dccpw.fifo, tbuf, len);
error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
out_free:
init_waitqueue_head(&dccpw.wait);
spin_lock_init(&dccpw.lock);
- dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock);
- if (IS_ERR(dccpw.fifo))
- return PTR_ERR(dccpw.fifo);
-
+ if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL, &dccpw.lock))
+ return ret;
if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops))
goto err0;
err1:
proc_net_remove(&init_net, procname);
err0:
- kfifo_free(dccpw.fifo);
+ kfifo_free(&dccpw.fifo);
return ret;
}
module_init(dccpprobe_init);
static __exit void dccpprobe_exit(void)
{
- kfifo_free(dccpw.fifo);
+ kfifo_free(&dccpw.fifo);
proc_net_remove(&init_net, procname);
unregister_jprobe(&dccp_send_probe);