[FIX] Buffer: disable and enable irqs in the same function
author Alexander Aksenov <a.aksenov@samsung.com>
Wed, 2 Apr 2014 07:25:06 +0000 (11:25 +0400)
committer Dmitry Kovalenko <d.kovalenko@samsung.com>
Mon, 7 Apr 2014 05:50:34 +0000 (22:50 -0700)
The subbuffer spinlock and the disabling of irqs while looking for
that subbuffer are now handled separately: swap_buffer_write()
disables irqs around the whole lookup, and get_from_write_list()
takes the plain (non-irqsave) spinlock.

Change-Id: I0018b3739a06905f1321287fe5ee7ccf1645d7d2
Signed-off-by: Alexander Aksenov <a.aksenov@samsung.com>
buffer/buffer_queue.c
buffer/kernel_operations.h
buffer/swap_buffer_module.c
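
For context, the locking pattern after this change can be sketched as follows.
The sketch is editorial and only illustrative: write_path_sketch() is a
hypothetical wrapper, and struct sync_t is reduced to the two fields the hunks
below actually touch; the real call sites are get_from_write_list() in
buffer_queue.c and swap_buffer_write() in swap_buffer_module.c.

    #include <linux/spinlock.h>
    #include <linux/irqflags.h>
    #include <linux/preempt.h>

    /* Reduced to the fields used in the hunks below; the real struct may differ. */
    struct sync_t {
            spinlock_t spinlock;
            unsigned long flags;    /* used only by the irqsave variants sync_lock()/sync_unlock() */
    };

    /* Hypothetical wrapper mirroring the order of operations in swap_buffer_write() */
    static void write_path_sketch(struct sync_t *buffer_sync)
    {
            unsigned long flags = 0;

            /* swap_irq_disable(&flags): irqs stay off for the whole lookup */
            preempt_disable();
            local_irq_save(flags);

            /* sync_lock_no_flags() inside get_from_write_list(): plain spinlock */
            spin_lock(&buffer_sync->spinlock);

            /* ... copy the payload into the chosen subbuffer ... */

            /* sync_unlock_no_flags() back in swap_buffer_write() */
            spin_unlock(&buffer_sync->spinlock);

            /* swap_irq_enable(&flags) */
            local_irq_restore(flags);
            preempt_enable();
    }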

index 8744b21..2b2f6ec 100644
--- a/buffer/buffer_queue.c
+++ b/buffer/buffer_queue.c
@@ -424,7 +424,7 @@ struct swap_subbuffer *get_from_write_list(size_t size, void **ptr_to_write)
                        write_queue.start_ptr->full_buffer_part += size;
 
                        /* Lock rw sync. Should be unlocked in swap_buffer_write() */
-                       sync_lock(&result->buffer_sync);
+                       sync_lock_no_flags(&result->buffer_sync);
                        break;
                /* This subbuffer is not enough => it goes to read list */
                } else {
index 9327c1e..feb544b 100644
--- a/buffer/kernel_operations.h
+++ b/buffer/kernel_operations.h
@@ -62,18 +62,43 @@ static inline void sync_init(struct sync_t *buffer_sync)
        spin_lock_init(&buffer_sync->spinlock);
 }
 
-/* Lock spinlock */
+/* Lock spinlock and save irq flags */
 static inline void sync_lock(struct sync_t *buffer_sync)
 {
        spin_lock_irqsave(&buffer_sync->spinlock, buffer_sync->flags);
 }
 
-/* Unlock spinlock */
+/* Unlock spinlock and restore irq flags */
 static inline void sync_unlock(struct sync_t *buffer_sync)
 {
        spin_unlock_irqrestore(&buffer_sync->spinlock, buffer_sync->flags);
 }
 
+/* Lock spinlock without touching irq flags */
+static inline void sync_lock_no_flags(struct sync_t *buffer_sync)
+{
+       spin_lock(&buffer_sync->spinlock);
+}
+
+/* Unlock spinlock without touching irq flags */
+static inline void sync_unlock_no_flags(struct sync_t *buffer_sync)
+{
+       spin_unlock(&buffer_sync->spinlock);
+}
+
+/* Disable preemption and irqs */
+static inline void swap_irq_disable(unsigned long *flags)
+{
+       preempt_disable();
+       local_irq_save(*flags);
+}
+
+/* Enable preemption and irqs */
+static inline void swap_irq_enable(unsigned long *flags)
+{
+       local_irq_restore(*flags);
+       preempt_enable();
+}
 
 /* SWAP SUBBUFFER */
 
index fb0f72b..211601a 100644
--- a/buffer/swap_buffer_module.c
+++ b/buffer/swap_buffer_module.c
@@ -170,6 +170,7 @@ ssize_t swap_buffer_write(void *data, size_t size)
        int result = E_SB_SUCCESS;
        struct swap_subbuffer *buffer_to_write = NULL;
        void *ptr_to_write = NULL;
+       unsigned long flags = 0;
 
        /* Size sanitization */
        if ((size > subbuffers_size) || (size == 0))
@@ -179,10 +180,15 @@ ssize_t swap_buffer_write(void *data, size_t size)
        if (!(swap_buffer_status & BUFFER_WORK))
                return -E_SB_IS_STOPPED;
 
+       /* We are going to look for a writable buffer, so disable irqs */
+       swap_irq_disable(&flags);
+
        /* Get next write buffer and occupy its semaphore */
        buffer_to_write = get_from_write_list(size, &ptr_to_write);
-       if (!buffer_to_write)
+       if (!buffer_to_write) {
+               swap_irq_enable(&flags);
                return -E_SB_NO_WRITABLE_BUFFERS;
+       }
 
        /* Check for overlapping */
        if (areas_overlap(ptr_to_write, data, size)) {
@@ -204,9 +210,10 @@ ssize_t swap_buffer_write(void *data, size_t size)
                        low_mem_cb();
        }
 
-       /* Unlock sync (Locked in get_from_write_list()) */
+       /* Unlock sync (Locked in get_from_write_list()) and enable irqs */
 buf_write_sem_post:
-       sync_unlock(&buffer_to_write->buffer_sync);
+       sync_unlock_no_flags(&buffer_to_write->buffer_sync);
+       swap_irq_enable(&flags);
 
        return result;
 }
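
Since the irq save/restore window is now contained entirely inside
swap_buffer_write(), callers only need to check the return value. A minimal
caller sketch follows; report_event() is hypothetical, and it assumes the SWAP
buffer module's public header (declaring swap_buffer_write() and the E_SB_*
codes, not shown in this diff) is included.

    #include <linux/types.h>
    #include <linux/errno.h>
    /* Plus the SWAP buffer module's public header for swap_buffer_write()
     * and the E_SB_* error codes (its name is not shown in this diff). */

    /* Hypothetical caller: no irq or lock handling is needed on this side. */
    static int report_event(void *data, size_t len)
    {
            ssize_t ret = swap_buffer_write(data, len);

            if (ret == -E_SB_NO_WRITABLE_BUFFERS)
                    return -ENOSPC; /* no writable subbuffer at the moment */
            if (ret < 0)
                    return -EIO;    /* stopped, bad size, overlapping areas, ... */

            return 0;
    }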