struct queue {
struct swap_subbuffer *start_ptr;
struct swap_subbuffer *end_ptr;
- sync_t queue_sync;
+ struct sync_t queue_sync;
};
/* Write queue */
struct queue write_queue = {
.start_ptr = NULL,
- .end_ptr = NULL
+ .end_ptr = NULL,
+ .queue_sync = {
+ .flags = 0x0
+ }
};
/* Read queue */
struct queue read_queue = {
.start_ptr = NULL,
- .end_ptr = NULL
+ .end_ptr = NULL,
+ .queue_sync = {
+ .flags = 0x0
+ }
};
/* Pointers array. Points to busy buffers */
-static struct swap_buffer **queue_busy = NULL;
+static struct swap_subbuffer **queue_busy = NULL;
/* Store last busy element */
static unsigned int queue_busy_last_element;
static size_t queue_subbuffer_size = 0;
/* Busy list sync */
-static sync_t buffer_busy_sync;
+static struct sync_t buffer_busy_sync = {
+ .flags = 0x0
+};
/* Memory pages count in one subbuffer */
static int pages_order_in_subbuffer = 0;
/* Callbacks are called at the end of the function to prevent deadlocks */
struct queue callback_queue = {
.start_ptr = NULL,
- .end_ptr = NULL
+ .end_ptr = NULL,
+ .queue_sync = {
+ .flags = 0x0
+ }
};
struct swap_subbuffer *tmp_buffer = NULL;
break;
/* This subbuffer is not enough => it goes to read list */
} else {
-
result = write_queue.start_ptr;
/* If we reached end of the list */
#include <linux/mm.h>

/* MESSAGES */
/* Severity-tagged printk wrappers; all output is prefixed with the
 * module name so it can be grepped out of the kernel log. */
#define print_debug(msg, args...) \
	printk(KERN_DEBUG "SWAP_BUFFER DEBUG : " msg, ##args)
#define print_msg(msg, args...) \
	printk(KERN_INFO "SWAP_BUFFER : " msg, ##args)
#define print_warn(msg, args...) \
	printk(KERN_WARNING "SWAP_BUFFER WARNING : " msg, ##args)
#define print_err(msg, args...) \
	printk(KERN_ERR "SWAP_BUFFER ERROR : " msg, ##args)
#define print_crit(msg, args...) \
	printk(KERN_CRIT "SWAP_BUFFER CRITICAL : " msg, ##args)
/* LOCKS */
/* Using spinlocks as sync primitives */
-typedef spinlock_t sync_t;
-
-/* Spinlock flags */
-static unsigned long flags;
+struct sync_t {
+ spinlock_t spinlock;
+ unsigned long flags;
+};
/* Spinlocks initialization */
-static inline void sync_init(sync_t *buffer_sync)
+static inline void sync_init(struct sync_t *buffer_sync)
{
- spin_lock_init(buffer_sync);
+ spin_lock_init(&buffer_sync->spinlock);
}
/* Lock spinlock */
-static inline void sync_lock(sync_t *buffer_sync)
+static inline void sync_lock(struct sync_t *buffer_sync)
{
- spin_lock_irqsave(buffer_sync, flags);
+ spin_lock_irqsave(&buffer_sync->spinlock, buffer_sync->flags);
}
/* Unlock spinlock */
-static inline void sync_unlock(sync_t *buffer_sync)
+static inline void sync_unlock(struct sync_t *buffer_sync)
{
- spin_unlock_irqrestore(buffer_sync, flags);
+ spin_unlock_irqrestore(&buffer_sync->spinlock, buffer_sync->flags);
}
return nearest_power_of_two(aligned_size / PAGE_SIZE);
}
#endif /* __KERNEL_OPERATIONS_H__ */