#define _vmm_raw_spin_lock(x) do {}while(0)
#define _vmm_raw_spin_unlock(x) do {}while(0)
#else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
#define _vmm_raw_spin_lock(x)						\
	do {								\
		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
		[...]

#define _vmm_raw_spin_unlock(x)					\
	do { barrier();						\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }		\
	while (0)
#endif
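The elided body of _vmm_raw_spin_lock spins with cmpxchg until the lock
word moves from 0 to 1; unlock is a plain store of 0 behind a compiler
barrier. A minimal user-space sketch of that pattern, with GCC's __sync
builtins standing in for the ia64 cmpxchg4.acq intrinsic, and with
sketch_* names that are illustrative, not from the patch:

/*
 * Sketch only. vmm_spinlock_t is repeated so this stands alone;
 * __sync_val_compare_and_swap approximates ia64_cmpxchg4_acq.
 */
typedef struct {
	volatile unsigned int lock;
} vmm_spinlock_t;

static void sketch_spin_lock(vmm_spinlock_t *x)
{
	volatile unsigned int *p = &x->lock;

	/* Atomically take the lock word from 0 (free) to 1 (held). */
	while (__sync_val_compare_and_swap(p, 0u, 1u) != 0u) {
		/* Contended: spin with plain reads until it is freed. */
		while (*p)
			;
	}
}

static void sketch_spin_unlock(vmm_spinlock_t *x)
{
	__sync_synchronize();	/* the kernel macro uses barrier(), a compiler barrier */
	x->lock = 0;
}

The payoff of the new typedef is that this one-word layout is now owned
by the VMM itself rather than borrowed from the host's spinlock_t, whose
raw_lock.lock internals the old unlock macro reached into.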
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
enum {
	I_TLB = 1,
	D_TLB = 2
[...]

	return ;
}
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
{
	_vmm_raw_spin_lock(lock);
}
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
{
	_vmm_raw_spin_unlock(lock);
}
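With the raw macros hidden behind these two functions, callers never
touch the lock word directly. A hypothetical call site, assuming a
statically allocated lock (a zeroed lock word is unlocked, matching
what the unlock macro stores); sample_lock and sample_update are
illustrative, not from the patch:

/* Hypothetical caller, sketched against the API above. */
static vmm_spinlock_t sample_lock;	/* zero-initialized: unlocked */

static void sample_update(void)
{
	vmm_spin_lock(&sample_lock);
	/* ... read/modify shared VMM state ... */
	vmm_spin_unlock(&sample_lock);
}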
[...]

{
	u64 i, dirty_pages = 1;
	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
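The last line turns one mapping of size 2^ps bytes into a count of dirty
base pages: a mapping no larger than a base page dirties one page, a
larger one dirties 2^(ps - PAGE_SHIFT). A worked example, assuming
PAGE_SHIFT is 14 (16KB base pages, a common ia64 configuration);
EXAMPLE_PAGE_SHIFT and pages_covered are illustrative names:

/* Illustration only, not kernel code. */
#define EXAMPLE_PAGE_SHIFT 14	/* assumed: 16KB base pages */

static unsigned long long pages_covered(unsigned int ps)
{
	unsigned long long dirty_pages = 1;

	dirty_pages <<= ps <= EXAMPLE_PAGE_SHIFT ? 0 : ps - EXAMPLE_PAGE_SHIFT;
	return dirty_pages;
}

/* pages_covered(14) == 1; pages_covered(24) == 1024, i.e. one 16MB
 * mapping marks 1024 16KB pages dirty in the bitmap. */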