projects
/
platform
/
kernel
/
linux-rpi.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
scripts: mkbootimg_rpi4: Fix url path to tizen_7.0
[platform/kernel/linux-rpi.git]
/
fs
/
dax.c
diff --git a/fs/dax.c b/fs/dax.c
index 5b47834..d5d7b93 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -144,6 +144,16 @@
struct wait_exceptional_entry_queue {
struct exceptional_entry_key key;
};
struct exceptional_entry_key key;
};
+/**
+ * enum dax_wake_mode: waitqueue wakeup behaviour
+ * @WAKE_ALL: wake all waiters in the waitqueue
+ * @WAKE_NEXT: wake only the first waiter in the waitqueue
+ */
+enum dax_wake_mode {
+ WAKE_ALL,
+ WAKE_NEXT,
+};
+
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
void *entry, struct exceptional_entry_key *key)
{
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
void *entry, struct exceptional_entry_key *key)
{
@@ -182,7 +192,8 @@
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
* The important information it's conveying is whether the entry at
* this index used to be a PMD entry.
*/
* The important information it's conveying is whether the entry at
* this index used to be a PMD entry.
*/
-static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
+static void dax_wake_entry(struct xa_state *xas, void *entry,
+ enum dax_wake_mode mode)
{
struct exceptional_entry_key key;
wait_queue_head_t *wq;
{
struct exceptional_entry_key key;
wait_queue_head_t *wq;
@@ -196,7 +207,7 @@
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
* must be in the waitqueue and the following check will see them.
*/
if (waitqueue_active(wq))
* must be in the waitqueue and the following check will see them.
*/
if (waitqueue_active(wq))
-		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
+		__wake_up(wq, TASK_NORMAL, mode == WAKE_ALL ? 0 : 1, &key);
}
/*
}
/*
@@ -264,11 +275,11 @@
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
finish_wait(wq, &ewait.wait);
}
finish_wait(wq, &ewait.wait);
}
-static void put_unlocked_entry(struct xa_state *xas, void *entry)
+static void put_unlocked_entry(struct xa_state *xas, void *entry,
+ enum dax_wake_mode mode)
{
{
- /* If we were the only waiter woken, wake the next one */
if (entry && !dax_is_conflict(entry))
if (entry && !dax_is_conflict(entry))
-		dax_wake_entry(xas, entry, false);
+		dax_wake_entry(xas, entry, mode);
}
/*
}
/*
@@ -286,7 +297,7 @@
static void dax_unlock_entry(struct xa_state *xas, void *entry)
old = xas_store(xas, entry);
xas_unlock_irq(xas);
BUG_ON(!dax_is_locked(old));
old = xas_store(xas, entry);
xas_unlock_irq(xas);
BUG_ON(!dax_is_locked(old));
-	dax_wake_entry(xas, entry, false);
+	dax_wake_entry(xas, entry, WAKE_NEXT);
}
/*
}
/*
@@ -477,10 +488,11 @@
static void *grab_mapping_entry(struct xa_state *xas,
struct address_space *mapping, unsigned int order)
{
unsigned long index = xas->xa_index;
struct address_space *mapping, unsigned int order)
{
unsigned long index = xas->xa_index;
-	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
+	bool pmd_downgrade;	/* splitting PMD entry into PTE entries? */
void *entry;
retry:
void *entry;
retry:
+ pmd_downgrade = false;
xas_lock_irq(xas);
entry = get_unlocked_entry(xas, order);
xas_lock_irq(xas);
entry = get_unlocked_entry(xas, order);
@@ -524,7 +536,7 @@
retry:
dax_disassociate_entry(entry, mapping, false);
xas_store(xas, NULL); /* undo the PMD join */
dax_disassociate_entry(entry, mapping, false);
xas_store(xas, NULL); /* undo the PMD join */
-	dax_wake_entry(xas, entry, true);
+	dax_wake_entry(xas, entry, WAKE_ALL);
mapping->nrexceptional--;
entry = NULL;
xas_set(xas, index);
mapping->nrexceptional--;
entry = NULL;
xas_set(xas, index);
@@ -622,7 +634,7 @@
struct page *dax_layout_busy_page_range(struct address_space *mapping,
entry = get_unlocked_entry(&xas, 0);
if (entry)
page = dax_busy_page(entry);
entry = get_unlocked_entry(&xas, 0);
if (entry)
page = dax_busy_page(entry);
-		put_unlocked_entry(&xas, entry);
+		put_unlocked_entry(&xas, entry, WAKE_NEXT);
if (page)
break;
if (++scanned % XA_CHECK_SCHED)
if (page)
break;
if (++scanned % XA_CHECK_SCHED)
@@ -664,7 +676,7 @@
static int __dax_invalidate_entry(struct address_space *mapping,
mapping->nrexceptional--;
ret = 1;
out:
mapping->nrexceptional--;
ret = 1;
out:
-	put_unlocked_entry(&xas, entry);
+	put_unlocked_entry(&xas, entry, WAKE_ALL);
xas_unlock_irq(&xas);
return ret;
}
xas_unlock_irq(&xas);
return ret;
}
@@ -810,12 +822,12 @@
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
address = pgoff_address(index, vma);
/*
address = pgoff_address(index, vma);
/*
-		 * Note because we provide range to follow_pte_pmd it will
-		 * call mmu_notifier_invalidate_range_start() on our behalf
-		 * before taking any lock.
+		 * follow_invalidate_pte() will use the range to call
+		 * mmu_notifier_invalidate_range_start() on our behalf before
+		 * taking any lock.
*/
*/
-		if (follow_pte_pmd(vma->vm_mm, address, &range,
-				   &ptep, &pmdp, &ptl))
+		if (follow_invalidate_pte(vma->vm_mm, address, &range, &ptep,
+					  &pmdp, &ptl))
continue;
/*
continue;
/*
@@ -937,13 +949,13 @@
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
xas_lock_irq(xas);
xas_store(xas, entry);
xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
xas_lock_irq(xas);
xas_store(xas, entry);
xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
-	dax_wake_entry(xas, entry, false);
+	dax_wake_entry(xas, entry, WAKE_NEXT);
trace_dax_writeback_one(mapping->host, index, count);
return ret;
put_unlocked:
trace_dax_writeback_one(mapping->host, index, count);
return ret;
put_unlocked:
-	put_unlocked_entry(xas, entry);
+	put_unlocked_entry(xas, entry, WAKE_NEXT);
return ret;
}
return ret;
}
@@ -1684,7 +1696,7 @@
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
/* Did we race with someone splitting entry or so? */
if (!entry || dax_is_conflict(entry) ||
(order == 0 && !dax_is_pte_entry(entry))) {
-		put_unlocked_entry(&xas, entry);
+		put_unlocked_entry(&xas, entry, WAKE_NEXT);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
VM_FAULT_NOPAGE);
xas_unlock_irq(&xas);
trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
VM_FAULT_NOPAGE);