Commit 9f32d221 authored by Matthew Wilcox

dax: Convert dax_lock_mapping_entry to XArray

Instead of always retrying when we slept, only retry if the page has
moved.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
parent 9fc747f6
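
The conversion leans on two helpers introduced earlier in this series: get_unlocked_entry(), which sleeps until the DAX_LOCKED bit clears and returns whatever entry is then current, and dax_lock_entry(), which stores the entry back with the lock bit set. A condensed sketch of their shape, as an aid to reading the hunks below — not the verbatim fs/dax.c source; the real wait parks on an exceptional-entry waitqueue keyed on the xa_state, and wait_entry_unlocked_sketch() here is a hypothetical stand-in for that machinery:

static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;

	for (;;) {
		entry = xas_load(xas);
		if (!entry || !dax_is_locked(entry))
			return entry;
		/* stand-in: drop xa_lock, schedule() until the holder
		 * clears DAX_LOCKED, then retake xa_lock and re-check */
		wait_entry_unlocked_sketch(xas);
	}
}

static void dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);

	/* store the same entry back with the lock bit set */
	xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}

Because every exit from the new loop is an explicit return, the did_lock flag, the rcu_read_lock()/rcu_read_unlock() pair, and the entry_wait_revalidate() callback all go away.
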
@@ -99,6 +99,17 @@ static void *dax_make_locked(unsigned long pfn, unsigned long flags)
 			DAX_LOCKED);
 }
 
+static void *dax_make_entry(pfn_t pfn, unsigned long flags)
+{
+	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
+}
+
+static void *dax_make_page_entry(struct page *page)
+{
+	pfn_t pfn = page_to_pfn_t(page);
+	return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
+}
+
 static bool dax_is_locked(void *entry)
 {
 	return xa_to_value(entry) & DAX_LOCKED;
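
dax_make_entry() packs the pfn above the flag bits and hands the result to xa_mk_value(), so the whole word is stored in the XArray as a value entry. A standalone userspace sketch of the round trip — the constants mirror fs/dax.c (DAX_SHIFT of 4, flags in the low bits) and the XArray convention that a value entry is (v << 1) | 1, but everything is redefined locally as an assumption so the example compiles on its own:

#include <stdio.h>

/* Local stand-ins mirroring fs/dax.c and the XArray value-entry
 * encoding; redefined here so the sketch is self-contained. */
#define DAX_SHIFT	4
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)

static unsigned long xa_mk_value(unsigned long v) { return (v << 1) | 1; }
static unsigned long xa_to_value(unsigned long e) { return e >> 1; }

static unsigned long dax_to_pfn(unsigned long entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x12345;
	/* what dax_make_page_entry() would build for a PMD head page */
	unsigned long entry = xa_mk_value(DAX_PMD | (pfn << DAX_SHIFT));

	printf("pfn=%#lx pmd=%d locked=%d\n", dax_to_pfn(entry),
	       !!(xa_to_value(entry) & DAX_PMD),
	       !!(xa_to_value(entry) & DAX_LOCKED));
	return 0;
}

This prints "pfn=0x12345 pmd=1 locked=0": the pfn and flags survive the encoding intact.
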
@@ -487,33 +498,16 @@ static struct page *dax_busy_page(void *entry)
 	return NULL;
 }
 
-static bool entry_wait_revalidate(void)
-{
-	rcu_read_unlock();
-	schedule();
-	rcu_read_lock();
-
-	/*
-	 * Tell __get_unlocked_mapping_entry() to take a break, we need
-	 * to revalidate page->mapping after dropping locks
-	 */
-	return true;
-}
-
 bool dax_lock_mapping_entry(struct page *page)
 {
-	pgoff_t index;
-	struct inode *inode;
-	bool did_lock = false;
-	void *entry = NULL, **slot;
-	struct address_space *mapping;
+	XA_STATE(xas, NULL, 0);
+	void *entry;
 
-	rcu_read_lock();
 	for (;;) {
-		mapping = READ_ONCE(page->mapping);
+		struct address_space *mapping = READ_ONCE(page->mapping);
 
 		if (!dax_mapping(mapping))
-			break;
+			return false;
 
 		/*
 		 * In the device-dax case there's no need to lock, a
@@ -522,47 +516,40 @@ bool dax_lock_mapping_entry(struct page *page)
 		 * otherwise we would not have a valid pfn_to_page()
 		 * translation.
 		 */
-		inode = mapping->host;
-		if (S_ISCHR(inode->i_mode)) {
-			did_lock = true;
-			break;
-		}
+		if (S_ISCHR(mapping->host->i_mode))
+			return true;
 
-		xa_lock_irq(&mapping->i_pages);
+		xas.xa = &mapping->i_pages;
+		xas_lock_irq(&xas);
 		if (mapping != page->mapping) {
-			xa_unlock_irq(&mapping->i_pages);
+			xas_unlock_irq(&xas);
 			continue;
 		}
-		index = page->index;
-
-		entry = __get_unlocked_mapping_entry(mapping, index, &slot,
-				entry_wait_revalidate);
-		if (!entry) {
-			xa_unlock_irq(&mapping->i_pages);
-			break;
-		} else if (IS_ERR(entry)) {
-			WARN_ON_ONCE(PTR_ERR(entry) != -EAGAIN);
-			continue;
+		xas_set(&xas, page->index);
+		entry = xas_load(&xas);
+		if (dax_is_locked(entry)) {
+			entry = get_unlocked_entry(&xas);
+			/* Did the page move while we slept? */
+			if (dax_to_pfn(entry) != page_to_pfn(page)) {
+				xas_unlock_irq(&xas);
+				continue;
+			}
 		}
-		lock_slot(mapping, slot);
-		did_lock = true;
-		xa_unlock_irq(&mapping->i_pages);
-		break;
+		dax_lock_entry(&xas, entry);
+		xas_unlock_irq(&xas);
+		return true;
 	}
-	rcu_read_unlock();
-
-	return did_lock;
 }
 
 void dax_unlock_mapping_entry(struct page *page)
 {
 	struct address_space *mapping = page->mapping;
-	struct inode *inode = mapping->host;
+	XA_STATE(xas, &mapping->i_pages, page->index);
 
-	if (S_ISCHR(inode->i_mode))
+	if (S_ISCHR(mapping->host->i_mode))
 		return;
 
-	unlock_mapping_entry(mapping, page->index);
+	dax_unlock_entry(&xas, dax_make_page_entry(page));
 }
 
 /*
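
For callers the pair still behaves like a sleeping trylock keyed on the page. A simplified sketch of how a consumer such as the memory-failure path might use the API — a hypothetical caller, not the actual mm/memory-failure.c source:

/* Hypothetical caller: take the DAX entry lock so page->mapping and
 * page->index stay stable while the page is examined. */
static int handle_dax_page(struct page *page)
{
	if (!dax_lock_mapping_entry(page))
		return -EBUSY;	/* mapping already gone or not DAX */

	/* ... inspect the page under the entry lock ... */

	dax_unlock_mapping_entry(page);
	return 0;
}
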