Commit fda490d3 authored by Matthew Wilcox

dax: Fix dax_unlock_mapping_entry for PMD pages

Device DAX PMD pages do not set the PageHead bit for compound pages.
Fix for now by retrieving the PMD bit from the entry, but eventually we
will be passed the page size by the caller.
Reported-by: Dan Williams
Fixes: 9f32d221 ("dax: Convert dax_lock_mapping_entry to XArray")
Signed-off-by: Matthew Wilcox
parent c5bbd451
......@@ -98,12 +98,6 @@ static void *dax_make_entry(pfn_t pfn, unsigned long flags)
return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
/*
 * Build a DAX XArray entry describing @page, inferring the entry size
 * from the page itself.
 *
 * KNOWN BUG (per the commit message above): device-DAX PMD pages do not
 * set the PageHead bit for compound pages, so this helper misclassifies
 * them as PTE-sized entries.  The fix removes this helper entirely and
 * derives the PMD bit from the entry already stored in the XArray
 * instead (see dax_unlock_mapping_entry below).
 */
static void *dax_make_page_entry(struct page *page)
pfn_t pfn = page_to_pfn_t(page);
return dax_make_entry(pfn, PageHead(page) ? DAX_PMD : 0);
static bool dax_is_locked(void *entry)
return xa_to_value(entry) & DAX_LOCKED;
......@@ -116,12 +110,12 @@ static unsigned int dax_entry_order(void *entry)
return 0;
static int dax_is_pmd_entry(void *entry)
static unsigned long dax_is_pmd_entry(void *entry)
return xa_to_value(entry) & DAX_PMD;
static int dax_is_pte_entry(void *entry)
static bool dax_is_pte_entry(void *entry)
return !(xa_to_value(entry) & DAX_PMD);
/*
 * dax_unlock_mapping_entry() -- drop the lock taken on the XArray entry
 * that covers @page.  NOTE(review): this is a diff hunk from a scraped
 * commit page; pre-fix and post-fix lines are interleaved below, and the
 * hunk does not show the whole function body.
 */
......@@ -413,11 +407,16 @@ void dax_unlock_mapping_entry(struct page *page)
struct address_space *mapping = page->mapping;
/* XArray walk state rooted at this page's slot in the mapping. */
XA_STATE(xas, &mapping->i_pages, page->index);
void *entry;
/* Character-device (device-DAX) inodes take an early-out here; the
 * branch body is outside the visible hunk context. */
if (S_ISCHR(mapping->host->i_mode))
/* Pre-fix (removed) line: rebuilt the entry from the page via
 * dax_make_page_entry(), which relies on PageHead() -- a bit that
 * device-DAX PMD pages do not set (the bug this commit fixes). */
dax_unlock_entry(&xas, dax_make_page_entry(page));
/* Post-fix lines: load the entry already stored in the XArray and take
 * the PMD bit from it, avoiding any dependence on PageHead(). */
entry = xas_load(&xas);
entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
dax_unlock_entry(&xas, entry);
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment