Commit 27359fd6 authored by Matthew Wilcox, committed by Dan Williams

dax: Fix unlock mismatch with updated API

Internal to dax_unlock_mapping_entry(), dax_unlock_entry() is used to
store a replacement entry in the Xarray at the given xas-index with the
DAX_LOCKED bit clear. When called, dax_unlock_entry() expects the unlocked
value of the entry relative to the current Xarray state to be specified.

In most contexts dax_unlock_entry() is operating in the same scope as
the matched dax_lock_entry(). However, in the dax_unlock_mapping_entry()
case the implementation needs to recall the original entry. In the case
where the original entry is a 'pmd' entry it is possible that the pfn
performed to do the lookup is misaligned to the value retrieved in the
Xarray.

Change the api to return the unlock cookie from dax_lock_page() and pass
it to dax_unlock_page(). This fixes a bug where dax_unlock_page() was
assuming that the page was PMD-aligned if the entry was a PMD entry with
signatures like:

 WARNING: CPU: 38 PID: 1396 at fs/dax.c:340 dax_insert_entry+0x2b2/0x2d0
 RIP: 0010:dax_insert_entry+0x2b2/0x2d0
 Call Trace:
  ? up_read+0x1c/0xa0

Fixes: 9f32d221 ("dax: Convert dax_lock_mapping_entry to XArray")
Reported-by: Dan Williams
Signed-off-by: Matthew Wilcox
Tested-by: Dan Williams
Reviewed-by: Jan Kara
Signed-off-by: Dan Williams
parent 55e56f06
......@@ -379,20 +379,20 @@ static struct page *dax_busy_page(void *entry)
* @page: The page whose entry we want to lock
* Context: Process context.
* Return: %true if the entry was locked or does not need to be locked.
* Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
* not be locked.
bool dax_lock_mapping_entry(struct page *page)
dax_entry_t dax_lock_page(struct page *page)
XA_STATE(xas, NULL, 0);
void *entry;
bool locked;
/* Ensure page->mapping isn't freed while we look at it */
for (;;) {
struct address_space *mapping = READ_ONCE(page->mapping);
locked = false;
entry = NULL;
if (!mapping || !dax_mapping(mapping))
......@@ -403,7 +403,7 @@ bool dax_lock_mapping_entry(struct page *page)
* otherwise we would not have a valid pfn_to_page()
* translation.
locked = true;
entry = (void *)~0UL;
if (S_ISCHR(mapping->host->i_mode))
......@@ -426,23 +426,18 @@ bool dax_lock_mapping_entry(struct page *page)
return locked;
return (dax_entry_t)entry;
void dax_unlock_mapping_entry(struct page *page)
void dax_unlock_page(struct page *page, dax_entry_t cookie)
struct address_space *mapping = page->mapping;
XA_STATE(xas, &mapping->i_pages, page->index);
void *entry;
if (S_ISCHR(mapping->host->i_mode))
entry = xas_load(&xas);
entry = dax_make_entry(page_to_pfn_t(page), dax_is_pmd_entry(entry));
dax_unlock_entry(&xas, entry);
dax_unlock_entry(&xas, (void *)cookie);
......@@ -7,6 +7,8 @@
#include <linux/radix-tree.h>
#include <asm/pgtable.h>
typedef unsigned long dax_entry_t;
struct iomap_ops;
struct dax_device;
struct dax_operations {
......@@ -88,8 +90,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct block_device *bdev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
bool dax_lock_mapping_entry(struct page *page);
void dax_unlock_mapping_entry(struct page *page);
dax_entry_t dax_lock_page(struct page *page);
void dax_unlock_page(struct page *page, dax_entry_t cookie);
static inline bool bdev_dax_supported(struct block_device *bdev,
int blocksize)
......@@ -122,14 +124,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
static inline bool dax_lock_mapping_entry(struct page *page)
static inline dax_entry_t dax_lock_page(struct page *page)
if (IS_DAX(page->mapping->host))
return true;
return false;
return ~0UL;
return 0;
static inline void dax_unlock_mapping_entry(struct page *page)
static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
......@@ -1161,6 +1161,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
int rc = -EBUSY;
loff_t start;
dax_entry_t cookie;
* Prevent the inode from being freed while we are interrogating
......@@ -1169,7 +1170,8 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
* also prevents changes to the mapping of this pfn until
* poison signaling is complete.
if (!dax_lock_mapping_entry(page))
cookie = dax_lock_page(page);
if (!cookie)
goto out;
if (hwpoison_filter(page)) {
......@@ -1220,7 +1222,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
kill_procs(&tokill, flags & MF_MUST_KILL, !unmap_success, pfn, flags);
rc = 0;
dax_unlock_page(page, cookie);
/* drop pgmap ref acquired in caller */
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment