dax: guarantee page aligned results from bdev_direct_access()
If a ->direct_access() implementation ever returns a map count less than
PAGE_SIZE, catch the error in bdev_direct_access().  This simplifies error
checking in upper layers.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reported-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0e749e5424
commit fe683adabf
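The check added below can be illustrated outside the kernel. The following is a minimal user-space sketch, not the kernel code itself: a zero count is reported as -ERANGE, and a positive count with bits set below PAGE_SIZE is rejected as -ENXIO. The helper name check_direct_access_count is illustrative, PAGE_SIZE is hardcoded to 4096, and the errno values are written out literally for the example.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Mirrors the added test: reject counts that are not whole pages. */
static long check_direct_access_count(long avail)
{
	if (!avail)
		return -34;                        /* -ERANGE: nothing mapped */
	if (avail > 0 && avail & ~PAGE_MASK)
		return -6;                         /* -ENXIO: not page aligned */
	return avail;                              /* whole pages (or a negative errno) pass through */
}

int main(void)
{
	printf("%ld\n", check_direct_access_count(2 * PAGE_SIZE));   /* 8192: two whole pages */
	printf("%ld\n", check_direct_access_count(PAGE_SIZE + 512)); /* -6: partial page rejected */
	printf("%ld\n", check_direct_access_count(0));               /* -34: empty mapping */
	return 0;
}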
fs/block_dev.c
@@ -494,6 +494,8 @@ long bdev_direct_access(struct block_device *bdev, sector_t sector,
 	avail = ops->direct_access(bdev, sector, addr, pfn);
 	if (!avail)
 		return -ERANGE;
+	if (avail > 0 && avail & ~PAGE_MASK)
+		return -ENXIO;
 	return min(avail, size);
 }
 EXPORT_SYMBOL_GPL(bdev_direct_access);
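The changelog's point about simpler error checking in upper layers can be sketched as follows, again in user space with a stubbed provider. Because a positive return is now a multiple of PAGE_SIZE (given a page-aligned request), a caller can convert it straight to a page count with no partial-tail handling. The names fake_direct_access and map_pages are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1L << PAGE_SHIFT)

/* Stand-in for the block layer: returns mapped bytes or a negative errno. */
static long fake_direct_access(long request)
{
	long avail = 3 * PAGE_SIZE;               /* pretend the device maps three pages */
	return request < avail ? request : avail;
}

/* Hypothetical upper layer: only the error case needs checking. */
static long map_pages(long request)
{
	long avail = fake_direct_access(request);

	if (avail < 0)
		return avail;                     /* propagate the error */

	/* No remainder to handle: a page-aligned request yields whole pages. */
	return avail >> PAGE_SHIFT;
}

int main(void)
{
	printf("pages mapped: %ld\n", map_pages(2 * PAGE_SIZE));  /* 2 */
	printf("pages mapped: %ld\n", map_pages(8 * PAGE_SIZE));  /* 3 */
	return 0;
}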
fs/dax.c
@@ -52,7 +52,6 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 		sz = min_t(long, count, SZ_128K);
 		clear_pmem(addr, sz);
 		size -= sz;
-		BUG_ON(sz & 511);
 		sector += sz / 512;
 		cond_resched();
 	} while (size);
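The dropped BUG_ON(sz & 511) follows from the new guarantee: once the count returned by bdev_direct_access() is page aligned, sz = min_t(long, count, SZ_128K) is always a multiple of 512, so the assertion could never fire. A small user-space sketch of that arithmetic, with PAGE_SIZE and SZ_128K values assumed here only for the example:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096L
#define SZ_128K   (128L * 1024)

int main(void)
{
	/* For any page-aligned count the block driver might now return... */
	for (long count = PAGE_SIZE; count <= 8 * SZ_128K; count += PAGE_SIZE) {
		long sz = count < SZ_128K ? count : SZ_128K;  /* min_t(long, count, SZ_128K) */
		assert((sz & 511) == 0);   /* ...the removed BUG_ON condition never triggers */
	}
	puts("sz is always a 512-byte multiple for page-aligned counts");
	return 0;
}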