hugetlb: fix i_blocks accounting
For administrative purposes, we want to query the actual block usage of a hugetlbfs file via fstat(2). Currently, hugetlbfs always reports 0 blocks. Fix that up, since the kernel already has all the information needed to track it properly.

Signed-off-by: Ken Chen <kenchen@google.com>
Acked-by: Adam Litke <agl@us.ibm.com>
Cc: Badari Pulavarty <pbadari@us.ibm.com>
Cc: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Commit: 45c682a68a
Parent: 8cde045c7e
include/linux/hugetlb.h:

@@ -168,6 +168,8 @@ struct file *hugetlb_file_setup(const char *name, size_t);
 int hugetlb_get_quota(struct address_space *mapping, long delta);
 void hugetlb_put_quota(struct address_space *mapping, long delta);
+
+#define BLOCKS_PER_HUGEPAGE	(HPAGE_SIZE / 512)
 
 static inline int is_file_hugepages(struct file *file)
 {
 	if (file->f_op == &hugetlbfs_file_operations)
|
mm/hugetlb.c (10 lines changed):
@@ -801,6 +801,7 @@ retry:
 
 	if (vma->vm_flags & VM_SHARED) {
 		int err;
+		struct inode *inode = mapping->host;
 
 		err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
 		if (err) {
@@ -809,6 +810,10 @@ retry:
 				goto retry;
 			goto out;
 		}
+
+		spin_lock(&inode->i_lock);
+		inode->i_blocks += BLOCKS_PER_HUGEPAGE;
+		spin_unlock(&inode->i_lock);
 	} else
 		lock_page(page);
 }
@@ -1160,6 +1165,11 @@ int hugetlb_reserve_pages(struct inode *inode, long from, long to)
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
|
||||
{
|
||||
long chg = region_truncate(&inode->i_mapping->private_list, offset);
|
||||
|
||||
spin_lock(&inode->i_lock);
|
||||
inode->i_blocks -= BLOCKS_PER_HUGEPAGE * freed;
|
||||
spin_unlock(&inode->i_lock);
|
||||
|
||||
hugetlb_put_quota(inode->i_mapping, (chg - freed));
|
||||
hugetlb_acct_memory(-(chg - freed));
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue