fs: export inode_to_bdi and use it in favor of mapping->backing_dev_info
Now that we got rid of the bdi abuse on character devices we can always use sb->s_bdi to get at the backing_dev_info for a file, except for the block device special case. Export inode_to_bdi and replace uses of mapping->backing_dev_info with it to prepare for the removal of mapping->backing_dev_info.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 26ff13047e
commit de1414a654
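For reference, the exported helper is visible in the two hunks below that drop the static inline from inode_to_bdi(); reassembled, it looks roughly like this. The CONFIG_BLOCK branch sits in context the diff does not show, so its body here is an assumption based on the "block device special case" the commit message mentions, not a quote of the patch:

struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
#ifdef CONFIG_BLOCK
	/*
	 * Block device special case: assumed shape, since the diff below
	 * elides these lines. Block device inodes get their bdi from the
	 * request queue rather than from sb->s_bdi.
	 */
	if (sb_is_blkdev_sb(sb))
		return blk_get_backing_dev_info(I_BDEV(inode));
#endif
	return sb->s_bdi;
}
EXPORT_SYMBOL_GPL(inode_to_bdi);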
@@ -1746,7 +1746,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,

 	mutex_lock(&inode->i_mutex);

-	current->backing_dev_info = inode->i_mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err) {
 		mutex_unlock(&inode->i_mutex);
@@ -945,7 +945,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	mutex_lock(&inode->i_mutex);

 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = file->f_mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);

 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
@@ -170,7 +170,7 @@ static void ext2_preread_inode(struct inode *inode)
 	struct ext2_group_desc * gdp;
 	struct backing_dev_info *bdi;

-	bdi = inode->i_mapping->backing_dev_info;
+	bdi = inode_to_bdi(inode);
 	if (bdi_read_congested(bdi))
 		return;
 	if (bdi_write_congested(bdi))
@@ -334,7 +334,7 @@ static void save_error_info(struct super_block *sb, const char *func,
 static int block_device_ejected(struct super_block *sb)
 {
 	struct inode *bd_inode = sb->s_bdev->bd_inode;
-	struct backing_dev_info *bdi = bd_inode->i_mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(bd_inode);

 	return bdi->dev == NULL;
 }
@@ -66,7 +66,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(writeback_in_progress);

-static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
+struct backing_dev_info *inode_to_bdi(struct inode *inode)
 {
 	struct super_block *sb = inode->i_sb;
 #ifdef CONFIG_BLOCK
@@ -75,6 +75,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
 #endif
 	return sb->s_bdi;
 }
+EXPORT_SYMBOL_GPL(inode_to_bdi);

 static inline struct inode *wb_inode(struct list_head *head)
 {
@@ -1159,7 +1159,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	mutex_lock(&inode->i_mutex);

 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);

 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
@@ -1464,7 +1464,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 {
 	struct inode *inode = req->inode;
 	struct fuse_inode *fi = get_fuse_inode(inode);
-	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(inode);
 	int i;

 	list_del(&req->writepages_entry);
@@ -1658,7 +1658,7 @@ static int fuse_writepage_locked(struct page *page)
 	req->end = fuse_writepage_end;
 	req->inode = inode;

-	inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

 	spin_lock(&fc->lock);
@@ -1768,7 +1768,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,

 	if (old_req->num_pages == 1 && (old_req->state == FUSE_REQ_INIT ||
 					old_req->state == FUSE_REQ_PENDING)) {
-		struct backing_dev_info *bdi = page->mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);

 		copy_highpage(old_req->pages[0], page);
 		spin_unlock(&fc->lock);
@@ -1872,7 +1872,7 @@ static int fuse_writepages_fill(struct page *page,
 	req->page_descs[req->num_pages].offset = 0;
 	req->page_descs[req->num_pages].length = PAGE_SIZE;

-	inc_bdi_stat(page->mapping->backing_dev_info, BDI_WRITEBACK);
+	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);

 	err = 0;
@@ -289,7 +289,7 @@ continue_unlock:
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;

-			trace_wbc_writepage(wbc, mapping->backing_dev_info);
+			trace_wbc_writepage(wbc, inode_to_bdi(inode));

 			ret = __gfs2_jdata_writepage(page, wbc);
 			if (unlikely(ret)) {
@@ -743,7 +743,7 @@ static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_sbd *sdp = GFS2_SB(inode);
 	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
-	struct backing_dev_info *bdi = metamapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
 	int ret = 0;

 	if (wbc->sync_mode == WB_SYNC_ALL)
@@ -1081,7 +1081,7 @@ mds_commit:
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
@@ -786,7 +786,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
 	spin_unlock(cinfo->lock);
 	if (!cinfo->dreq) {
 		inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-		inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+		inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 			     BDI_RECLAIMABLE);
 		__mark_inode_dirty(req->wb_context->dentry->d_inode,
 				   I_DIRTY_DATASYNC);
@@ -853,7 +853,7 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
-	dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE);
+	dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
 }

 /* Called holding inode (/cinfo) lock */
@@ -1564,7 +1564,7 @@ void nfs_retry_commit(struct list_head *page_list,
 		nfs_mark_request_commit(req, lseg, cinfo);
 		if (!cinfo->dreq) {
 			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
+			dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
 				     BDI_RECLAIMABLE);
 		}
 		nfs_unlock_and_release_request(req);
@@ -19,6 +19,7 @@
  * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */

+#include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/gfp.h>
 #include <linux/pagemap.h>
@@ -2091,7 +2092,7 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
 	count = iov_length(iov, nr_segs);
 	pos = *ppos;
 	/* We can write back this queue in page reclaim. */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 	written = 0;
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
@@ -2363,7 +2363,7 @@ relock:
 			goto out_dio;
 		}
 	} else {
-		current->backing_dev_info = file->f_mapping->backing_dev_info;
+		current->backing_dev_info = inode_to_bdi(inode);
 		written = generic_perform_write(file, from, *ppos);
 		if (likely(written >= 0))
 			iocb->ki_pos = *ppos + written;
@@ -699,7 +699,7 @@ xfs_file_buffered_aio_write(

 	iov_iter_truncate(from, count);
 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);

 write_retry:
 	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
@@ -106,6 +106,8 @@ struct backing_dev_info {
 #endif
 };

+struct backing_dev_info *inode_to_bdi(struct inode *inode);
+
 int __must_check bdi_init(struct backing_dev_info *bdi);
 void bdi_destroy(struct backing_dev_info *bdi);

@@ -303,12 +305,12 @@ static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)

 static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
 {
-	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
+	return bdi_cap_writeback_dirty(inode_to_bdi(mapping->host));
 }

 static inline bool mapping_cap_account_dirty(struct address_space *mapping)
 {
-	return bdi_cap_account_dirty(mapping->backing_dev_info);
+	return bdi_cap_account_dirty(inode_to_bdi(mapping->host));
 }

 static inline int bdi_sched_wait(void *word)
@@ -47,7 +47,7 @@ TRACE_EVENT(writeback_dirty_page,

 	TP_fast_assign(
 		strncpy(__entry->name,
-			mapping ? dev_name(mapping->backing_dev_info->dev) : "(unknown)", 32);
+			mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)", 32);
 		__entry->ino = mapping ? mapping->host->i_ino : 0;
 		__entry->index = page->index;
 	),
@@ -72,7 +72,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
 	),

 	TP_fast_assign(
-		struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(inode);

 		/* may be called for files on pseudo FSes w/ unregistered bdi */
 		strncpy(__entry->name,
@@ -116,7 +116,7 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,

 	TP_fast_assign(
 		strncpy(__entry->name,
-			dev_name(inode->i_mapping->backing_dev_info->dev), 32);
+			dev_name(inode_to_bdi(inode)->dev), 32);
 		__entry->ino = inode->i_ino;
 		__entry->sync_mode = wbc->sync_mode;
 	),
@@ -73,7 +73,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 	else
 		endbyte--;		/* inclusive */

-	bdi = mapping->backing_dev_info;
+	bdi = inode_to_bdi(mapping->host);

 	switch (advice) {
 	case POSIX_FADV_NORMAL:
@@ -113,7 +113,7 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
 	case POSIX_FADV_NOREUSE:
 		break;
 	case POSIX_FADV_DONTNEED:
-		if (!bdi_write_congested(mapping->backing_dev_info))
+		if (!bdi_write_congested(bdi))
 			__filemap_fdatawrite_range(mapping, offset, endbyte,
 						   WB_SYNC_NONE);

@@ -211,7 +211,7 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 	 */
 	if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
 		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
 	}
 }

@@ -2565,7 +2565,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	size_t count = iov_iter_count(from);

 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
 		goto out;
@@ -9,6 +9,7 @@
 */

 #include <linux/fs.h>
+#include <linux/backing-dev.h>
 #include <linux/pagemap.h>
 #include <linux/export.h>
 #include <linux/uio.h>
@@ -410,7 +411,7 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
 	count = len;

 	/* We can write back this queue in page reclaim */
-	current->backing_dev_info = mapping->backing_dev_info;
+	current->backing_dev_info = inode_to_bdi(inode);

 	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
 	if (ret)
@@ -1351,7 +1351,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	unsigned long task_ratelimit;
 	unsigned long dirty_ratelimit;
 	unsigned long pos_ratio;
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	bool strictlimit = bdi->capabilities & BDI_CAP_STRICTLIMIT;
 	unsigned long start_time = jiffies;

@@ -1574,7 +1574,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
 */
 void balance_dirty_pages_ratelimited(struct address_space *mapping)
 {
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
+	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 	int ratelimit;
 	int *p;

@@ -1929,7 +1929,7 @@ continue_unlock:
 			if (!clear_page_dirty_for_io(page))
 				goto continue_unlock;

-			trace_wbc_writepage(wbc, mapping->backing_dev_info);
+			trace_wbc_writepage(wbc, inode_to_bdi(mapping->host));
 			ret = (*writepage)(page, wbc, data);
 			if (unlikely(ret)) {
 				if (ret == AOP_WRITEPAGE_ACTIVATE) {
@@ -2094,10 +2094,12 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 	trace_writeback_dirty_page(page, mapping);

 	if (mapping_cap_account_dirty(mapping)) {
+		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
+
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_DIRTIED);
-		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
-		__inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+		__inc_bdi_stat(bdi, BDI_RECLAIMABLE);
+		__inc_bdi_stat(bdi, BDI_DIRTIED);
 		task_io_account_write(PAGE_CACHE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
@@ -2156,7 +2158,7 @@ void account_page_redirty(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		current->nr_dirtied--;
 		dec_zone_page_state(page, NR_DIRTIED);
-		dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
+		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
 	}
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2295,7 +2297,7 @@ int clear_page_dirty_for_io(struct page *page)
 		 */
 		if (TestClearPageDirty(page)) {
 			dec_zone_page_state(page, NR_FILE_DIRTY);
-			dec_bdi_stat(mapping->backing_dev_info,
+			dec_bdi_stat(inode_to_bdi(mapping->host),
 				     BDI_RECLAIMABLE);
 			return 1;
 		}
@@ -2315,7 +2317,7 @@ int test_clear_page_writeback(struct page *page)

 	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
 	if (mapping) {
-		struct backing_dev_info *bdi = mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 		unsigned long flags;

 		spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2352,7 +2354,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)

 	memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
 	if (mapping) {
-		struct backing_dev_info *bdi = mapping->backing_dev_info;
+		struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
 		unsigned long flags;

 		spin_lock_irqsave(&mapping->tree_lock, flags);
@@ -2406,12 +2408,7 @@ EXPORT_SYMBOL(mapping_tagged);
 */
 void wait_for_stable_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
-	struct backing_dev_info *bdi = mapping->backing_dev_info;
-
-	if (!bdi_cap_stable_pages_required(bdi))
-		return;
-
-	wait_on_page_writeback(page);
+	if (bdi_cap_stable_pages_required(inode_to_bdi(page->mapping->host)))
+		wait_on_page_writeback(page);
 }
 EXPORT_SYMBOL_GPL(wait_for_stable_page);
@@ -27,7 +27,7 @@
 void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 {
-	ra->ra_pages = mapping->backing_dev_info->ra_pages;
+	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
 	ra->prev_pos = -1;
 }
 EXPORT_SYMBOL_GPL(file_ra_state_init);
@@ -541,7 +541,7 @@ page_cache_async_readahead(struct address_space *mapping,
 	/*
 	 * Defer asynchronous read-ahead on IO congestion.
 	 */
-	if (bdi_read_congested(mapping->backing_dev_info))
+	if (bdi_read_congested(inode_to_bdi(mapping->host)))
 		return;

 	/* do read-ahead */
@@ -112,7 +112,7 @@ void cancel_dirty_page(struct page *page, unsigned int account_size)
 	struct address_space *mapping = page->mapping;
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(mapping->backing_dev_info,
+		dec_bdi_stat(inode_to_bdi(mapping->host),
 			     BDI_RECLAIMABLE);
 		if (account_size)
 			task_io_account_cancelled_write(account_size);
@@ -497,7 +497,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 	}
 	if (mapping->a_ops->writepage == NULL)
 		return PAGE_ACTIVATE;
-	if (!may_write_to_queue(mapping->backing_dev_info, sc))
+	if (!may_write_to_queue(inode_to_bdi(mapping->host), sc))
 		return PAGE_KEEP;

 	if (clear_page_dirty_for_io(page)) {
@@ -876,7 +876,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		mapping = page_mapping(page);
 		if (((dirty || writeback) && mapping &&
-		     bdi_write_congested(mapping->backing_dev_info)) ||
+		     bdi_write_congested(inode_to_bdi(mapping->host))) ||
 		    (writeback && PageReclaim(page)))
 			nr_congested++;

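The call-site conversions above are mechanical; a minimal sketch of the pattern, assuming the caller only has an address_space at hand:

	/* before: the bdi hangs off the address_space */
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	/* after: derive it from the host inode's superblock via the helper */
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);

Callers that already hold the inode (the btrfs, ceph, fuse, ntfs, ocfs2 and xfs write paths above) simply pass inode_to_bdi(inode).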