Merge branch 'PAGE_CACHE_SIZE-removal'

Merge PAGE_CACHE_SIZE removal patches from Kirill Shutemov:
 "PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
  ago with promise that one day it will be possible to implement page
  cache with bigger chunks than PAGE_SIZE.

  This promise never materialized, and it seems unlikely it ever will.

  Let's stop pretending that pages in page cache are special.  They are
  not.

  The first patch, which contains most of the changes, was generated
  with coccinelle.  The second is manual fixups on top.

  The third patch removes the macro definitions"

[ I was planning to apply this just before rc2, but then I spaced out,
  so here it is right _after_ rc2 instead.

  As Kirill suggested as a possibility, I could have decided to only
  merge the first two patches, and leave the old interfaces for
  compatibility, but I'd rather get it all done and any out-of-tree
  modules and patches can trivially do the conversion while still also
  working with older kernels, so there is little reason to try to
  maintain the redundant legacy model.    - Linus ]
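
The conversion in question is mechanical, and because the old names were
plain aliases it yields code that still builds and behaves identically on
older kernels.  A sketch, using hypothetical out-of-tree driver code:

    /* before */
    offset = page->index << PAGE_CACHE_SHIFT;
    page_cache_release(page);

    /* after: PAGE_CACHE_SHIFT was PAGE_SHIFT and page_cache_release()
     * was put_page() all along, so older kernels are unaffected */
    offset = page->index << PAGE_SHIFT;
    put_page(page);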

* PAGE_CACHE_SIZE-removal:
  mm: drop PAGE_CACHE_* and page_cache_{get,release} definition
  mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
  mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
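
For reference, the names removed by the last patch were pure aliases; the
definitions dropped from include/linux/pagemap.h were essentially:

    #define PAGE_CACHE_SHIFT        PAGE_SHIFT
    #define PAGE_CACHE_SIZE         PAGE_SIZE
    #define PAGE_CACHE_MASK         PAGE_MASK
    #define PAGE_CACHE_ALIGN(addr)  (((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)

    #define page_cache_get(page)            get_page(page)
    #define page_cache_release(page)        put_page(page)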
commit 4a2d057e4f by Linus Torvalds, 2016-04-04 10:50:24 -07:00
398 changed files with 2840 additions and 2869 deletions


@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
which the timestamp reverts to 1970, i.e. moves backwards in time.
Currently, cramfs must be written and read with architectures of the
same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
same endianness, and can be read only by kernels with PAGE_SIZE
== 4096. At least the latter of these is a bug, but it hasn't been
decided what the best fix is. For the moment if you have larger pages
you can just change the #define in mkcramfs.c, so long as you don't


@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
default is half of your physical RAM without swap. If you
oversize your tmpfs instances the machine will deadlock
since the OOM handler will not be able to free that memory.
nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
nr_blocks: The same as size, but in blocks of PAGE_SIZE.
nr_inodes: The maximum number of inodes for this instance. The default
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,


@ -708,9 +708,9 @@ struct address_space_operations {
from the address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
will be PAGE_CACHE_SIZE). Any private data associated with the page
will be PAGE_SIZE). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0 and
length is PAGE_CACHE_SIZE, then the private data should be released,
length is PAGE_SIZE, then the private data should be released,
because the page must be able to be completely discarded. This may
be done by calling the ->releasepage function, but in this case the
release MUST succeed.
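
A minimal sketch of an ->invalidatepage implementation following these
rules (hypothetical filesystem using buffer heads; not taken from this
commit):

    static void example_invalidatepage(struct page *page, unsigned int offset,
                                       unsigned int length)
    {
        if (offset == 0 && length == PAGE_SIZE) {
            /* whole page invalidated: private data must be torn
             * down now, and the release has to succeed */
            try_to_release_page(page, 0);
        } else {
            /* partial truncate / punch hole: trim only the range */
            block_invalidatepage(page, offset, length);
        }
    }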


@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)page_address(page);
unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
unsigned long vaddr = page->index << PAGE_SHIFT;
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_page(paddr, vaddr);


@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
*/
if (mapping && cache_is_vipt_aliasing())
flush_pfn_alias(page_to_pfn(page),
page->index << PAGE_CACHE_SHIFT);
page->index << PAGE_SHIFT);
}
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
pgoff = page->index;
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {


@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
if (!mapping)
return;
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
pgoff = page->index;
/* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether


@ -22,7 +22,7 @@
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages and page_cache_release */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
#include <asm/pgalloc.h>


@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
sb->s_op = &s_ops;
sb->s_fs_info = info;


@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->uid = current_uid();
sbi->gid = current_gid();
sb->s_fs_info = sbi;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
if (hypfs_parse_options(data, sb))


@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
* release the pages we didn't map into the bio, if any
*/
while (j < page_limit)
page_cache_release(pages[j++]);
put_page(pages[j++]);
}
kfree(pages);
@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < nr_pages; j++) {
if (!pages[j])
break;
page_cache_release(pages[j]);
put_page(pages[j]);
}
out:
kfree(pages);
@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
put_page(bvec->bv_page);
}
bio_put(bio);
@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
* the BIO and the offending pages and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
* here on. It will run one page_cache_release() against each page and will
* run one bio_put() against the BIO.
* here on. It will run one put_page() against each page and will run one
* bio_put() against the BIO.
*/
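/* A sketch of the intended caller of the contract described above,
 * modeled on the direct-IO read completion path (hypothetical code,
 * not part of this commit):
 *
 *	static void my_read_end_io(struct bio *bio)
 *	{
 *		// one put_page() per page plus one bio_put() happen
 *		// inside, deferred to process context if necessary
 *		bio_check_pages_dirty(bio);
 *	}
 */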
static void bio_dirty_fn(struct work_struct *work);
@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
struct page *page = bvec->bv_page;
if (PageDirty(page) || PageCompound(page)) {
page_cache_release(page);
put_page(page);
bvec->bv_page = NULL;
} else {
nr_clean_pages++;


@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
goto fail_id;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info.name = "block";
q->node = node_id;


@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
if ((max_hw_sectors << 9) < PAGE_SIZE) {
max_hw_sectors = 1 << (PAGE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_hw_sectors);
}
@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
if (max_size < PAGE_SIZE) {
max_size = PAGE_SIZE;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_size);
}
@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
if (mask < PAGE_SIZE - 1) {
mask = PAGE_SIZE - 1;
printk(KERN_INFO "%s: set to minimum %lx\n",
__func__, mask);
}


@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
unsigned long ra_kb = q->backing_dev_info.ra_pages <<
(PAGE_CACHE_SHIFT - 10);
(PAGE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
}
@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(PAGE_CACHE_SIZE, (page));
return queue_var_show(PAGE_SIZE, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
page_kb = 1 << (PAGE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
if (ret < 0)


@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* idle timer unplug to continue working.
*/
if (cfq_cfqq_wait_request(cfqq)) {
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
if (blk_rq_bytes(rq) > PAGE_SIZE ||
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);


@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg,
(bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
(bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);


@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKBSZSET:
return blkdev_bszset(bdev, mode, argp);


@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
{
struct address_space *mapping = bdev->bd_inode->i_mapping;
return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
NULL);
return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
NULL);
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
if (PageError(page))
goto fail;
p->v = page;
return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
fail:
page_cache_release(page);
put_page(page);
}
p->v = NULL;
return NULL;


@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;


@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, err);
return err;
}


@ -1327,8 +1327,8 @@ struct bm_extent {
#endif
#endif
/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
* so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
* so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,


@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
if (b) {
struct drbd_connection *connection = first_peer_device(device)->connection;


@ -622,7 +622,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
put_page(page);
}
sg_free_table(ttm->sg);


@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
release:
for_each_sg(sgt->sgl, sg, num, i)
page_cache_release(sg_page(sg));
put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
page_cache_release(sg_page(sg));
put_page(sg_page(sg));
}
sg_free_table(sgt);


@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
fail:
while (i--)
page_cache_release(pages[i]);
put_page(pages[i]);
drm_free_large(pages);
return ERR_CAST(p);
@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);
/* Undo the reference we took when populating the table */
page_cache_release(pages[i]);
put_page(pages[i]);
}
drm_free_large(pages);


@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);
page_cache_release(page);
put_page(page);
vaddr += PAGE_SIZE;
}
@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
page_cache_release(page);
put_page(page);
vaddr += PAGE_SIZE;
}
obj->dirty = 0;
@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
page_cache_release(page);
put_page(page);
}
obj->dirty = 0;
@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
page_cache_release(sg_page_iter_page(&sg_iter));
put_page(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);


@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
put_page(page);
}
obj->dirty = 0;


@ -615,7 +615,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);
mark_page_accessed(page);
page_cache_release(page);
put_page(page);
}
sg_free_table(ttm->sg);


@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;
copy_highpage(to_page, from_page);
page_cache_release(from_page);
put_page(from_page);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
page_cache_release(to_page);
put_page(to_page);
}
ttm_tt_unpopulate(ttm);


@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
put_page(page);
}
}
case dr_via_pages_alloc:


@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page)
{
ClearPagePrivate(page);
set_page_private(page, 0);
page_cache_release(page);
put_page(page);
}
static void free_buffers(struct page *page)
{


@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma)
if (dma->pages) {
for (i = 0; i < dma->nr_pages; i++)
page_cache_release(dma->pages[i]);
put_page(dma->pages[i]);
kfree(dma->pages);
dma->pages = NULL;
}


@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent)
{
struct inode *root;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = IBMASMFS_MAGIC;
sb->s_op = &ibmasmfs_s_ops;
sb->s_time_gran = 1;


@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages,
if (dirty)
set_page_dirty(pages[i]);
page_cache_release(pages[i]);
put_page(pages[i]);
pages[i] = NULL;
}
}


@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
* They have to set these according to their abilities.
*/
host->max_segs = 1;
host->max_seg_size = PAGE_CACHE_SIZE;
host->max_seg_size = PAGE_SIZE;
host->max_req_size = PAGE_CACHE_SIZE;
host->max_req_size = PAGE_SIZE;
host->max_blk_size = 512;
host->max_blk_count = PAGE_CACHE_SIZE / 512;
host->max_blk_count = PAGE_SIZE / 512;
return host;
}


@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
mmc->caps |= pd->caps;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
mmc->max_seg_size = mmc->max_req_size;


@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;
@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
}
}
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE ||
if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE ||
(align & PAGE_MASK))) || !multiple) {
ret = -EINVAL;
goto pio;


@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host,
mmc->caps2 |= pdata->capabilities2;
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) *
mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) *
mmc->max_segs;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;


@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev)
/* Set .max_segs to some random number. Feel free to adjust. */
mmc->max_segs = 32;
mmc->max_blk_size = 512;
mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
mmc->max_req_size = PAGE_SIZE * mmc->max_segs;
mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
/*
* Setting .max_seg_size to 1 page would simplify our page-mapping code,


@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
break;
}
page_cache_release(page);
put_page(page);
pages--;
index++;
}
@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
return PTR_ERR(page);
memcpy(buf, page_address(page) + offset, cpylen);
page_cache_release(page);
put_page(page);
if (retlen)
*retlen += cpylen;
@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
unlock_page(page);
balance_dirty_pages_ratelimited(mapping);
}
page_cache_release(page);
put_page(page);
if (retlen)
*retlen += cpylen;


@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns)
int i;
for (i = 0; i < ns->held_cnt; i++)
page_cache_release(ns->held_pages[i]);
put_page(ns->held_pages[i]);
}
/* Get page cache pages in advance to provide NOFS memory allocation */
@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t
struct page *page;
struct address_space *mapping = file->f_mapping;
start_index = pos >> PAGE_CACHE_SHIFT;
end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT;
start_index = pos >> PAGE_SHIFT;
end_index = (pos + count - 1) >> PAGE_SHIFT;
if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
return -EINVAL;
ns->held_cnt = 0;


@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
{
struct btt *btt = bdev->bd_disk->private_data;
btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, 0);
return 0;
}


@ -151,7 +151,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_disk->private_data;
int rc;
rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector);
if (rw & WRITE)
wmb_pmem();


@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *root_inode;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = OPROFILEFS_MAGIC;
sb->s_op = &s_ops;
sb->s_time_gran = 1;


@ -2891,7 +2891,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
rw_max = q->limits.io_opt =
sdkp->opt_xfer_blocks * sdp->sector_size;
else


@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp,
out_unmap:
if (res > 0) {
for (j=0; j < res; j++)
page_cache_release(pages[j]);
put_page(pages[j]);
res = 0;
}
kfree(pages);
@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp,
/* FIXME: cache flush missing for rw==READ
* FIXME: call the correct reference counting function
*/
page_cache_release(page);
put_page(page);
}
kfree(STbp->mapped_pages);
STbp->mapped_pages = NULL;


@ -88,7 +88,7 @@ do { \
} while (0)
#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */
#endif
#define LIBCFS_ALLOC_PRE(size, mask) \


@ -57,7 +57,7 @@
#include "../libcfs_cpu.h"
#endif
#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1))
#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
#define page_index(p) ((p)->index)
#define memory_pressure_get() (current->flags & PF_MEMALLOC)
@ -67,7 +67,7 @@
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
#define NUM_CACHEPAGES \
min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
#else
#define NUM_CACHEPAGES totalram_pages
#endif


@ -514,7 +514,7 @@ typedef struct {
/**
* Starting offset of the fragment within the page. Note that the
* end of the fragment must not pass the end of the page; i.e.,
* kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
* kiov_len + kiov_offset <= PAGE_SIZE.
*/
unsigned int kiov_offset;
} lnet_kiov_t;
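/* In sketch form, the invariant stated above is what kiov users
 * (e.g. lnet_md_build(), seen elsewhere in this merge) enforce for
 * every fragment (hypothetical loop):
 *
 *	for (i = 0; i < niov; i++)
 *		LASSERT(kiov[i].kiov_offset + kiov[i].kiov_len
 *			<= PAGE_SIZE);
 */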


@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
for (nob = i = 0; i < niov; i++) {
if ((kiov[i].kiov_offset && i > 0) ||
(kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1))
(kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1))
return NULL;
pages[i] = kiov[i].kiov_page;


@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize)
max = TCD_MAX_PAGES;
} else {
max = max / num_possible_cpus();
max <<= (20 - PAGE_CACHE_SHIFT);
max <<= (20 - PAGE_SHIFT);
}
rc = cfs_tracefile_init(max);


@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len)
if (tcd->tcd_cur_pages > 0) {
__LASSERT(!list_empty(&tcd->tcd_pages));
tage = cfs_tage_from_list(tcd->tcd_pages.prev);
if (tage->used + len <= PAGE_CACHE_SIZE)
if (tage->used + len <= PAGE_SIZE)
return tage;
}
@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd,
* from here: this will lead to infinite recursion.
*/
if (len > PAGE_CACHE_SIZE) {
if (len > PAGE_SIZE) {
pr_err("cowardly refusing to write %lu bytes in a page\n", len);
return NULL;
}
@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
for (i = 0; i < 2; i++) {
tage = cfs_trace_get_tage(tcd, needed + known_size + 1);
if (!tage) {
if (needed + known_size > PAGE_CACHE_SIZE)
if (needed + known_size > PAGE_SIZE)
mask |= D_ERROR;
cfs_trace_put_tcd(tcd);
@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
string_buf = (char *)page_address(tage->page) +
tage->used + known_size;
max_nob = PAGE_CACHE_SIZE - tage->used - known_size;
max_nob = PAGE_SIZE - tage->used - known_size;
if (max_nob <= 0) {
printk(KERN_EMERG "negative max_nob: %d\n",
max_nob);
@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
__LASSERT(debug_buf == string_buf);
tage->used += needed;
__LASSERT(tage->used <= PAGE_CACHE_SIZE);
__LASSERT(tage->used <= PAGE_SIZE);
console:
if ((mask & libcfs_printk) == 0) {
@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string);
int cfs_trace_allocate_string_buffer(char **str, int nob)
{
if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */
if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */
return -EINVAL;
*str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO);
@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb)
}
mb /= num_possible_cpus();
pages = mb << (20 - PAGE_CACHE_SHIFT);
pages = mb << (20 - PAGE_SHIFT);
cfs_tracefile_write_lock();
@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void)
cfs_tracefile_read_unlock();
return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1;
return (total_pages >> (20 - PAGE_SHIFT)) + 1;
}
static int tracefiled(void *arg)


@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void);
extern int libcfs_panic_in_progress;
int cfs_trace_max_debug_mb(void);
#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void);
/*
* Private declare for tracefile
*/
#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT))
#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT))
#define TCD_STOCK_PAGES (TCD_MAX_PAGES)
#define CFS_TRACEFILE_SIZE (500 << 20)
@ -257,7 +257,7 @@ do { \
do { \
__LASSERT(tage); \
__LASSERT(tage->page); \
__LASSERT(tage->used <= PAGE_CACHE_SIZE); \
__LASSERT(tage->used <= PAGE_SIZE); \
__LASSERT(page_count(tage->page) > 0); \
} while (0)


@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink)
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
if (lmd->md_iov.kiov[i].kiov_offset +
lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE)
lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
return -EINVAL; /* invalid length */
total_length += lmd->md_iov.kiov[i].kiov_len;


@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
if (len <= frag_len) {
dst->kiov_len = len;
LASSERT(dst->kiov_offset + dst->kiov_len
<= PAGE_CACHE_SIZE);
<= PAGE_SIZE);
return niov;
}
dst->kiov_len = frag_len;
LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE);
LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
len -= frag_len;
dst++;
@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg)
rbp = &the_lnet.ln_rtrpools[cpt][0];
LASSERT(msg->msg_len <= LNET_MTU);
while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) {
while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
rbp++;
LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
}


@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp)
nalloc = 16; /* first guess at max interfaces */
toobig = 0;
for (;;) {
if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) {
if (nalloc * sizeof(*ifr) > PAGE_SIZE) {
toobig = 1;
nalloc = PAGE_CACHE_SIZE / sizeof(*ifr);
nalloc = PAGE_SIZE / sizeof(*ifr);
CWARN("Too many interfaces: only enumerating first %d\n",
nalloc);
}


@ -27,8 +27,8 @@
#define LNET_NRB_SMALL_PAGES 1
#define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */
#define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4)
#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \
PAGE_CACHE_SHIFT)
#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \
PAGE_SHIFT)
static char *forwarding = "";
module_param(forwarding, charp, 0444);
@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt)
return NULL;
}
rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE;
rb->rb_kiov[i].kiov_len = PAGE_SIZE;
rb->rb_kiov[i].kiov_offset = 0;
rb->rb_kiov[i].kiov_page = page;
}


@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi)
* NB: this is not going to work for variable page size,
* but we have to keep it for compatibility
*/
len = npg * PAGE_CACHE_SIZE;
len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi)
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
if (npg > LNET_MAX_IOV || npg <= 0)
@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic)
if (pattern == LST_BRW_CHECK_SIMPLE) {
memcpy(addr, &magic, BRW_MSIZE);
addr += PAGE_CACHE_SIZE - BRW_MSIZE;
addr += PAGE_SIZE - BRW_MSIZE;
memcpy(addr, &magic, BRW_MSIZE);
return;
}
if (pattern == LST_BRW_CHECK_FULL) {
for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++)
for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++)
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE);
return;
}
@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
if (data != magic)
goto bad_data;
addr += PAGE_CACHE_SIZE - BRW_MSIZE;
addr += PAGE_SIZE - BRW_MSIZE;
data = *((__u64 *)addr);
if (data != magic)
goto bad_data;
@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic)
}
if (pattern == LST_BRW_CHECK_FULL) {
for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) {
for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) {
data = *(((__u64 *)addr) + i);
if (data != magic)
goto bad_data;
@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
len = npg * PAGE_CACHE_SIZE;
len = npg * PAGE_SIZE;
} else {
test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
opc = breq->blk_opc;
flags = breq->blk_flags;
len = breq->blk_len;
npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc);
@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc)
reply->brw_status = EINVAL;
return 0;
}
npg = reqst->brw_len >> PAGE_CACHE_SHIFT;
npg = reqst->brw_len >> PAGE_SHIFT;
} else {
npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}
replymsg->msg_ses_feats = reqstmsg->msg_ses_feats;


@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (args->lstio_tes_param &&
(args->lstio_tes_param_len <= 0 ||
args->lstio_tes_param_len >
PAGE_CACHE_SIZE - sizeof(lstcon_test_t)))
PAGE_SIZE - sizeof(lstcon_test_t)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
opc = data->ioc_u32[0];
if (data->ioc_plen1 > PAGE_CACHE_SIZE)
if (data->ioc_plen1 > PAGE_SIZE)
return -EINVAL;
LIBCFS_ALLOC(buf, data->ioc_plen1);


@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE;
brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
PAGE_SIZE;
brq->blk_flags = param->blk_flags;
return 0;
@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
if (transop == LST_TRANS_TSBCLIADD) {
npg = sfw_id_pages(test->tes_span);
nob = !(feats & LST_FEAT_BULK_LEN) ?
npg * PAGE_CACHE_SIZE :
npg * PAGE_SIZE :
sizeof(lnet_process_id_packed_t) * test->tes_span;
}
@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
LASSERT(nob > 0);
len = !(feats & LST_FEAT_BULK_LEN) ?
PAGE_CACHE_SIZE :
min_t(int, nob, PAGE_CACHE_SIZE);
PAGE_SIZE :
min_t(int, nob, PAGE_SIZE);
nob -= len;
bulk->bk_iovs[i].kiov_offset = 0;


@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc)
int len;
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
len = npg * PAGE_CACHE_SIZE;
len = npg * PAGE_SIZE;
} else {
len = sizeof(lnet_process_id_packed_t) *


@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
static int
srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
{
nob = min_t(int, nob, PAGE_CACHE_SIZE);
nob = min_t(int, nob, PAGE_SIZE);
LASSERT(nob > 0);
LASSERT(i >= 0 && i < bk->bk_niov);


@ -390,10 +390,10 @@ typedef struct sfw_test_instance {
} tsi_u;
} sfw_test_instance_t;
/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at
* the end of pages are not used */
/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
* pages are not used */
#define SFW_MAX_CONCUR LST_MAX_CONCUR
#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t))
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)


@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return;
if (PagePrivate(page))
page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
cancel_dirty_page(page);
ClearPageMappedToDisk(page);


@ -1118,7 +1118,7 @@ struct lu_context_key {
{ \
type *value; \
\
CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \
CLASSERT(PAGE_SIZE >= sizeof (*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \


@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent)
* MDS_READPAGE page size
*
* This is the directory page size packed in MDS_READPAGE RPC.
* It's different than PAGE_CACHE_SIZE because the client needs to
* It's different than PAGE_SIZE because the client needs to
* access the struct lu_dirpage header packed at the beginning of
* the "page" and without this there isn't any way to know find the
* lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
* lu_dirpage header is if client and server PAGE_SIZE differ.
*/
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
/** @} lu_dir */
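/* Worked example of the constants above, assuming a hypothetical
 * 64 KiB-page client (PAGE_SHIFT == 16):
 *	LU_PAGE_SIZE  = 1 << 12        = 4 KiB wire-format dirpage
 *	LU_PAGE_COUNT = 1 << (16 - 12) = 16 lu_dirpages per VM page
 * A 4 KiB-page client gets LU_PAGE_COUNT == 1, i.e. no repacking. */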


@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
if (cli->cl_max_mds_easize < body->max_mdsize) {
cli->cl_max_mds_easize = body->max_mdsize;
cli->cl_default_mds_easize =
min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE);
min_t(__u32, body->max_mdsize, PAGE_SIZE);
}
if (cli->cl_max_mds_cookiesize < body->max_cookiesize) {
cli->cl_max_mds_cookiesize = body->max_cookiesize;
cli->cl_default_mds_cookiesize =
min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE);
min_t(__u32, body->max_cookiesize, PAGE_SIZE);
}
}
}


@ -99,21 +99,21 @@
*/
#define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS)
#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)
#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE
#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT)
#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS)
/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"


@ -272,7 +272,7 @@ struct client_obd {
int cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
* the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size)
* the extent size. A chunk is max(PAGE_SIZE, OST block size)
*/
int cl_chunkbits;
int cl_chunk;
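/* Illustration with assumed values: 4 KiB PAGE_SIZE and 64 KiB OST
 * blocks give chunk = max(4 KiB, 64 KiB) = 64 KiB, cl_chunkbits = 16. */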
@ -1318,7 +1318,7 @@ bad_format:
static inline int cli_brw_size(struct obd_device *obd)
{
return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
}
#endif /* __OBD_H */


@ -500,7 +500,7 @@ extern char obd_jobid_var[];
#ifdef POISON_BULK
#define POISON_PAGE(page, val) do { \
memset(kmap(page), val, PAGE_CACHE_SIZE); \
memset(kmap(page), val, PAGE_SIZE); \
kunmap(page); \
} while (0)
#else


@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
* --bug 17336
*/
loff_t size = cl_isize_read(inode);
loff_t cur_index = start >> PAGE_CACHE_SHIFT;
loff_t cur_index = start >> PAGE_SHIFT;
loff_t size_index = (size - 1) >>
PAGE_CACHE_SHIFT;
PAGE_SHIFT;
if ((size == 0 && cur_index != 0) ||
size_index < cur_index)


@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max. */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8)
cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
* In the future this should likely be increased. LU-1431
*/
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
LNET_MTU >> PAGE_CACHE_SHIFT);
LNET_MTU >> PAGE_SHIFT);
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
} else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT;


@ -107,7 +107,7 @@
/*
* 50 ldlm locks for 1MB of RAM.
*/
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50)
#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50)
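/* With 4 KiB pages, (20 - PAGE_SHIFT) == 8, so NUM_CACHEPAGES >> 8
 * converts pages to MiB of RAM: 50 locks per MiB, as stated above. */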
/*
* Maximal possible grant step plan in %.


@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off)
{
int avail;
avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size;
avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
if (likely(avail >= 0))
avail /= (int)sizeof(struct lustre_handle);
else


@ -134,9 +134,8 @@
* a header lu_dirpage which describes the start/end hash, and whether this
* page is empty (contains no dir entry) or hash collide with next page.
* After client receives reply, several pages will be integrated into dir page
* in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the
* lu_dirpage for this integrated page will be adjusted. See
* lmv_adjust_dirpages().
in PAGE_SIZE (if PAGE_SIZE is greater than LU_PAGE_SIZE), and the lu_dirpage
* for this integrated page will be adjusted. See lmv_adjust_dirpages().
*
*/
@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
struct page **page_pool;
struct page *page;
struct lu_dirpage *dp;
int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT;
int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT;
int nrdpgs = 0; /* number of pages read actually */
int npages;
int i;
@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0)
if (body->valid & OBD_MD_FLSIZE)
cl_isize_write(inode, body->size);
nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1)
>> PAGE_CACHE_SHIFT;
nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
>> PAGE_SHIFT;
SetPageUptodate(page0);
}
unlock_page(page0);
@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
page = page_pool[i];
if (rc < 0 || i >= nrdpgs) {
page_cache_release(page);
put_page(page);
continue;
}
@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n",
offset, ret);
}
page_cache_release(page);
put_page(page);
}
if (page_pool != &page0)
@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove)
truncate_complete_page(page->mapping, page);
unlock_page(page);
}
page_cache_release(page);
put_page(page);
}
/*
@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
if (found > 0 && !radix_tree_exceptional_entry(page)) {
struct lu_dirpage *dp;
page_cache_get(page);
get_page(page);
spin_unlock_irq(&mapping->tree_lock);
/*
* In contrast to find_lock_page() we are sure that directory
@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash,
page = NULL;
}
} else {
page_cache_release(page);
put_page(page);
page = ERR_PTR(-EIO);
}
@ -1507,7 +1506,7 @@ skip_lmm:
st.st_gid = body->gid;
st.st_rdev = body->rdev;
st.st_size = body->size;
st.st_blksize = PAGE_CACHE_SIZE;
st.st_blksize = PAGE_SIZE;
st.st_blocks = body->blocks;
st.st_atime = body->atime;
st.st_mtime = body->mtime;


@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode)
/* default to about 40meg of readahead on a given system. That much tied
* up in 512k readahead requests serviced at 40ms each is about 1GB/s.
*/
#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT))
#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT))
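/* Checking the 1GB/s figure: 40 MiB / 512 KiB = 80 requests in
 * flight; each slot moves 512 KiB / 40 ms = 12.5 MiB/s; and
 * 80 * 12.5 MiB/s = 1000 MiB/s, i.e. about 1 GB/s. */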
/* default to read-ahead full files smaller than 2MB on the second read */
#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT))
#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT))
enum ra_stat {
RA_STAT_HIT = 0,
@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
static inline void ll_invalidate_page(struct page *vmpage)
{
struct address_space *mapping = vmpage->mapping;
loff_t offset = vmpage->index << PAGE_CACHE_SHIFT;
loff_t offset = vmpage->index << PAGE_SHIFT;
LASSERT(PageLocked(vmpage));
if (!mapping)
return;
ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE);
ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}


@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
if (pages >> (20 - PAGE_CACHE_SHIFT) < 512)
if (pages >> (20 - PAGE_SHIFT) < 512)
lru_page_max = pages / 2;
else
lru_page_max = (pages / 4) * 3;
@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
valid != CLIENT_CONNECT_MDT_REQD) {
char *buf;
buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto out_md_fid;
}
obd_connect_flags2str(buf, PAGE_CACHE_SIZE,
obd_connect_flags2str(buf, PAGE_SIZE,
valid ^ CLIENT_CONNECT_MDT_REQD, ",");
LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). Please upgrade server or downgrade client.\n",
sbi->ll_md_exp->exp_obd->obd_name, buf);
@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
sbi->ll_md_brw_size = data->ocd_brw_size;
else
sbi->ll_md_brw_size = PAGE_CACHE_SIZE;
sbi->ll_md_brw_size = PAGE_SIZE;
if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
LCONSOLE_INFO("Layout lock feature supported.\n");


@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy,
size_t count)
{
policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
(vma->vm_pgoff << PAGE_CACHE_SHIFT);
(vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
~CFS_PAGE_MASK;
}
@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage) {
page_cache_release(vmpage);
put_page(vmpage);
vmf->page = NULL;
}
}
@ -360,7 +360,7 @@ restart:
lock_page(vmpage);
if (unlikely(!vmpage->mapping)) { /* unlucky */
unlock_page(vmpage);
page_cache_release(vmpage);
put_page(vmpage);
vmf->page = NULL;
if (!printed && ++count > 16) {
@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
LASSERTF(last > first, "last %llu first %llu\n", last, first);
if (mapping_mapped(mapping)) {
rc = 0;
unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
unmap_mapping_range(mapping, first + PAGE_SIZE - 1,
last - first + 1, 0);
}


@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
bio_for_each_segment(bvec, bio, iter) {
BUG_ON(bvec.bv_offset != 0);
BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
BUG_ON(bvec.bv_len != PAGE_SIZE);
pages[page_count] = bvec.bv_page;
offsets[page_count] = offset;
@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
(rw == WRITE) ? LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ,
page_count);
pvec->ldp_size = page_count << PAGE_CACHE_SHIFT;
pvec->ldp_size = page_count << PAGE_SHIFT;
pvec->ldp_nr = page_count;
/* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to
@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->lo_blocksize = PAGE_CACHE_SIZE;
lo->lo_blocksize = PAGE_SIZE;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused,
lo->lo_queue->queuedata = lo;
/* queue parameters */
CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8)));
CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8)));
blk_queue_logical_block_size(lo->lo_queue,
(unsigned short)PAGE_CACHE_SIZE);
(unsigned short)PAGE_SIZE);
blk_queue_max_hw_sectors(lo->lo_queue,
LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9));
LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9));
blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS);
set_capacity(disks[lo->lo_number], size);


@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_pages;
spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - PAGE_CACHE_SHIFT);
mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
if (rc)
return rc;
pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number > totalram_pages / 2) {
CERROR("can't set file readahead more than %lu MB\n",
totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
}
@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - PAGE_CACHE_SHIFT);
mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
spin_unlock(&sbi->ll_lock);
mult = 1 << (20 - PAGE_CACHE_SHIFT);
mult = 1 << (20 - PAGE_SHIFT);
return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
}
@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
*/
if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT));
sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
return -ERANGE;
}
@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
struct super_block *sb = m->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
int shift = 20 - PAGE_CACHE_SHIFT;
int shift = 20 - PAGE_SHIFT;
int max_cached_mb;
int unused_mb;
@ -405,7 +405,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
return -EFAULT;
kernbuf[count] = 0;
mult = 1 << (20 - PAGE_CACHE_SHIFT);
mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
kernbuf;
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
totalram_pages >> (20 - PAGE_CACHE_SHIFT));
totalram_pages >> (20 - PAGE_SHIFT));
return -ERANGE;
}


@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
*/
io->ci_lockreq = CILR_NEVER;
pos = vmpage->index << PAGE_CACHE_SHIFT;
pos = vmpage->index << PAGE_SHIFT;
/* Create a temp IO to serve write. */
result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE);
result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
if (result == 0) {
cio->cui_fd = LUSTRE_FPRIVATE(file);
cio->cui_iter = NULL;
@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
}
if (rc != 1)
unlock_page(vmpage);
page_cache_release(vmpage);
put_page(vmpage);
} else {
which = RA_STAT_FAILED_GRAB_PAGE;
msg = "g_c_p_n failed";
@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
* striped over, rather than having a constant value for all files here.
*/
/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)).
/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
* Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
* by default, this should be adjusted corresponding with max_read_ahead_mb
* and max_read_ahead_per_file_mb otherwise the readahead budget can be used
* up quickly which will affect read performance significantly. See LU-2816
*/
#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT)
#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
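/* With 4 KiB pages: ONE_MB_BRW_SIZE >> PAGE_SHIFT = (1 << 20) >> 12
 * = 256 pages per readahead window increase step. */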
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
end = rpc_boundary;
/* Truncate RA window to end of file */
end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT));
end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));
ras->ras_next_readahead = max(end, end + 1);
RAS_CDEBUG(ras);
@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT))
if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
ll_ra_stats_inc(mapping, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if (ras->ras_requests == 2 && !ras->ras_request_index) {
__u64 kms_pages;
kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
PAGE_SHIFT;
CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file);
@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
* PageWriteback or clean the page.
*/
result = cl_sync_file_range(inode, offset,
offset + PAGE_CACHE_SIZE - 1,
offset + PAGE_SIZE - 1,
CL_FSYNC_LOCAL, 1);
if (result > 0) {
/* actually we may have written more than one page.
@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
int ignore_layout = 0;
if (wbc->range_cyclic) {
start = mapping->writeback_index << PAGE_CACHE_SHIFT;
start = mapping->writeback_index << PAGE_SHIFT;
end = OBD_OBJECT_EOF;
} else {
start = wbc->range_start;
@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
end = i_size_read(inode);
mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
return result;
}


@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
* below because they are run with page locked and all our io is
* happening with locked page too
*/
if (offset == 0 && length == PAGE_CACHE_SIZE) {
if (offset == 0 && length == PAGE_SIZE) {
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
inode = vmpage->mapping->host;
@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
return -EFBIG;
}
*max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
*max_pages -= user_addr >> PAGE_CACHE_SHIFT;
*max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
*max_pages -= user_addr >> PAGE_SHIFT;
*pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS);
if (*pages) {
@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty)
for (i = 0; i < npages; i++) {
if (do_dirty)
set_page_dirty_lock(pages[i]);
page_cache_release(pages[i]);
put_page(pages[i]);
}
kvfree(pages);
}
@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io,
* up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc.
*/
#define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \
PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1))
static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
loff_t file_offset)
{
@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
CDEBUG(D_VFSTRACE,
"VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
file_offset, file_offset, count >> PAGE_CACHE_SHIFT,
MAX_DIO_SIZE >> PAGE_CACHE_SHIFT);
file_offset, file_offset, count >> PAGE_SHIFT,
MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter,
* page worth of page pointers = 4MB on i386.
*/
if (result == -ENOMEM &&
size > (PAGE_CACHE_SIZE / sizeof(*pages)) *
PAGE_CACHE_SIZE) {
size > (PAGE_SIZE / sizeof(*pages)) *
PAGE_SIZE) {
size = ((((size / 2) - 1) |
~CFS_PAGE_MASK) + 1) &
CFS_PAGE_MASK;
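
[Editor's note: the fallback above halves the transfer and keeps it page aligned. A sketch, assuming 4 KB pages and CFS_PAGE_MASK == ~(PAGE_SIZE - 1), an assumption consistent with how the mask is used elsewhere in this diff:]

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE     4096UL                 /* assumed: 4 KB pages */
#define CFS_PAGE_MASK (~(PAGE_SIZE - 1))     /* assumed definition */

static unsigned long halve_to_page(unsigned long size)
{
        return ((((size / 2) - 1) | ~CFS_PAGE_MASK) + 1) & CFS_PAGE_MASK;
}

int main(void)
{
        /* 10 pages halve to 5 pages (already page aligned) */
        assert(halve_to_page(10 * PAGE_SIZE) == 5 * PAGE_SIZE);
        /* an odd size halves and then rounds up to the next page */
        assert(halve_to_page(3 * PAGE_SIZE) == 2 * PAGE_SIZE);
        printf("halving keeps the transfer page aligned\n");
        return 0;
}
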
@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
pgoff_t index = pos >> PAGE_SHIFT;
struct page *page;
int rc;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
unsigned from = pos & (PAGE_SIZE - 1);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
rc = ll_prepare_write(file, page, from, from + len);
if (rc) {
unlock_page(page);
page_cache_release(page);
put_page(page);
}
return rc;
}
@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
unsigned from = pos & (PAGE_SIZE - 1);
int rc;
rc = ll_commit_write(file, page, from, from + copied);
unlock_page(page);
page_cache_release(page);
put_page(page);
return rc ?: copied;
}


@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env,
vio->cui_ra_window_set = 1;
bead->lrr_start = cl_index(obj, pos);
/*
* XXX: explicit PAGE_CACHE_SIZE
* XXX: explicit PAGE_SIZE
*/
bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1);
bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
ll_ra_read_in(file, bead);
}
@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
* We're completely overwriting an existing page, so _don't_
* set it up to date until commit_write
*/
if (from == 0 && to == PAGE_CACHE_SIZE) {
if (from == 0 && to == PAGE_SIZE) {
CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
POISON_PAGE(page, 0x11);
} else
@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
set_page_dirty(vmpage);
vvp_write_pending(cl2ccc(obj), cp);
} else if (result == -EDQUOT) {
pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
bool need_clip = true;
/*
@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
* being.
*/
if (last_index > pg->cp_index) {
to = PAGE_CACHE_SIZE;
to = PAGE_SIZE;
need_clip = false;
} else if (last_index == pg->cp_index) {
int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;


@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp)
struct page *vmpage = cp->cpg_page;
LASSERT(vmpage);
page_cache_release(vmpage);
put_page(vmpage);
}
static void vvp_page_fini(const struct lu_env *env,
@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env,
LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
offset = vmpage->index << PAGE_CACHE_SHIFT;
offset = vmpage->index << PAGE_SHIFT;
/*
* XXX is it safe to call this with the page lock held?
*/
ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE);
ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
return 0;
}
@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
cpg->cpg_page = vmpage;
page_cache_get(vmpage);
get_page(vmpage);
INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {


@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* |s|e|f|p|ent| 0 | ... | 0 |
* '----------------- -----'
*
* However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is
* However, on hosts where the native VM page size (PAGE_SIZE) is
* larger than LU_PAGE_SIZE, a single host page may contain multiple
* lu_dirpages. After reading the lu_dirpages from the MDS, the
* ldp_hash_end of the first lu_dirpage refers to the one immediately
@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
* - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
* to the first entry of the next lu_dirpage.
*/
#if PAGE_CACHE_SIZE > LU_PAGE_SIZE
#if PAGE_SIZE > LU_PAGE_SIZE
static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
{
int i;
@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */
#endif /* PAGE_SIZE > LU_PAGE_SIZE */
static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct page **pages, struct ptlrpc_request **request)
@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
__u64 offset = op_data->op_offset;
int rc;
int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
int ncfspgs; /* pages read in PAGE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
if (rc != 0)
return rc;
ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
>> PAGE_CACHE_SHIFT;
ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1)
>> PAGE_SHIFT;
nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages);


@ -1002,10 +1002,10 @@ restart_bulk:
/* NB req now owns desc and will free it when it gets freed */
for (i = 0; i < op_data->op_npages; i++)
ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
mdc_readdir_pack(req, op_data->op_offset,
PAGE_CACHE_SIZE * op_data->op_npages,
PAGE_SIZE * op_data->op_npages,
&op_data->op_fid1);
ptlrpc_request_set_replen(req);
@ -1037,7 +1037,7 @@ restart_bulk:
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
CERROR("Unexpected # bytes transferred: %d (%ld expected)\n",
req->rq_bulk->bd_nob_transferred,
PAGE_CACHE_SIZE * op_data->op_npages);
PAGE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
return -EPROTO;
}


@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd,
}
enum {
CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT),
CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT),
CONFIG_READ_NRPAGES = 4
};
@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
LASSERT(cfg->cfg_instance);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
inst = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!inst)
return -ENOMEM;
pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
if (pos >= PAGE_CACHE_SIZE) {
pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance);
if (pos >= PAGE_SIZE) {
kfree(inst);
return -E2BIG;
}
++pos;
buf = inst + pos;
bufsz = PAGE_CACHE_SIZE - pos;
bufsz = PAGE_SIZE - pos;
while (datalen > 0) {
int entry_len = sizeof(*entry);
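
[Editor's note: the prefix handling above is a common carve-up-a-page pattern: print the instance name, keep its NUL, and hand the parser the rest. A hedged userspace sketch, with error codes simplified and a 4 KB page assumed:]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096 /* assumed */

int main(void)
{
        char *inst = calloc(1, PAGE_SIZE);
        char *buf;
        int pos, bufsz;

        if (!inst)
                return 1;
        pos = snprintf(inst, PAGE_SIZE, "%p", (void *)inst);
        if (pos >= PAGE_SIZE) {    /* truncated: the prefix alone overflowed */
                free(inst);
                return 1;
        }
        ++pos;                     /* keep the terminating NUL with the prefix */
        buf = inst + pos;
        bufsz = PAGE_SIZE - pos;
        memset(buf, 0, bufsz);     /* remainder of the page for the parser */
        printf("prefix '%s' takes %d bytes, %d left\n", inst, pos, bufsz);
        free(inst);
        return 0;
}
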
@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
/* Keep this swab for normal mixed endian handling. LU-1644 */
if (mne_swab)
lustre_swab_mgs_nidtbl_entry(entry);
if (entry->mne_length > PAGE_CACHE_SIZE) {
if (entry->mne_length > PAGE_SIZE) {
CERROR("MNE too large (%u)\n", entry->mne_length);
break;
}
@ -1371,7 +1371,7 @@ again:
}
body->mcb_offset = cfg->cfg_last_idx + 1;
body->mcb_type = cld->cld_type;
body->mcb_bits = PAGE_CACHE_SHIFT;
body->mcb_bits = PAGE_SHIFT;
body->mcb_units = nrpages;
/* allocate bulk transfer descriptor */
@ -1383,7 +1383,7 @@ again:
}
for (i = 0; i < nrpages; i++)
ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE);
ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE);
ptlrpc_request_set_replen(req);
rc = ptlrpc_queue_wait(req);
@ -1411,7 +1411,7 @@ again:
goto out;
}
if (ealen > nrpages << PAGE_CACHE_SHIFT) {
if (ealen > nrpages << PAGE_SHIFT) {
rc = -EINVAL;
goto out;
}
@ -1439,7 +1439,7 @@ again:
ptr = kmap(pages[i]);
rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
min_t(int, ealen, PAGE_CACHE_SIZE),
min_t(int, ealen, PAGE_SIZE),
mne_swab);
kunmap(pages[i]);
if (rc2 < 0) {
@ -1448,7 +1448,7 @@ again:
break;
}
ealen -= PAGE_CACHE_SIZE;
ealen -= PAGE_SIZE;
}
out:


@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx)
/*
* XXX for now.
*/
return (loff_t)idx << PAGE_CACHE_SHIFT;
return (loff_t)idx << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_offset);
@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset)
/*
* XXX for now.
*/
return offset >> PAGE_CACHE_SHIFT;
return offset >> PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_index);
int cl_page_size(const struct cl_object *obj)
{
return 1 << PAGE_CACHE_SHIFT;
return 1 << PAGE_SHIFT;
}
EXPORT_SYMBOL(cl_page_size);
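
[Editor's note: cl_offset() and cl_index() above are inverse shifts. A sketch assuming 4 KB pages; the _sketch names are illustrative, not the kernel's:]

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed */

static long long cl_offset_sketch(unsigned long idx)
{
        return (long long)idx << PAGE_SHIFT;
}

static unsigned long cl_index_sketch(long long offset)
{
        return offset >> PAGE_SHIFT;
}

int main(void)
{
        assert(cl_index_sketch(cl_offset_sketch(7)) == 7);
        assert(cl_index_sketch(cl_offset_sketch(7) + 100) == 7); /* floors */
        printf("offset/index conversions round-trip\n");
        return 0;
}
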


@ -461,9 +461,9 @@ static int obd_init_checks(void)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) {
if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
(__u64)PAGE_CACHE_SIZE);
(__u64)PAGE_SIZE);
ret = -EINVAL;
}
@ -509,7 +509,7 @@ static int __init obdclass_init(void)
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL).
*/
if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
if (totalram_pages <= 512 << (20 - PAGE_SHIFT))
obd_max_dirty_pages = totalram_pages / 4;
else
obd_max_dirty_pages = totalram_pages / 2;


@ -47,7 +47,6 @@
#include "../../include/lustre/lustre_idl.h"
#include <linux/fs.h>
#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */
void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
{
@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid)
if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits))
dst->i_blkbits = ffs(src->o_blksize) - 1;
if (dst->i_blkbits < PAGE_CACHE_SHIFT)
dst->i_blkbits = PAGE_CACHE_SHIFT;
if (dst->i_blkbits < PAGE_SHIFT)
dst->i_blkbits = PAGE_SHIFT;
/* allocation of space */
if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks)


@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT)));
obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT)));
}
static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr,
if (rc)
return rc;
val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */
val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */
if (val > ((totalram_pages / 10) * 9)) {
/* Somebody wants to assign too much memory to dirty pages */
return -EINVAL;
}
if (val < 4 << (20 - PAGE_CACHE_SHIFT)) {
if (val < 4 << (20 - PAGE_SHIFT)) {
/* Less than 4 Mb for dirty cache is also bad */
return -EINVAL;
}
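
[Editor's note: the store handler above converts megabytes to pages and rejects unreasonable values. A worked sketch assuming 4 KB pages and a hypothetical 1 GB machine:]

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed */

int main(void)
{
        unsigned long totalram_pages = 1UL << (30 - PAGE_SHIFT); /* 1 GB */
        unsigned long val_mb = 64;
        unsigned long val = val_mb << (20 - PAGE_SHIFT); /* MB -> pages */

        if (val > (totalram_pages / 10) * 9)
                printf("rejected: more than 90%% of RAM\n");
        else if (val < 4UL << (20 - PAGE_SHIFT))
                printf("rejected: less than 4 MB\n");
        else
                printf("%lu MB = %lu pages of dirty cache allowed\n",
                       val_mb, val);
        return 0;
}
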


@ -840,8 +840,8 @@ static int lu_htable_order(void)
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
if (cache_size > 1 << (30 - PAGE_SHIFT))
cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4;
#endif
/* clear off unreasonable cache setting. */
@ -853,7 +853,7 @@ static int lu_htable_order(void)
lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
}
cache_size = cache_size / 100 * lu_cache_percent *
(PAGE_CACHE_SIZE / 1024);
(PAGE_SIZE / 1024);
for (bits = 1; (1 << bits) < cache_size; ++bits) {
;
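
[Editor's note: the loop above is effectively a ceil(log2) over the target entry count. A standalone sketch:]

#include <stdio.h>

static int htable_order(unsigned long cache_size)
{
        int bits;

        for (bits = 1; (1UL << bits) < cache_size; ++bits)
                ;
        return bits;
}

int main(void)
{
        int bits = htable_order(1000);

        printf("1000 entries -> order %d (2^%d = %lu slots)\n",
               bits, bits, 1UL << bits);   /* order 10, 1024 slots */
        return 0;
}
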


@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env,
struct page *vmpage = ep->ep_vmpage;
atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
put_page(vmpage);
}
static int echo_page_prep(const struct lu_env *env,
@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
struct echo_object *eco = cl2echo_obj(obj);
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
get_page(vmpage);
mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
atomic_inc(&eco->eo_npages);
@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
LASSERT(rc == 0);
rc = cl_echo_enqueue0(env, eco, offset,
offset + npages * PAGE_CACHE_SIZE - 1,
offset + npages * PAGE_SIZE - 1,
rw == READ ? LCK_PR : LCK_PW, &lh.cookie,
CEF_NEVER);
if (rc < 0)
@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id,
int delta;
/* no partial pages on the client */
LASSERT(count == PAGE_CACHE_SIZE);
LASSERT(count == PAGE_SIZE);
addr = kmap(page);
for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off = offset + delta;
stripe_id = id;
@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id,
int rc2;
/* no partial pages on the client */
LASSERT(count == PAGE_CACHE_SIZE);
LASSERT(count == PAGE_SIZE);
addr = kmap(page);
for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
stripe_id = id;
@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
return -EINVAL;
/* XXX think again with misaligned I/O */
npages = count >> PAGE_CACHE_SHIFT;
npages = count >> PAGE_SHIFT;
if (rw == OBD_BRW_WRITE)
brw_flags = OBD_BRW_ASYNC;
@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
for (i = 0, pgp = pga, off = offset;
i < npages;
i++, pgp++, off += PAGE_CACHE_SIZE) {
i++, pgp++, off += PAGE_SIZE) {
LASSERT(!pgp->pg); /* for cleanup */
@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
goto out;
pages[i] = pgp->pg;
pgp->count = PAGE_CACHE_SIZE;
pgp->count = PAGE_SIZE;
pgp->off = off;
pgp->flag = brw_flags;
@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
return -EINVAL;
npages = batch >> PAGE_CACHE_SHIFT;
tot_pages = count >> PAGE_CACHE_SHIFT;
npages = batch >> PAGE_SHIFT;
tot_pages = count >> PAGE_SHIFT;
lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS);
rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS);
@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
if (tot_pages < npages)
npages = tot_pages;
for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) {
for (i = 0; i < npages; i++, off += PAGE_SIZE) {
rnb[i].offset = off;
rnb[i].len = PAGE_CACHE_SIZE;
rnb[i].len = PAGE_SIZE;
rnb[i].flags = brw_flags;
}
@ -1878,7 +1878,7 @@ static int __init obdecho_init(void)
{
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
return echo_client_init();
}


@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
if (rc)
return rc;
pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number <= 0 ||
pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) ||
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
client_obd_list_lock(&cli->cl_loi_list_lock);
cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT);
cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
osc_wake_cache_waiters(cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v)
{
struct obd_device *dev = m->private;
struct client_obd *cli = &dev->u.cli;
int shift = 20 - PAGE_CACHE_SHIFT;
int shift = 20 - PAGE_SHIFT;
seq_printf(m,
"used_mb: %d\n"
@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
return -EFAULT;
kernbuf[count] = 0;
mult = 1 << (20 - PAGE_CACHE_SHIFT);
mult = 1 << (20 - PAGE_SHIFT);
buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) -
kernbuf;
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
/* if the max_pages is specified in bytes, convert to pages */
if (val >= ONE_MB_BRW_SIZE)
val >>= PAGE_CACHE_SHIFT;
val >>= PAGE_SHIFT;
chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1);
chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
/* max_pages_per_rpc must be chunk aligned */
val = (val + ~chunk_mask) & chunk_mask;
if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) {
if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
return -ERANGE;
}
client_obd_list_lock(&cli->cl_loi_list_lock);
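
[Editor's note: the chunk_mask arithmetic above rounds the requested page count up to a whole RPC chunk. A sketch with assumed values, 4 KB pages and 64 KB chunks:]

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12 /* assumed */
#define CHUNKBITS  16 /* assumed: 64 KB chunks */

int main(void)
{
        unsigned long chunk_mask = ~((1UL << (CHUNKBITS - PAGE_SHIFT)) - 1);
        unsigned long val = 21; /* pages requested */

        /* round up to a multiple of 16 pages (one chunk) */
        val = (val + ~chunk_mask) & chunk_mask;
        assert(val == 32);
        printf("max_pages_per_rpc rounded to %lu pages\n", val);
        return 0;
}
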


@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
return -ERANGE;
LASSERT(cur->oe_osclock == victim->oe_osclock);
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
chunk_mask = ~((1 << ppc_bits) - 1);
chunksize = 1 << cli->cl_chunkbits;
chunk = index >> ppc_bits;
@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (!sent) {
lost_grant = ext->oe_grants;
} else if (blocksize < PAGE_CACHE_SIZE &&
last_count != PAGE_CACHE_SIZE) {
} else if (blocksize < PAGE_SIZE &&
last_count != PAGE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check.
@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
if (end)
count += blocksize - end;
lost_grant = PAGE_CACHE_SIZE - count;
lost_grant = PAGE_SIZE - count;
}
if (ext->oe_grants > 0)
osc_free_grant(cli, nr_pages, lost_grant);
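
[Editor's note: a worked example of the lost-grant arithmetic above, with an assumed 1 KB OST blocksize; this mirrors the round-to-blocksize step, not the surrounding bookkeeping:]

#include <stdio.h>

#define PAGE_SIZE 4096UL /* assumed */

static unsigned long lost_grant(unsigned long write_bytes,
                                unsigned long blocksize)
{
        /* bytes the OST actually charges: the write rounded up to blocks */
        unsigned long count = write_bytes;
        unsigned long end = count & (blocksize - 1);

        if (end)
                count += blocksize - end;
        return PAGE_SIZE - count;
}

int main(void)
{
        printf("100 B write, 1 KB blocks: %lu bytes of grant returned\n",
               lost_grant(100, 1024));   /* 4096 - 1024 = 3072 */
        printf("4 KB write: %lu bytes returned\n",
               lost_grant(4096, 1024));  /* full page used, nothing lost */
        return 0;
}
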
@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
struct osc_async_page *oap;
struct osc_async_page *tmp;
int pages_in_chunk = 0;
int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
__u64 trunc_chunk = trunc_index >> ppc_bits;
int grants = 0;
int nr_pages = 0;
@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
}
@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
*/
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
oap->oap_count = PAGE_SIZE - oap->oap_page_off;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
}
}
@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *next;
int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env,
return 0;
else if (cl_offset(obj, page->cp_index + 1) > kms)
/* catch sub-page write at end of file */
return kms % PAGE_CACHE_SIZE;
return kms % PAGE_SIZE;
else
return PAGE_CACHE_SIZE;
return PAGE_SIZE;
}
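
[Editor's note: osc_refresh_count() above charges the last page of a file only up to kms % PAGE_SIZE; a small sketch, 4 KB pages assumed:]

#include <stdio.h>

#define PAGE_SIZE 4096ULL /* assumed */

int main(void)
{
        unsigned long long kms = 10000; /* known minimum file size, bytes */

        /* interior pages count in full; the last one only up to EOF */
        printf("last page holds %llu of %llu bytes\n",
               kms % PAGE_SIZE, PAGE_SIZE);   /* 1808 of 4096 */
        return 0;
}
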
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli,
assert_spin_locked(&cli->cl_loi_list_lock.lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_CACHE_SIZE;
cli->cl_dirty += PAGE_SIZE;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
PAGE_CACHE_SIZE, pga, pga->pg);
PAGE_SIZE, pga, pga->pg);
osc_update_next_shrink(cli);
}
@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli,
pga->flag &= ~OBD_BRW_FROM_GRANT;
atomic_dec(&obd_dirty_pages);
cli->cl_dirty -= PAGE_CACHE_SIZE;
cli->cl_dirty -= PAGE_SIZE;
if (pga->flag & OBD_BRW_NOCACHE) {
pga->flag &= ~OBD_BRW_NOCACHE;
atomic_dec(&obd_dirty_transit_pages);
cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
cli->cl_dirty_transit -= PAGE_SIZE;
}
}
@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli,
* used, we should return these grants to the OST. There are two cases where grants
* can be lost:
* 1. truncate;
* 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
* 2. blocksize at OST is less than PAGE_SIZE and a partial page was
* written. In this case the OST may use fewer chunks to serve this partial
* write. OSTs don't actually know the page size on the client side, so
* clients have to calculate the lost grant from the blocksize on the OST.
@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
client_obd_list_lock(&cli->cl_loi_list_lock);
atomic_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
cli->cl_dirty -= nr_pages << PAGE_SHIFT;
cli->cl_lost_grant += lost_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli,
if (rc < 0)
return 0;
if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit += PAGE_CACHE_SIZE;
cli->cl_dirty_transit += PAGE_SIZE;
atomic_inc(&obd_dirty_transit_pages);
oap->oap_brw_flags |= OBD_BRW_NOCACHE;
}
@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
* of queued writes and create a discontiguous rpc stream
*/
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
cli->cl_dirty_max < PAGE_CACHE_SIZE ||
cli->cl_dirty_max < PAGE_SIZE ||
cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
rc = -EDQUOT;
goto out;
@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
(atomic_read(&obd_dirty_pages) + 1 >
obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",


@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
int result;
opg->ops_from = 0;
opg->ops_to = PAGE_CACHE_SIZE;
opg->ops_to = PAGE_SIZE;
result = osc_prep_async_page(osc, opg, vmpage,
cl_offset(obj, page->cp_index));
@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and..
*/
static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long time to finish. */
static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady


@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_undirty = 0;
} else {
long max_in_flight = (cli->cl_max_pages_per_rpc <<
PAGE_CACHE_SHIFT)*
PAGE_SHIFT)*
(cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
}
@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
static int osc_shrink_grant(struct client_obd *cli)
{
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
(cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
(cli->cl_max_pages_per_rpc << PAGE_SHIFT);
client_obd_list_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
client_obd_list_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance.
*/
if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
client_obd_list_unlock(&cli->cl_loi_list_lock);
@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client)
* cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
* Keep comment here so that it can be found by searching.
*/
int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
client->cl_avail_grant > brw_size)
@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
}
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
client_obd_list_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
LASSERTF(page_count == 1 ||
(ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) &&
(ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
ergo(i > 0 && i < page_count - 1,
poff == 0 && pg->count == PAGE_CACHE_SIZE) &&
poff == 0 && pg->count == PAGE_SIZE) &&
ergo(i == page_count - 1, poff == 0)),
"i: %d/%d pg: %p off: %llu, count: %u\n",
i, page_count, pg, pg->off, pg->count);
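
[Editor's note: the LASSERTF above encodes the bulk-RPC contiguity invariant. A sketch that checks the same three conditions; struct brw_frag is a hypothetical stand-in for the kernel's brw_page, and 4 KB pages are assumed:]

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* assumed */

struct brw_frag {
        unsigned int poff;  /* offset of the fragment within its page */
        unsigned int count; /* bytes in this fragment */
};

static int stream_is_contiguous(const struct brw_frag *f, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                if (n == 1)
                        continue;                /* single page: any shape */
                if (i == 0 && f[i].poff + f[i].count != PAGE_SIZE)
                        return 0;                /* first: must reach page end */
                if (i > 0 && i < n - 1 &&
                    (f[i].poff != 0 || f[i].count != PAGE_SIZE))
                        return 0;                /* interior: full page */
                if (i == n - 1 && f[i].poff != 0)
                        return 0;                /* last: starts at offset 0 */
        }
        return 1;
}

int main(void)
{
        struct brw_frag ok[]  = { { 512, 3584 }, { 0, 4096 }, { 0, 1000 } };
        struct brw_frag bad[] = { { 0, 2048 }, { 0, 4096 } };

        assert(stream_is_contiguous(ok, 3));
        assert(!stream_is_contiguous(bad, 2));
        printf("gap check matches the LASSERTF conditions\n");
        return 0;
}
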
@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
oap->oap_count;
else
LASSERT(oap->oap_page_off + oap->oap_count ==
PAGE_CACHE_SIZE);
PAGE_SIZE);
}
}
@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
tmp->oap_request = ptlrpc_request_addref(req);
client_obd_list_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_CACHE_SHIFT;
starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
@ -2790,12 +2790,12 @@ out:
CFS_PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1)
fm_key->fiemap.fm_start + PAGE_SIZE - 1)
policy.l_extent.end = OBD_OBJECT_EOF;
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK;
PAGE_SIZE - 1) & CFS_PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,


@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
LASSERT(page);
LASSERT(pageoffset >= 0);
LASSERT(len > 0);
LASSERT(pageoffset + len <= PAGE_CACHE_SIZE);
LASSERT(pageoffset + len <= PAGE_SIZE);
desc->bd_nob += len;
if (pin)
page_cache_get(page);
get_page(page);
ptlrpc_add_bulk_page(desc, page, pageoffset, len);
}
@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
if (unpin) {
for (i = 0; i < desc->bd_iov_count; i++)
page_cache_release(desc->bd_iov[i].kiov_page);
put_page(desc->bd_iov[i].kiov_page);
}
kfree(desc);


@ -1092,7 +1092,7 @@ finish:
if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE)
cli->cl_max_pages_per_rpc =
min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT,
min(ocd->ocd_brw_size >> PAGE_SHIFT,
cli->cl_max_pages_per_rpc);
else if (imp->imp_connect_op == MDS_CONNECT ||
imp->imp_connect_op == MGS_CONNECT)


@ -308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file,
* hose a kernel by allowing the request history to grow too
* far.
*/
bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (val > totalram_pages / (2 * bufpages))
return -ERANGE;
@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer,
const char prefix[] = "connection=";
const int prefix_len = sizeof(prefix) - 1;
if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len)
if (count > PAGE_SIZE - 1 || count <= prefix_len)
return -EINVAL;
kbuf = kzalloc(count + 1, GFP_NOFS);


@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp)
}
list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) {
LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON,
LASSERTF((long)req > PAGE_SIZE && req != LP_POISON,
"req %p bad\n", req);
LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req);
if (!ptlrpc_no_resend(req))
