Commit 4a2d057e authored by Linus Torvalds

Merge branch 'PAGE_CACHE_SIZE-removal'

Merge PAGE_CACHE_SIZE removal patches from Kirill Shutemov:
 "PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
  ago with promise that one day it will be possible to implement page
  cache with bigger chunks than PAGE_SIZE.

  This promise never materialized, and it likely never will.

  Let's stop pretending that pages in the page cache are special.  They
  are not.

  The first patch, which contains most of the changes, was generated with
  coccinelle.  The second is manual fixups on top.

  The third patch removes the macro definitions"

[ I was planning to apply this just before rc2, but then I spaced out,
  so here it is right _after_ rc2 instead.

  As Kirill suggested as a possibility, I could have decided to only
  merge the first two patches and leave the old interfaces around for
  compatibility, but I'd rather get it all done: any out-of-tree modules
  and patches can trivially do the conversion while still working with
  older kernels, so there is little reason to try to maintain the
  redundant legacy model.    - Linus ]

* PAGE_CACHE_SIZE-removal:
  mm: drop PAGE_CACHE_* and page_cache_{get,release} definition
  mm, fs: remove remaining PAGE_CACHE_* and page_cache_{get,release} usage
  mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
parents 9735a227 1fa64f19
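
The conversion described in the commit message is a purely mechanical rename,
and every replacement already exists in older kernels, which is why out-of-tree
code can switch over without losing backward compatibility.  A minimal sketch
of the mapping applied to hypothetical driver code (my_copy_page is
illustrative and not part of this series):

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>
	#include <linux/string.h>

	/*
	 * PAGE_CACHE_SIZE          ->  PAGE_SIZE
	 * PAGE_CACHE_SHIFT         ->  PAGE_SHIFT
	 * PAGE_CACHE_MASK          ->  PAGE_MASK
	 * PAGE_CACHE_ALIGN(len)    ->  PAGE_ALIGN(len)
	 * page_cache_get(page)     ->  get_page(page)
	 * page_cache_release(page) ->  put_page(page)
	 */
	static int my_copy_page(struct address_space *mapping, pgoff_t index,
				void *buf)
	{
		struct page *page = read_mapping_page(mapping, index, NULL);

		if (IS_ERR(page))
			return PTR_ERR(page);

		/* was: memcpy(buf, page_address(page), PAGE_CACHE_SIZE); */
		memcpy(buf, page_address(page), PAGE_SIZE);

		/* was: page_cache_release(page); */
		put_page(page);
		return 0;
	}
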
......@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
which the timestamp reverts to 1970, i.e. moves backwards in time.
Currently, cramfs must be written and read with architectures of the
same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
same endianness, and can be read only by kernels with PAGE_SIZE
== 4096. At least the latter of these is a bug, but it hasn't been
decided what the best fix is. For the moment if you have larger pages
you can just change the #define in mkcramfs.c, so long as you don't
......
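
The larger-page workaround mentioned in the hunk above amounts to changing the
block-size define in the userspace tool; a sketch, assuming mkcramfs.c carries
a define along these lines (the exact name and value may differ):

	/* Block size baked into the cramfs image; must match the
	 * target kernel's page size (4096 by default). */
	#define PAGE_CACHE_SIZE (4096)
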
......@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance. The
default is half of your physical RAM without swap. If you
oversize your tmpfs instances the machine will deadlock
since the OOM handler will not be able to free that memory.
nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE.
nr_blocks: The same as size, but in blocks of PAGE_SIZE.
nr_inodes: The maximum number of inodes for this instance. The default
is half of the number of your physical RAM pages, or (on a
machine with highmem) the number of lowmem RAM pages,
......
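
Since nr_blocks is counted in PAGE_SIZE units, 16384 blocks on a machine with
4 KiB pages caps the instance at 64 MiB.  A minimal usage sketch, assuming an
existing /mnt/tmp mount point (the path and values are illustrative):

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* 16384 blocks * 4096-byte pages = 64 MiB; nr_inodes is arbitrary */
		if (mount("tmpfs", "/mnt/tmp", "tmpfs", 0,
			  "nr_blocks=16384,nr_inodes=8192") != 0) {
			perror("mount");
			return 1;
		}
		return 0;
	}
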
......@@ -708,9 +708,9 @@ struct address_space_operations {
from the address space. This generally corresponds to either a
truncation, punch hole or a complete invalidation of the address
space (in the latter case 'offset' will always be 0 and 'length'
will be PAGE_CACHE_SIZE). Any private data associated with the page
will be PAGE_SIZE). Any private data associated with the page
should be updated to reflect this truncation. If offset is 0 and
length is PAGE_CACHE_SIZE, then the private data should be released,
length is PAGE_SIZE, then the private data should be released,
because the page must be able to be completely discarded. This may
be done by calling the ->releasepage function, but in this case the
release MUST succeed.
......
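
As a rough illustration of the contract described in the hunk above, a
hypothetical filesystem's handler might look like this (the my_fs_* helpers
are made up for the sketch; only the offset/length handling follows the
documented rules):

	#include <linux/mm.h>
	#include <linux/pagemap.h>

	/* Hypothetical bookkeeping helpers for per-page private data. */
	static void my_fs_release_private(struct page *page) { /* ... */ }
	static void my_fs_trim_private(struct page *page, unsigned int offset,
				       unsigned int length) { /* ... */ }

	static void my_fs_invalidatepage(struct page *page, unsigned int offset,
					 unsigned int length)
	{
		if (offset == 0 && length == PAGE_SIZE) {
			/* Whole page invalidated: private data must be
			 * released, and the release must succeed so the page
			 * can be discarded completely. */
			if (page_has_private(page))
				my_fs_release_private(page);
			return;
		}

		/* Truncation or hole punch: adjust private data for the byte
		 * range [offset, offset + length) only. */
		my_fs_trim_private(page, offset, length);
	}
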
......@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)page_address(page);
unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
unsigned long vaddr = page->index << PAGE_SHIFT;
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_page(paddr, vaddr);
......
......@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
*/
if (mapping && cache_is_vipt_aliasing())
flush_pfn_alias(page_to_pfn(page),
page->index << PAGE_CACHE_SHIFT);
page->index << PAGE_SHIFT);
}
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
......@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
pgoff = page->index;
flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
......
......@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
if (!mapping)
return;
pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
pgoff = page->index;
/* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
......
......@@ -22,7 +22,7 @@
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h> /* for node_online_map */
#include <linux/pagemap.h> /* for release_pages and page_cache_release */
#include <linux/pagemap.h> /* for release_pages */
#include <linux/compat.h>
#include <asm/pgalloc.h>
......
......@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
sb->s_op = &s_ops;
sb->s_fs_info = info;
......
......@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->uid = current_uid();
sbi->gid = current_gid();
sb->s_fs_info = sbi;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
if (hypfs_parse_options(data, sb))
......
......@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
* release the pages we didn't map into the bio, if any
*/
while (j < page_limit)
page_cache_release(pages[j++]);
put_page(pages[j++]);
}
kfree(pages);
......@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < nr_pages; j++) {
if (!pages[j])
break;
page_cache_release(pages[j]);
put_page(pages[j]);
}
out:
kfree(pages);
......@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);
page_cache_release(bvec->bv_page);
put_page(bvec->bv_page);
}
bio_put(bio);
......@@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio)
* the BIO and the offending pages and re-dirty the pages in process context.
*
* It is expected that bio_check_pages_dirty() will wholly own the BIO from
* here on. It will run one page_cache_release() against each page and will
* run one bio_put() against the BIO.
* here on. It will run one put_page() against each page and will run one
* bio_put() against the BIO.
*/
static void bio_dirty_fn(struct work_struct *work);
......@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
struct page *page = bvec->bv_page;
if (PageDirty(page) || PageCompound(page)) {
page_cache_release(page);
put_page(page);
bvec->bv_page = NULL;
} else {
nr_clean_pages++;
......
......@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
goto fail_id;
q->backing_dev_info.ra_pages =
(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
(VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info.name = "block";
q->node = node_id;
......
......@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;
if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
if ((max_hw_sectors << 9) < PAGE_SIZE) {
max_hw_sectors = 1 << (PAGE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_hw_sectors);
}
......@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
if (max_size < PAGE_CACHE_SIZE) {
max_size = PAGE_CACHE_SIZE;
if (max_size < PAGE_SIZE) {
max_size = PAGE_SIZE;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_size);
}
......@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
if (mask < PAGE_CACHE_SIZE - 1) {
mask = PAGE_CACHE_SIZE - 1;
if (mask < PAGE_SIZE - 1) {
mask = PAGE_SIZE - 1;
printk(KERN_INFO "%s: set to minimum %lx\n",
__func__, mask);
}
......
......@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
unsigned long ra_kb = q->backing_dev_info.ra_pages <<
(PAGE_CACHE_SHIFT - 10);
(PAGE_SHIFT - 10);
return queue_var_show(ra_kb, (page));
}
......@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);
return ret;
}
......@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));
return queue_var_show(PAGE_CACHE_SIZE, (page));
return queue_var_show(PAGE_SIZE, (page));
}
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
......@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
page_kb = 1 << (PAGE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
if (ret < 0)
......
......@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* idle timer unplug to continue working.
*/
if (cfq_cfqq_wait_request(cfqq)) {
if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
if (blk_rq_bytes(rq) > PAGE_SIZE ||
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
......
......@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg,
(bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
(bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
......@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
......
......@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
......@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKBSZSET:
return blkdev_bszset(bdev, mode, argp);
......
......@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
{
struct address_space *mapping = bdev->bd_inode->i_mapping;
return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
NULL);
return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
NULL);
}
unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
......@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
if (PageError(page))
goto fail;
p->v = page;
return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
fail:
page_cache_release(page);
put_page(page);
}
p->v = NULL;
return NULL;
......
......@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
......
......@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct brd_device *brd = bdev->bd_disk->private_data;
int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, err);
return err;
}
......
......@@ -1327,8 +1327,8 @@ struct bm_extent {
#endif
#endif
/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE,
* so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte.
/* BIO_MAX_SIZE is 256 * PAGE_SIZE,
* so for typical PAGE_SIZE of 4k, that is (1<<20) Byte.
* Since we may live in a mixed-platform cluster,
* we limit us to a platform agnostic constant here for now.
* A followup commit may allow even bigger BIO sizes,
......