Merge branch 'cleanup-bd_claim' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into for-2.6.38/core

commit f30195c502
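A minimal before/after sketch of the conversion this merge applies throughout the tree: separate bd_claim()/bd_release() steps are folded into blkdev_get()/blkdev_put() by passing FMODE_EXCL together with a holder cookie. The call names and flags are taken from the hunks below; `dev`, `my_holder` and the surrounding error handling are placeholders, not code from the commit.

	struct block_device *bdev;

	/* Old scheme: open first, then claim exclusivity as a separate step. */
	bdev = open_by_devnum(dev, FMODE_READ | FMODE_WRITE);
	if (!IS_ERR(bdev) && bd_claim(bdev, my_holder) == 0) {
		/* ... use bdev ... */
		bd_release(bdev);
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE);
	}

	/* New scheme: FMODE_EXCL plus a holder makes the open itself exclusive. */
	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, my_holder);
	if (!IS_ERR(bdev)) {
		/* ... use bdev ... */
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}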
@@ -295,11 +295,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
if (!(mode & FMODE_EXCL) && bd_claim(bdev, &bdev) < 0)
if (!(mode & FMODE_EXCL) &&
blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
return -EBUSY;
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
bd_release(bdev);
blkdev_put(bdev, mode | FMODE_EXCL);
return ret;
case BLKPG:
ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);

@@ -911,8 +911,6 @@ struct drbd_md {
struct drbd_backing_dev {
struct block_device *backing_bdev;
struct block_device *md_bdev;
struct file *lo_file;
struct file *md_file;
struct drbd_md md;
struct disk_conf dc; /* The user provided config... */
sector_t known_size; /* last known size of that backing device */

@@ -3372,11 +3372,8 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
if (ldev == NULL)
return;

bd_release(ldev->backing_bdev);
bd_release(ldev->md_bdev);

fput(ldev->lo_file);
fput(ldev->md_file);
blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

kfree(ldev);
}

@@ -855,7 +855,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
sector_t max_possible_sectors;
sector_t min_md_device_sectors;
struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
struct inode *inode, *inode2;
struct block_device *bdev;
struct lru_cache *resync_lru = NULL;
union drbd_state ns, os;
unsigned int max_seg_s;

@@ -907,46 +907,40 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
}
}

nbc->lo_file = filp_open(nbc->dc.backing_dev, O_RDWR, 0);
if (IS_ERR(nbc->lo_file)) {
bdev = blkdev_get_by_path(nbc->dc.backing_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
if (IS_ERR(bdev)) {
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
PTR_ERR(nbc->lo_file));
nbc->lo_file = NULL;
PTR_ERR(bdev));
retcode = ERR_OPEN_DISK;
goto fail;
}
nbc->backing_bdev = bdev;

inode = nbc->lo_file->f_dentry->d_inode;

if (!S_ISBLK(inode->i_mode)) {
retcode = ERR_DISK_NOT_BDEV;
goto fail;
}

nbc->md_file = filp_open(nbc->dc.meta_dev, O_RDWR, 0);
if (IS_ERR(nbc->md_file)) {
/*
* meta_dev_idx >= 0: external fixed size, possibly multiple
* drbd sharing one meta device. TODO in that case, paranoia
* check that [md_bdev, meta_dev_idx] is not yet used by some
* other drbd minor! (if you use drbd.conf + drbdadm, that
* should check it for you already; but if you don't, or
* someone fooled it, we need to double check here)
*/
bdev = blkdev_get_by_path(nbc->dc.meta_dev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL,
(nbc->dc.meta_dev_idx < 0) ?
(void *)mdev : (void *)drbd_m_holder);
if (IS_ERR(bdev)) {
dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
PTR_ERR(nbc->md_file));
nbc->md_file = NULL;
PTR_ERR(bdev));
retcode = ERR_OPEN_MD_DISK;
goto fail;
}
nbc->md_bdev = bdev;

inode2 = nbc->md_file->f_dentry->d_inode;

if (!S_ISBLK(inode2->i_mode)) {
retcode = ERR_MD_NOT_BDEV;
goto fail;
}

nbc->backing_bdev = inode->i_bdev;
if (bd_claim(nbc->backing_bdev, mdev)) {
printk(KERN_ERR "drbd: bd_claim(%p,%p); failed [%p;%p;%u]\n",
nbc->backing_bdev, mdev,
nbc->backing_bdev->bd_holder,
nbc->backing_bdev->bd_contains->bd_holder,
nbc->backing_bdev->bd_holders);
retcode = ERR_BDCLAIM_DISK;
if ((nbc->backing_bdev == nbc->md_bdev) !=
(nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
retcode = ERR_MD_IDX_INVALID;
goto fail;
}

@@ -955,28 +949,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
offsetof(struct bm_extent, lce));
if (!resync_lru) {
retcode = ERR_NOMEM;
goto release_bdev_fail;
}

/* meta_dev_idx >= 0: external fixed size,
* possibly multiple drbd sharing one meta device.
* TODO in that case, paranoia check that [md_bdev, meta_dev_idx] is
* not yet used by some other drbd minor!
* (if you use drbd.conf + drbdadm,
* that should check it for you already; but if you don't, or someone
* fooled it, we need to double check here) */
nbc->md_bdev = inode2->i_bdev;
if (bd_claim(nbc->md_bdev, (nbc->dc.meta_dev_idx < 0) ? (void *)mdev
: (void *) drbd_m_holder)) {
retcode = ERR_BDCLAIM_MD_DISK;
goto release_bdev_fail;
}

if ((nbc->backing_bdev == nbc->md_bdev) !=
(nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
retcode = ERR_MD_IDX_INVALID;
goto release_bdev2_fail;
goto fail;
}

/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */

@@ -987,7 +960,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
(unsigned long long) drbd_get_max_capacity(nbc),
(unsigned long long) nbc->dc.disk_size);
retcode = ERR_DISK_TO_SMALL;
goto release_bdev2_fail;
goto fail;
}

if (nbc->dc.meta_dev_idx < 0) {

@@ -1004,7 +977,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
dev_warn(DEV, "refusing attach: md-device too small, "
"at least %llu sectors needed for this meta-disk type\n",
(unsigned long long) min_md_device_sectors);
goto release_bdev2_fail;
goto fail;
}

/* Make sure the new disk is big enough

@@ -1012,7 +985,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
if (drbd_get_max_capacity(nbc) <
drbd_get_capacity(mdev->this_bdev)) {
retcode = ERR_DISK_TO_SMALL;
goto release_bdev2_fail;
goto fail;
}

nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

@@ -1035,7 +1008,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
retcode = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
drbd_resume_io(mdev);
if (retcode < SS_SUCCESS)
goto release_bdev2_fail;
goto fail;

if (!get_ldev_if_state(mdev, D_ATTACHING))
goto force_diskless;

@@ -1269,18 +1242,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
force_diskless:
drbd_force_state(mdev, NS(disk, D_FAILED));
drbd_md_sync(mdev);
release_bdev2_fail:
if (nbc)
bd_release(nbc->md_bdev);
release_bdev_fail:
if (nbc)
bd_release(nbc->backing_bdev);
fail:
if (nbc) {
if (nbc->lo_file)
fput(nbc->lo_file);
if (nbc->md_file)
fput(nbc->md_file);
if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL);
if (nbc->md_bdev)
blkdev_put(nbc->md_bdev,
FMODE_READ | FMODE_WRITE | FMODE_EXCL);
kfree(nbc);
}
lc_destroy(resync_lru);

@@ -2296,15 +2296,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
* so bdget() can't fail.
*/
bdget(pd->bdev->bd_dev);
if ((ret = blkdev_get(pd->bdev, FMODE_READ)))
if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd)))
goto out;

if ((ret = bd_claim(pd->bdev, pd)))
goto out_putdev;

if ((ret = pkt_get_last_written(pd, &lba))) {
printk(DRIVER_NAME": pkt_get_last_written failed\n");
goto out_unclaim;
goto out_putdev;
}

set_capacity(pd->disk, lba << 2);

@@ -2314,7 +2311,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
q = bdev_get_queue(pd->bdev);
if (write) {
if ((ret = pkt_open_write(pd)))
goto out_unclaim;
goto out_putdev;
/*
* Some CDRW drives can not handle writes larger than one packet,
* even if the size is a multiple of the packet size.

@@ -2329,23 +2326,21 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
}

if ((ret = pkt_set_segment_merging(pd, q)))
goto out_unclaim;
goto out_putdev;

if (write) {
if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
printk(DRIVER_NAME": not enough memory for buffers\n");
ret = -ENOMEM;
goto out_unclaim;
goto out_putdev;
}
printk(DRIVER_NAME": %lukB available on disc\n", lba << 1);
}

return 0;

out_unclaim:
bd_release(pd->bdev);
out_putdev:
blkdev_put(pd->bdev, FMODE_READ);
blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
out:
return ret;
}

@@ -2362,8 +2357,7 @@ static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
pkt_lock_door(pd, 0);

pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
bd_release(pd->bdev);
blkdev_put(pd->bdev, FMODE_READ);
blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

pkt_shrink_pktlist(pd);
}

@@ -2733,7 +2727,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = bdget(dev);
if (!bdev)
return -ENOMEM;
ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY);
ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
if (ret)
return ret;

@@ -65,15 +65,12 @@ static int raw_open(struct inode *inode, struct file *filp)
if (!bdev)
goto out;
igrab(bdev->bd_inode);
err = blkdev_get(bdev, filp->f_mode);
err = blkdev_get(bdev, filp->f_mode | FMODE_EXCL, raw_open);
if (err)
goto out;
err = bd_claim(bdev, raw_open);
if (err)
goto out1;
err = set_blocksize(bdev, bdev_logical_block_size(bdev));
if (err)
goto out2;
goto out1;
filp->f_flags |= O_DIRECT;
filp->f_mapping = bdev->bd_inode->i_mapping;
if (++raw_devices[minor].inuse == 1)

@@ -83,10 +80,8 @@ static int raw_open(struct inode *inode, struct file *filp)
mutex_unlock(&raw_mutex);
return 0;

out2:
bd_release(bdev);
out1:
blkdev_put(bdev, filp->f_mode);
blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
out:
mutex_unlock(&raw_mutex);
return err;

@@ -110,8 +105,7 @@ static int raw_release(struct inode *inode, struct file *filp)
}
mutex_unlock(&raw_mutex);

bd_release(bdev);
blkdev_put(bdev, filp->f_mode);
blkdev_put(bdev, filp->f_mode | FMODE_EXCL);
return 0;
}

@@ -325,15 +325,18 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev,

BUG_ON(d->dm_dev.bdev);

bdev = open_by_devnum(dev, d->dm_dev.mode);
bdev = blkdev_get_by_dev(dev, d->dm_dev.mode | FMODE_EXCL, _claim_ptr);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md));
if (r)
blkdev_put(bdev, d->dm_dev.mode);
else
d->dm_dev.bdev = bdev;
return r;

r = bd_link_disk_holder(bdev, dm_disk(md));
if (r) {
blkdev_put(bdev, d->dm_dev.mode | FMODE_EXCL);
return r;
}

d->dm_dev.bdev = bdev;
return 0;
}

/*

@@ -344,8 +347,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
if (!d->dm_dev.bdev)
return;

bd_release_from_disk(d->dm_dev.bdev, dm_disk(md));
blkdev_put(d->dm_dev.bdev, d->dm_dev.mode);
blkdev_put(d->dm_dev.bdev, d->dm_dev.mode | FMODE_EXCL);
d->dm_dev.bdev = NULL;
}

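The device-mapper hunk above shows the second half of the cleanup: the sysfs "slaves"/"holders" symlinks are now created with bd_link_disk_holder() after an exclusive open, and are torn down automatically when the last exclusive reference is dropped by blkdev_put(). A hedged sketch of that pattern follows; `example_attach`, `claim_ptr` and `holder_disk` are illustrative names, the calls mirror open_dev() above.

	static int example_attach(dev_t dev, fmode_t mode, void *claim_ptr,
				  struct gendisk *holder_disk)
	{
		struct block_device *bdev;
		int r;

		bdev = blkdev_get_by_dev(dev, mode | FMODE_EXCL, claim_ptr);
		if (IS_ERR(bdev))
			return PTR_ERR(bdev);

		/* Create the sysfs slaves/holders symlinks for @holder_disk. */
		r = bd_link_disk_holder(bdev, holder_disk);
		if (r)
			blkdev_put(bdev, mode | FMODE_EXCL); /* also drops the claim */
		return r;
	}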
@@ -1880,7 +1880,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

list_add_rcu(&rdev->same_set, &mddev->disks);
bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);
bd_link_disk_holder(rdev->bdev, mddev->gendisk);

/* May as well allow recovery to be retried once */
mddev->recovery_disabled = 0;

@@ -1907,7 +1907,6 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev)
MD_BUG();
return;
}
bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
list_del_rcu(&rdev->same_set);
printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
rdev->mddev = NULL;

@@ -1935,19 +1934,13 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
struct block_device *bdev;
char b[BDEVNAME_SIZE];

bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (IS_ERR(bdev)) {
printk(KERN_ERR "md: could not open %s.\n",
__bdevname(dev, b));
return PTR_ERR(bdev);
}
err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
if (err) {
printk(KERN_ERR "md: could not bd_claim %s.\n",
bdevname(bdev, b));
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return err;
}
if (!shared)
set_bit(AllReserved, &rdev->flags);
rdev->bdev = bdev;

@@ -1960,8 +1953,7 @@ static void unlock_rdev(mdk_rdev_t *rdev)
rdev->bdev = NULL;
if (!bdev)
MD_BUG();
bd_release(bdev);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

@@ -224,7 +224,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
if (dev->blkdev) {
invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
0, -1);
close_bdev_exclusive(dev->blkdev, FMODE_READ|FMODE_WRITE);
blkdev_put(dev->blkdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

kfree(dev);

@@ -234,6 +234,7 @@ static void block2mtd_free_device(struct block2mtd_dev *dev)
/* FIXME: ensure that mtd->size % erase_size == 0 */
static struct block2mtd_dev *add_device(char *devname, int erase_size)
{
const fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
struct block_device *bdev;
struct block2mtd_dev *dev;
char *name;

@@ -246,7 +247,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
return NULL;

/* Get a handle on the device */
bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, NULL);
bdev = blkdev_get_by_path(devname, mode, dev);
#ifndef MODULE
if (IS_ERR(bdev)) {

@@ -254,9 +255,8 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
to resolve the device name by other means. */

dev_t devt = name_to_dev_t(devname);
if (devt) {
bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
}
if (devt)
bdev = blkdev_get_by_dev(devt, mode, dev);
}
#endif

@@ -103,7 +103,7 @@ int dasd_scan_partitions(struct dasd_block *block)
struct block_device *bdev;

bdev = bdget_disk(block->gdp, 0);
if (!bdev || blkdev_get(bdev, FMODE_READ) < 0)
if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0)
return -ENODEV;
/*
* See fs/partition/check.c:register_disk,rescan_partitions

@@ -543,7 +543,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
ro = curlun->initially_ro;
if (!ro) {
filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
if (-EROFS == PTR_ERR(filp))
if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
ro = 1;
}
if (ro)

@@ -558,10 +558,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)

if (filp->f_path.dentry)
inode = filp->f_path.dentry->d_inode;
if (inode && S_ISBLK(inode->i_mode)) {
if (bdev_read_only(inode->i_bdev))
ro = 1;
} else if (!inode || !S_ISREG(inode->i_mode)) {
if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
}

fs/block_dev.c
@@ -426,9 +426,6 @@ static void init_once(void *foo)
mutex_init(&bdev->bd_mutex);
INIT_LIST_HEAD(&bdev->bd_inodes);
INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_list);
#endif
inode_init_once(&ei->vfs_inode);
/* Initialize mutex for freeze. */
mutex_init(&bdev->bd_fsfreeze_mutex);

@@ -663,7 +660,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
else if (bdev->bd_contains == bdev)
return true; /* is a whole device which isn't held */

else if (whole->bd_holder == bd_claim)
else if (whole->bd_holder == bd_may_claim)
return true; /* is a partition of a device that is being partitioned */
else if (whole->bd_holder != NULL)
return false; /* is a partition of a held device */

@@ -775,440 +772,88 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
}
}

/* releases bdev_lock */
static void __bd_abort_claiming(struct block_device *whole, void *holder)
{
BUG_ON(whole->bd_claiming != holder);
whole->bd_claiming = NULL;
wake_up_bit(&whole->bd_claiming, 0);

spin_unlock(&bdev_lock);
bdput(whole);
}

/**
* bd_abort_claiming - abort claiming a block device
* @whole: whole block device returned by bd_start_claiming()
* @holder: holder trying to claim @bdev
*
* Abort a claiming block started by bd_start_claiming(). Note that
* @whole is not the block device to be claimed but the whole device
* returned by bd_start_claiming().
*
* CONTEXT:
* Grabs and releases bdev_lock.
*/
static void bd_abort_claiming(struct block_device *whole, void *holder)
{
spin_lock(&bdev_lock);
__bd_abort_claiming(whole, holder); /* releases bdev_lock */
}

/* increment holders when we have a legitimate claim. requires bdev_lock */
static void __bd_claim(struct block_device *bdev, struct block_device *whole,
void *holder)
{
/* note that for a whole device bd_holders
* will be incremented twice, and bd_holder will
* be set to bd_claim before being set to holder
*/
whole->bd_holders++;
whole->bd_holder = bd_claim;
bdev->bd_holders++;
bdev->bd_holder = holder;
}

/**
* bd_finish_claiming - finish claiming a block device
* @bdev: block device of interest (passed to bd_start_claiming())
* @whole: whole block device returned by bd_start_claiming()
* @holder: holder trying to claim @bdev
*
* Finish a claiming block started by bd_start_claiming().
*
* CONTEXT:
* Grabs and releases bdev_lock.
*/
static void bd_finish_claiming(struct block_device *bdev,
struct block_device *whole, void *holder)
{
spin_lock(&bdev_lock);
BUG_ON(!bd_may_claim(bdev, whole, holder));
__bd_claim(bdev, whole, holder);
__bd_abort_claiming(whole, holder); /* not actually an abort */
}

/**
* bd_claim - claim a block device
* @bdev: block device to claim
* @holder: holder trying to claim @bdev
*
* Try to claim @bdev which must have been opened successfully.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* 0 if successful, -EBUSY if @bdev is already claimed.
*/
int bd_claim(struct block_device *bdev, void *holder)
{
struct block_device *whole = bdev->bd_contains;
int res;

might_sleep();

spin_lock(&bdev_lock);
res = bd_prepare_to_claim(bdev, whole, holder);
if (res == 0)
__bd_claim(bdev, whole, holder);
spin_unlock(&bdev_lock);

return res;
}
EXPORT_SYMBOL(bd_claim);

void bd_release(struct block_device *bdev)
{
spin_lock(&bdev_lock);
if (!--bdev->bd_contains->bd_holders)
bdev->bd_contains->bd_holder = NULL;
if (!--bdev->bd_holders)
bdev->bd_holder = NULL;
spin_unlock(&bdev_lock);
}

EXPORT_SYMBOL(bd_release);

#ifdef CONFIG_SYSFS
/*
* Functions for bd_claim_by_kobject / bd_release_from_kobject
*
* If a kobject is passed to bd_claim_by_kobject()
* and the kobject has a parent directory,
* following symlinks are created:
* o from the kobject to the claimed bdev
* o from "holders" directory of the bdev to the parent of the kobject
* bd_release_from_kobject() removes these symlinks.
*
* Example:
* If /dev/dm-0 maps to /dev/sda, kobject corresponding to
* /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
* /sys/block/dm-0/slaves/sda --> /sys/block/sda
* /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
*/

static int add_symlink(struct kobject *from, struct kobject *to)
{
if (!from || !to)
return 0;
return sysfs_create_link(from, to, kobject_name(to));
}

static void del_symlink(struct kobject *from, struct kobject *to)
{
if (!from || !to)
return;
sysfs_remove_link(from, kobject_name(to));
}

/*
* 'struct bd_holder' contains pointers to kobjects symlinked by
* bd_claim_by_kobject.
* It's connected to bd_holder_list which is protected by bdev->bd_sem.
*/
struct bd_holder {
struct list_head list; /* chain of holders of the bdev */
int count; /* references from the holder */
struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */
struct kobject *hdev; /* e.g. "/block/dm-0" */
struct kobject *hdir; /* e.g. "/block/sda/holders" */
struct kobject *sdev; /* e.g. "/block/sda" */
};

/*
* Get references of related kobjects at once.
* Returns 1 on success. 0 on failure.
*
* Should call bd_holder_release_dirs() after successful use.
*/
static int bd_holder_grab_dirs(struct block_device *bdev,
struct bd_holder *bo)
{
if (!bdev || !bo)
return 0;

bo->sdir = kobject_get(bo->sdir);
if (!bo->sdir)
return 0;

bo->hdev = kobject_get(bo->sdir->parent);
if (!bo->hdev)
goto fail_put_sdir;

bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
if (!bo->sdev)
goto fail_put_hdev;

bo->hdir = kobject_get(bdev->bd_part->holder_dir);
if (!bo->hdir)
goto fail_put_sdev;

return 1;

fail_put_sdev:
kobject_put(bo->sdev);
fail_put_hdev:
kobject_put(bo->hdev);
fail_put_sdir:
kobject_put(bo->sdir);

return 0;
}

/* Put references of related kobjects at once. */
static void bd_holder_release_dirs(struct bd_holder *bo)
{
kobject_put(bo->hdir);
kobject_put(bo->sdev);
kobject_put(bo->hdev);
kobject_put(bo->sdir);
}

static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
{
struct bd_holder *bo;

bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return NULL;

bo->count = 1;
bo->sdir = kobj;

return bo;
}

static void free_bd_holder(struct bd_holder *bo)
{
kfree(bo);
}

/**
* find_bd_holder - find matching struct bd_holder from the block device
* bd_link_disk_holder - create symlinks between holding disk and slave bdev
* @bdev: the claimed slave bdev
* @disk: the holding disk
*
* @bdev: struct block device to be searched
* @bo: target struct bd_holder
* This functions creates the following sysfs symlinks.
*
* Returns matching entry with @bo in @bdev->bd_holder_list.
* If found, increment the reference count and return the pointer.
* If not found, returns NULL.
* - from "slaves" directory of the holder @disk to the claimed @bdev
* - from "holders" directory of the @bdev to the holder @disk
*
* For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
* passed to bd_link_disk_holder(), then:
*
* /sys/block/dm-0/slaves/sda --> /sys/block/sda
* /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
*
* The caller must have claimed @bdev before calling this function and
* ensure that both @bdev and @disk are valid during the creation and
* lifetime of these symlinks.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
static struct bd_holder *find_bd_holder(struct block_device *bdev,
struct bd_holder *bo)
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
{
struct bd_holder *tmp;

list_for_each_entry(tmp, &bdev->bd_holder_list, list)
if (tmp->sdir == bo->sdir) {
tmp->count++;
return tmp;
}

return NULL;
}

/**
* add_bd_holder - create sysfs symlinks for bd_claim() relationship
*
* @bdev: block device to be bd_claimed
* @bo: preallocated and initialized by alloc_bd_holder()
*
* Add @bo to @bdev->bd_holder_list, create symlinks.
*
* Returns 0 if symlinks are created.
* Returns -ve if something fails.
*/
static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
{
int err;

if (!bo)
return -EINVAL;

if (!bd_holder_grab_dirs(bdev, bo))
return -EBUSY;

err = add_symlink(bo->sdir, bo->sdev);
if (err)
return err;

err = add_symlink(bo->hdir, bo->hdev);
if (err) {
del_symlink(bo->sdir, bo->sdev);
return err;
}

list_add_tail(&bo->list, &bdev->bd_holder_list);
return 0;
}

/**
* del_bd_holder - delete sysfs symlinks for bd_claim() relationship
*
* @bdev: block device to be bd_claimed
* @kobj: holder's kobject
*
* If there is matching entry with @kobj in @bdev->bd_holder_list
* and no other bd_claim() from the same kobject,
* remove the struct bd_holder from the list, delete symlinks for it.
*
* Returns a pointer to the struct bd_holder when it's removed from the list
* and ready to be freed.
* Returns NULL if matching claim isn't found or there is other bd_claim()
* by the same kobject.
*/
static struct bd_holder *del_bd_holder(struct block_device *bdev,
struct kobject *kobj)
{
struct bd_holder *bo;

list_for_each_entry(bo, &bdev->bd_holder_list, list) {
if (bo->sdir == kobj) {
bo->count--;
BUG_ON(bo->count < 0);
if (!bo->count) {
list_del(&bo->list);
del_symlink(bo->sdir, bo->sdev);
del_symlink(bo->hdir, bo->hdev);
bd_holder_release_dirs(bo);
return bo;
}
break;
}
}

return NULL;
}

/**
* bd_claim_by_kobject - bd_claim() with additional kobject signature
*
* @bdev: block device to be claimed
* @holder: holder's signature
* @kobj: holder's kobject
*
* Do bd_claim() and if it succeeds, create sysfs symlinks between
* the bdev and the holder's kobject.
* Use bd_release_from_kobject() when relesing the claimed bdev.
*
* Returns 0 on success. (same as bd_claim())
* Returns errno on failure.
*/
static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
struct kobject *kobj)
{
int err;
struct bd_holder *bo, *found;

if (!kobj)
return -EINVAL;

bo = alloc_bd_holder(kobj);
if (!bo)
return -ENOMEM;
int ret = 0;

mutex_lock(&bdev->bd_mutex);

err = bd_claim(bdev, holder);
if (err)
goto fail;
WARN_ON_ONCE(!bdev->bd_holder || bdev->bd_holder_disk);

found = find_bd_holder(bdev, bo);
if (found)
goto fail;
/* FIXME: remove the following once add_disk() handles errors */
if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
goto out_unlock;

err = add_bd_holder(bdev, bo);
if (err)
bd_release(bdev);
else
bo = NULL;
fail:
ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
if (ret)
goto out_unlock;

ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
if (ret) {
del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
goto out_unlock;
}

bdev->bd_holder_disk = disk;
out_unlock:
mutex_unlock(&bdev->bd_mutex);
free_bd_holder(bo);
return err;
return ret;
}
EXPORT_SYMBOL_GPL(bd_link_disk_holder);

/**
* bd_release_from_kobject - bd_release() with additional kobject signature
*
* @bdev: block device to be released
* @kobj: holder's kobject
*
* Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
*/
static void bd_release_from_kobject(struct block_device *bdev,
struct kobject *kobj)
static void bd_unlink_disk_holder(struct block_device *bdev)
{
if (!kobj)
struct gendisk *disk = bdev->bd_holder_disk;

bdev->bd_holder_disk = NULL;
if (!disk)
return;

mutex_lock(&bdev->bd_mutex);
bd_release(bdev);
free_bd_holder(del_bd_holder(bdev, kobj));
mutex_unlock(&bdev->bd_mutex);
del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
}

/**
* bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
*
* @bdev: block device to be claimed
* @holder: holder's signature
* @disk: holder's gendisk
*
* Call bd_claim_by_kobject() with getting @disk->slave_dir.
*/
int bd_claim_by_disk(struct block_device *bdev, void *holder,
struct gendisk *disk)
{
return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
}
EXPORT_SYMBOL_GPL(bd_claim_by_disk);

/**
* bd_release_from_disk - wrapper function for bd_release_from_kobject()
*
* @bdev: block device to be claimed
* @disk: holder's gendisk
*
* Call bd_release_from_kobject() and put @disk->slave_dir.
*/
void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
{
bd_release_from_kobject(bdev, disk->slave_dir);
kobject_put(disk->slave_dir);
}
EXPORT_SYMBOL_GPL(bd_release_from_disk);
#else
static inline void bd_unlink_disk_holder(struct block_device *bdev)
{ }
#endif

/*
* Tries to open block device by device number. Use it ONLY if you
* really do not have anything better - i.e. when you are behind a
* truly sucky interface and all you are given is a device number. _Never_
* to be used for internal purposes. If you ever need it - reconsider
* your API.
*/
struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
{
struct block_device *bdev = bdget(dev);
int err = -ENOMEM;
if (bdev)
err = blkdev_get(bdev, mode);
return err ? ERR_PTR(err) : bdev;
}

EXPORT_SYMBOL(open_by_devnum);

/**
* flush_disk - invalidates all buffer-cache entries on a disk
*

@@ -1469,17 +1114,156 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
return ret;
}

int blkdev_get(struct block_device *bdev, fmode_t mode)
/**
* blkdev_get - open a block device
* @bdev: block_device to open
* @mode: FMODE_* mask
* @holder: exclusive holder identifier
*
* Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is
* open with exclusive access. Specifying %FMODE_EXCL with %NULL
* @holder is invalid. Exclusive opens may nest for the same @holder.
*
* On success, the reference count of @bdev is unchanged. On failure,
* @bdev is put.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* 0 on success, -errno on failure.
*/
int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
{
return __blkdev_get(bdev, mode, 0);
struct block_device *whole = NULL;
int res;

WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);

if ((mode & FMODE_EXCL) && holder) {
whole = bd_start_claiming(bdev, holder);
if (IS_ERR(whole)) {
bdput(bdev);
return PTR_ERR(whole);
}
}

res = __blkdev_get(bdev, mode, 0);

/* __blkdev_get() may alter read only status, check it afterwards */
if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
__blkdev_put(bdev, mode, 0);
res = -EACCES;
}

if (whole) {
/* finish claiming */
spin_lock(&bdev_lock);

if (res == 0) {
BUG_ON(!bd_may_claim(bdev, whole, holder));
/*
* Note that for a whole device bd_holders
* will be incremented twice, and bd_holder
* will be set to bd_may_claim before being
* set to holder
*/
whole->bd_holders++;
whole->bd_holder = bd_may_claim;
bdev->bd_holders++;
bdev->bd_holder = holder;
}

/* tell others that we're done */
BUG_ON(whole->bd_claiming != holder);
whole->bd_claiming = NULL;
wake_up_bit(&whole->bd_claiming, 0);

spin_unlock(&bdev_lock);
bdput(whole);
}

return res;
}
EXPORT_SYMBOL(blkdev_get);

/**
* blkdev_get_by_path - open a block device by name
* @path: path to the block device to open
* @mode: FMODE_* mask
* @holder: exclusive holder identifier
*
* Open the blockdevice described by the device file at @path. @mode
* and @holder are identical to blkdev_get().
*
* On success, the returned block_device has reference count of one.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* Pointer to block_device on success, ERR_PTR(-errno) on failure.
*/
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
void *holder)
{
struct block_device *bdev;
int err;

bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return bdev;

err = blkdev_get(bdev, mode, holder);
if (err)
return ERR_PTR(err);

return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

/**
* blkdev_get_by_dev - open a block device by device number
* @dev: device number of block device to open
* @mode: FMODE_* mask
* @holder: exclusive holder identifier
*
* Open the blockdevice described by device number @dev. @mode and
* @holder are identical to blkdev_get().
*
* Use it ONLY if you really do not have anything better - i.e. when
* you are behind a truly sucky interface and all you are given is a
* device number. _Never_ to be used for internal purposes. If you
* ever need it - reconsider your API.
*
* On success, the returned block_device has reference count of one.
*
* CONTEXT:
* Might sleep.
*
* RETURNS:
* Pointer to block_device on success, ERR_PTR(-errno) on failure.
*/
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
struct block_device *bdev;
int err;

bdev = bdget(dev);
if (!bdev)
return ERR_PTR(-ENOMEM);

err = blkdev_get(bdev, mode, holder);
if (err)
return ERR_PTR(err);

return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_dev);

static int blkdev_open(struct inode * inode, struct file * filp)
{
struct block_device *whole = NULL;
struct block_device *bdev;
int res;

/*
* Preserve backwards compatibility and allow large file access

@@ -1500,26 +1284,9 @@ static int blkdev_open(struct inode * inode, struct file * filp)
if (bdev == NULL)
return -ENOMEM;

if (filp->f_mode & FMODE_EXCL) {
whole = bd_start_claiming(bdev, filp);
if (IS_ERR(whole)) {
bdput(bdev);
return PTR_ERR(whole);
}
}

filp->f_mapping = bdev->bd_inode->i_mapping;

res = blkdev_get(bdev, filp->f_mode);

if (whole) {
if (res == 0)
bd_finish_claiming(bdev, whole, filp);
else
bd_abort_claiming(whole, filp);
}

return res;
return blkdev_get(bdev, filp->f_mode, filp);
}

static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)

@@ -1533,6 +1300,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_part_count--;

if (!--bdev->bd_openers) {
WARN_ON_ONCE(bdev->bd_holders);
sync_blockdev(bdev);
kill_bdev(bdev);
}

@@ -1563,6 +1331,34 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)

int blkdev_put(struct block_device *bdev, fmode_t mode)
{
if (mode & FMODE_EXCL) {
bool bdev_free;

/*
* Release a claim on the device. The holder fields
* are protected with bdev_lock. bd_mutex is to
* synchronize disk_holder unlinking.
*/
mutex_lock(&bdev->bd_mutex);
spin_lock(&bdev_lock);

WARN_ON_ONCE(--bdev->bd_holders < 0);
WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);

/* bd_contains might point to self, check in a separate step */
if ((bdev_free = !bdev->bd_holders))
bdev->bd_holder = NULL;
if (!bdev->bd_contains->bd_holders)
bdev->bd_contains->bd_holder = NULL;

spin_unlock(&bdev_lock);

/* if this was the last claim, holder link should go too */
if (bdev_free)
bd_unlink_disk_holder(bdev);

mutex_unlock(&bdev->bd_mutex);
}
return __blkdev_put(bdev, mode, 0);
}
EXPORT_SYMBOL(blkdev_put);

@@ -1570,8 +1366,7 @@ EXPORT_SYMBOL(blkdev_put);
static int blkdev_close(struct inode * inode, struct file * filp)
{
struct block_device *bdev = I_BDEV(filp->f_mapping->host);
if (bdev->bd_holder == filp)
bd_release(bdev);

return blkdev_put(bdev, filp->f_mode);
}

@@ -1716,67 +1511,6 @@ fail:
}
EXPORT_SYMBOL(lookup_bdev);

/**
* open_bdev_exclusive - open a block device by name and set it up for use
*
* @path: special file representing the block device
* @mode: FMODE_... combination to pass be used
* @holder: owner for exclusion
*
* Open the blockdevice described by the special file at @path, claim it
* for the @holder.
*/
struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
{
struct block_device *bdev, *whole;
int error;

bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return bdev;

whole = bd_start_claiming(bdev, holder);
if (IS_ERR(whole)) {
bdput(bdev);
return whole;
}

error = blkdev_get(bdev, mode);
if (error)
goto out_abort_claiming;

error = -EACCES;
if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
goto out_blkdev_put;

bd_finish_claiming(bdev, whole, holder);
return bdev;

out_blkdev_put:
blkdev_put(bdev, mode);
out_abort_claiming:
bd_abort_claiming(whole, holder);
return ERR_PTR(error);
}

EXPORT_SYMBOL(open_bdev_exclusive);

/**
* close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive()
*
* @bdev: blockdevice to close
* @mode: mode, must match that used to open.
*
* This is the counterpart to open_bdev_exclusive().
*/
void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
{
bd_release(bdev);
blkdev_put(bdev, mode);
}

EXPORT_SYMBOL(close_bdev_exclusive);

int __invalidate_device(struct block_device *bdev)
{
struct super_block *sb = get_super(bdev);

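A hedged sketch of how callers are expected to use the replacement helpers defined above, in the style of the filesystem conversions later in this diff; `example_open`/`example_close` are illustrative names, and the call signatures follow the blkdev_get_by_path()/blkdev_put() hunks above.

	static struct block_device *example_open(const char *dev_name, void *holder)
	{
		/* Exclusive open by path; @holder identifies the claimer. */
		return blkdev_get_by_path(dev_name,
					  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
					  holder);
	}

	static void example_close(struct block_device *bdev)
	{
		/* The mode must match the open, including FMODE_EXCL. */
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}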
@@ -489,7 +489,7 @@ again:
continue;

if (device->bdev) {
close_bdev_exclusive(device->bdev, device->mode);
blkdev_put(device->bdev, device->mode);
device->bdev = NULL;
fs_devices->open_devices--;
}

@@ -523,7 +523,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)

list_for_each_entry(device, &fs_devices->devices, dev_list) {
if (device->bdev) {
close_bdev_exclusive(device->bdev, device->mode);
blkdev_put(device->bdev, device->mode);
fs_devices->open_devices--;
}
if (device->writeable) {

@@ -580,13 +580,15 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
int seeding = 1;
int ret = 0;

flags |= FMODE_EXCL;

list_for_each_entry(device, head, dev_list) {
if (device->bdev)
continue;
if (!device->name)
continue;

bdev = open_bdev_exclusive(device->name, flags, holder);
bdev = blkdev_get_by_path(device->name, flags, holder);
if (IS_ERR(bdev)) {
printk(KERN_INFO "open %s failed\n", device->name);
goto error;

@@ -638,7 +640,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
error_brelse:
brelse(bh);
error_close:
close_bdev_exclusive(bdev, FMODE_READ);
blkdev_put(bdev, flags);
error:
continue;
}

@@ -684,7 +686,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,

mutex_lock(&uuid_mutex);

bdev = open_bdev_exclusive(path, flags, holder);
flags |= FMODE_EXCL;
bdev = blkdev_get_by_path(path, flags, holder);

if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);

@@ -716,7 +719,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,

brelse(bh);
error_close:
close_bdev_exclusive(bdev, flags);
blkdev_put(bdev, flags);
error:
mutex_unlock(&uuid_mutex);
return ret;

@@ -1179,8 +1182,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
goto out;
}
} else {
bdev = open_bdev_exclusive(device_path, FMODE_READ,
root->fs_info->bdev_holder);
bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev)) {
ret = PTR_ERR(bdev);
goto out;

@@ -1244,7 +1247,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
root->fs_info->fs_devices->latest_bdev = next_device->bdev;

if (device->bdev) {
close_bdev_exclusive(device->bdev, device->mode);
blkdev_put(device->bdev, device->mode);
device->bdev = NULL;
device->fs_devices->open_devices--;
}

@@ -1287,7 +1290,7 @@ error_brelse:
brelse(bh);
error_close:
if (bdev)
close_bdev_exclusive(bdev, FMODE_READ);
blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
out:
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);

@@ -1439,7 +1442,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;

bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);

@@ -1565,7 +1569,7 @@ out:
mutex_unlock(&root->fs_info->volume_mutex);
return ret;
error:
close_bdev_exclusive(bdev, 0);
blkdev_put(bdev, FMODE_EXCL);
if (seeding_dev) {
mutex_unlock(&uuid_mutex);
up_write(&sb->s_umount);

@@ -49,7 +49,7 @@ struct btrfs_device {

struct block_device *bdev;

/* the mode sent to open_bdev_exclusive */
/* the mode sent to blkdev_get */
fmode_t mode;

char *name;

@@ -347,7 +347,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb)
struct block_device *bdev;
char b[BDEVNAME_SIZE];

bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
if (IS_ERR(bdev))
goto fail;
return bdev;

@@ -364,8 +364,7 @@ fail:
*/
static int ext3_blkdev_put(struct block_device *bdev)
{
bd_release(bdev);
return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static int ext3_blkdev_remove(struct ext3_sb_info *sbi)

@@ -2136,13 +2135,6 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb,
if (bdev == NULL)
return NULL;

if (bd_claim(bdev, sb)) {
ext3_msg(sb, KERN_ERR,
"error: failed to claim external journal device");
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return NULL;
}

blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {

@@ -647,7 +647,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
struct block_device *bdev;
char b[BDEVNAME_SIZE];

bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
if (IS_ERR(bdev))
goto fail;
return bdev;

@@ -663,8 +663,7 @@ fail:
*/
static int ext4_blkdev_put(struct block_device *bdev)
{
bd_release(bdev);
return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static int ext4_blkdev_remove(struct ext4_sb_info *sbi)

@@ -3758,13 +3757,6 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
if (bdev == NULL)
return NULL;

if (bd_claim(bdev, sb)) {
ext4_msg(sb, KERN_ERR,
"failed to claim external journal device");
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
return NULL;
}

blocksize = sb->s_blocksize;
hblock = bdev_logical_block_size(bdev);
if (blocksize < hblock) {

@@ -1268,7 +1268,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
{
struct block_device *bdev;
struct super_block *s;
fmode_t mode = FMODE_READ;
fmode_t mode = FMODE_READ | FMODE_EXCL;
int error;
struct gfs2_args args;
struct gfs2_sbd *sdp;

@@ -1276,7 +1276,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
if (!(flags & MS_RDONLY))
mode |= FMODE_WRITE;

bdev = open_bdev_exclusive(dev_name, mode, fs_type);
bdev = blkdev_get_by_path(dev_name, mode, fs_type);
if (IS_ERR(bdev))
return ERR_CAST(bdev);

@@ -1298,7 +1298,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
goto error_bdev;

if (s->s_root)
close_bdev_exclusive(bdev, mode);
blkdev_put(bdev, mode);

memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;

@@ -1342,7 +1342,7 @@ error_super:
deactivate_locked_super(s);
return ERR_PTR(error);
error_bdev:
close_bdev_exclusive(bdev, mode);
blkdev_put(bdev, mode);
return ERR_PTR(error);
}

@@ -1120,16 +1120,13 @@ int lmLogOpen(struct super_block *sb)
* file systems to log may have n-to-1 relationship;
*/

bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE);
bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
log);
if (IS_ERR(bdev)) {
rc = -PTR_ERR(bdev);
goto free;
}

if ((rc = bd_claim(bdev, log))) {
goto close;
}

log->bdev = bdev;
memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid));

@@ -1137,7 +1134,7 @@ int lmLogOpen(struct super_block *sb)
* initialize log:
*/
if ((rc = lmLogInit(log)))
goto unclaim;
goto close;

list_add(&log->journal_list, &jfs_external_logs);

@@ -1163,11 +1160,8 @@ journal_found:
list_del(&log->journal_list);
lbmLogShutdown(log);

unclaim:
bd_release(bdev);

close: /* close external log device */
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

free: /* free log descriptor */
mutex_unlock(&jfs_log_mutex);

@@ -1512,8 +1506,7 @@ int lmLogClose(struct super_block *sb)
bdev = log->bdev;
rc = lmLogShutdown(log);

bd_release(bdev);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

kfree(log);

@@ -300,7 +300,7 @@ static int bdev_write_sb(struct super_block *sb, struct page *page)

static void bdev_put_device(struct logfs_super *s)
{
close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

static int bdev_can_write_buf(struct super_block *sb, u64 ofs)

@@ -325,13 +325,14 @@ int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type,
{
struct block_device *bdev;

bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type);
bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
type);
if (IS_ERR(bdev))
return PTR_ERR(bdev);

if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
int mtdnr = MINOR(bdev->bd_dev);
close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
return logfs_get_sb_mtd(p, mtdnr);
}

@@ -1147,14 +1147,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
{
struct nilfs_super_data sd;
struct super_block *s;
fmode_t mode = FMODE_READ;
fmode_t mode = FMODE_READ | FMODE_EXCL;
struct dentry *root_dentry;
int err, s_new = false;

if (!(flags & MS_RDONLY))
mode |= FMODE_WRITE;

sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type);
sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
if (IS_ERR(sd.bdev))
return ERR_CAST(sd.bdev);

@@ -1233,7 +1233,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
}

if (!s_new)
close_bdev_exclusive(sd.bdev, mode);
blkdev_put(sd.bdev, mode);

return root_dentry;

@@ -1242,7 +1242,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,

failed:
if (!s_new)
close_bdev_exclusive(sd.bdev, mode);
blkdev_put(sd.bdev, mode);
return ERR_PTR(err);
}

@@ -1674,7 +1674,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg,
goto out;

reg->hr_bdev = I_BDEV(filp->f_mapping->host);
ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ);
ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL);
if (ret) {
reg->hr_bdev = NULL;
goto out;

@@ -558,7 +558,7 @@ void register_disk(struct gendisk *disk)
goto exit;

bdev->bd_invalidated = 1;
err = blkdev_get(bdev, FMODE_READ);
err = blkdev_get(bdev, FMODE_READ, NULL);
if (err < 0)
goto exit;
blkdev_put(bdev, FMODE_READ);

@ -2552,8 +2552,6 @@ static int release_journal_dev(struct super_block *super,
|
|||
result = 0;
|
||||
|
||||
if (journal->j_dev_bd != NULL) {
|
||||
if (journal->j_dev_bd->bd_dev != super->s_dev)
|
||||
bd_release(journal->j_dev_bd);
|
||||
result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode);
|
||||
journal->j_dev_bd = NULL;
|
||||
}
|
||||
|
@ -2571,7 +2569,7 @@ static int journal_init_dev(struct super_block *super,
|
|||
{
|
||||
int result;
|
||||
dev_t jdev;
|
||||
fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE;
|
||||
fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;
|
||||
char b[BDEVNAME_SIZE];
|
||||
|
||||
result = 0;
|
||||
|
@ -2585,7 +2583,10 @@ static int journal_init_dev(struct super_block *super,
|
|||
|
||||
/* there is no "jdev" option and journal is on separate device */
|
||||
if ((!jdev_name || !jdev_name[0])) {
|
||||
journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
|
||||
if (jdev == super->s_dev)
|
||||
blkdev_mode &= ~FMODE_EXCL;
|
||||
journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode,
|
||||
journal);
|
||||
journal->j_dev_mode = blkdev_mode;
|
||||
if (IS_ERR(journal->j_dev_bd)) {
|
||||
result = PTR_ERR(journal->j_dev_bd);
|
||||
|
@@ -2594,22 +2595,14 @@ static int journal_init_dev(struct super_block *super,
					 "cannot init journal device '%s': %i",
					 __bdevname(jdev, b), result);
			return result;
		} else if (jdev != super->s_dev) {
			result = bd_claim(journal->j_dev_bd, journal);
			if (result) {
				blkdev_put(journal->j_dev_bd, blkdev_mode);
				return result;
			}

		} else if (jdev != super->s_dev)
			set_blocksize(journal->j_dev_bd, super->s_blocksize);
		}

		return 0;
	}

	journal->j_dev_mode = blkdev_mode;
	journal->j_dev_bd = open_bdev_exclusive(jdev_name,
						blkdev_mode, journal);
	journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal);
	if (IS_ERR(journal->j_dev_bd)) {
		result = PTR_ERR(journal->j_dev_bd);
		journal->j_dev_bd = NULL;

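The reiserfs journal is the interesting case in this conversion: an external journal device is claimed exclusively with the journal as holder, but when the journal shares the filesystem's own device the FMODE_EXCL bit is dropped, since that device is already claimed by the filesystem itself, and the mode actually used is remembered in j_dev_mode so release_journal_dev() can hand it back to blkdev_put(). A sketch under those assumptions; the identifiers below are illustrative, not from the patch:

#include <linux/fs.h>

static struct block_device *example_open_journal(dev_t jdev, dev_t fs_dev,
						 void *journal,
						 fmode_t *mode_used)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;

	/* the filesystem already holds an exclusive claim on its own device */
	if (jdev == fs_dev)
		mode &= ~FMODE_EXCL;

	*mode_used = mode;	/* remember it for the matching blkdev_put() */
	return blkdev_get_by_dev(jdev, mode, journal);
}
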
fs/super.c
@@ -766,13 +766,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
{
	struct block_device *bdev;
	struct super_block *s;
	fmode_t mode = FMODE_READ;
	fmode_t mode = FMODE_READ | FMODE_EXCL;
	int error = 0;

	if (!(flags & MS_RDONLY))
		mode |= FMODE_WRITE;

	bdev = open_bdev_exclusive(dev_name, mode, fs_type);
	bdev = blkdev_get_by_path(dev_name, mode, fs_type);
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

@@ -801,13 +801,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,

		/*
		 * s_umount nests inside bd_mutex during
		 * __invalidate_device(). close_bdev_exclusive()
		 * acquires bd_mutex and can't be called under
		 * s_umount. Drop s_umount temporarily. This is safe
		 * as we're holding an active reference.
		 * __invalidate_device(). blkdev_put() acquires
		 * bd_mutex and can't be called under s_umount. Drop
		 * s_umount temporarily. This is safe as we're
		 * holding an active reference.
		 */
		up_write(&s->s_umount);
		close_bdev_exclusive(bdev, mode);
		blkdev_put(bdev, mode);
		down_write(&s->s_umount);
	} else {
		char b[BDEVNAME_SIZE];

@@ -831,7 +831,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
error_s:
	error = PTR_ERR(s);
error_bdev:
	close_bdev_exclusive(bdev, mode);
	blkdev_put(bdev, mode);
error:
	return ERR_PTR(error);
}

@@ -862,7 +862,8 @@ void kill_block_super(struct super_block *sb)
	bdev->bd_super = NULL;
	generic_shutdown_super(sb);
	sync_blockdev(bdev);
	close_bdev_exclusive(bdev, mode);
	WARN_ON_ONCE(!(mode & FMODE_EXCL));
	blkdev_put(bdev, mode | FMODE_EXCL);
}

EXPORT_SYMBOL(kill_block_super);

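With mount_bdev() now opening the device exclusively and kill_block_super() releasing it with FMODE_EXCL, a block-based filesystem that uses this pair inherits the exclusive claim without any code of its own. A sketch of that wiring; "examplefs", example_mount() and example_fill_super() are made up for illustration and are not part of the patch:

#include <linux/fs.h>
#include <linux/module.h>

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	/* set up sb->s_root and friends; omitted in this sketch */
	return -EINVAL;
}

static struct dentry *example_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name,
				    void *data)
{
	/* mount_bdev() performs the FMODE_EXCL blkdev_get_by_path() */
	return mount_bdev(fs_type, flags, dev_name, data, example_fill_super);
}

static struct file_system_type example_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= example_mount,
	.kill_sb	= kill_block_super,	/* blkdev_put(bdev, mode | FMODE_EXCL) */
	.fs_flags	= FS_REQUIRES_DEV,
};
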
@@ -606,7 +606,8 @@ xfs_blkdev_get(
{
	int			error = 0;

	*bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp);
	*bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				    mp);
	if (IS_ERR(*bdevp)) {
		error = PTR_ERR(*bdevp);
		printk("XFS: Invalid device [%s], error=%d\n", name, error);

@@ -620,7 +621,7 @@ xfs_blkdev_put(
	struct block_device	*bdev)
{
	if (bdev)
		close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE);
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

/*

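xfs keeps its thin wrapper pair; the only changes are the ERR_PTR-returning getter and the FMODE_EXCL bit. A sketch of such a pair, assuming nothing beyond the prototypes added in this series (the helper names are illustrative):

#include <linux/fs.h>
#include <linux/err.h>

static int example_blkdev_get(const char *name, void *holder,
			      struct block_device **bdevp)
{
	*bdevp = blkdev_get_by_path(name,
				    FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				    holder);
	if (IS_ERR(*bdevp)) {
		int error = PTR_ERR(*bdevp);

		*bdevp = NULL;
		return error;
	}
	return 0;
}

static void example_blkdev_put(struct block_device *bdev)
{
	if (bdev)
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
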
@@ -663,7 +663,7 @@ struct block_device {
	void *			bd_holder;
	int			bd_holders;
#ifdef CONFIG_SYSFS
	struct list_head	bd_holder_list;
	struct gendisk *	bd_holder_disk;	/* for sysfs slave linkng */
#endif
	struct block_device *	bd_contains;
	unsigned		bd_block_size;

@@ -2006,7 +2006,6 @@ extern struct block_device *bdgrab(struct block_device *bdev);
extern void bd_set_size(struct block_device *, loff_t size);
extern void bd_forget(struct inode *inode);
extern void bdput(struct block_device *);
extern struct block_device *open_by_devnum(dev_t, fmode_t);
extern void invalidate_bdev(struct block_device *);
extern int sync_blockdev(struct block_device *bdev);
extern struct super_block *freeze_bdev(struct block_device *);

@@ -2037,16 +2036,20 @@ extern const struct file_operations def_fifo_fops;
extern int ioctl_by_bdev(struct block_device *, unsigned, unsigned long);
extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long);
extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long);
extern int blkdev_get(struct block_device *, fmode_t);
extern int blkdev_put(struct block_device *, fmode_t);
extern int bd_claim(struct block_device *, void *);
extern void bd_release(struct block_device *);
extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder);
extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					       void *holder);
extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode,
					      void *holder);
extern int blkdev_put(struct block_device *bdev, fmode_t mode);
#ifdef CONFIG_SYSFS
extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *);
extern void bd_release_from_disk(struct block_device *, struct gendisk *);
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
#define bd_claim_by_disk(bdev, holder, disk)	bd_claim(bdev, holder)
#define bd_release_from_disk(bdev, disk)	bd_release(bdev)
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
#endif
#endif

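Read together, the new declarations give three getters (by already-open bdev, by path, by dev_t) that all take a holder cookie, with FMODE_EXCL in the mode requesting an exclusive claim bound to that holder, and a single blkdev_put() that must see the same mode. The tour below is an assumption-laden sketch of that contract, not text from the patch:

#include <linux/fs.h>
#include <linux/err.h>

static void example_api_tour(const char *path, dev_t dev,
			     struct block_device *bdev, void *holder)
{
	struct block_device *b;

	/* exclusive open by path, released with the identical mode */
	b = blkdev_get_by_path(path, FMODE_READ | FMODE_EXCL, holder);
	if (!IS_ERR(b))
		blkdev_put(b, FMODE_READ | FMODE_EXCL);

	/* non-exclusive open by device number: NULL holder */
	b = blkdev_get_by_dev(dev, FMODE_READ, NULL);
	if (!IS_ERR(b))
		blkdev_put(b, FMODE_READ);

	/* exclusive claim on an already-looked-up bdev, the bd_claim() replacement */
	if (!blkdev_get(bdev, FMODE_READ | FMODE_EXCL, holder))
		blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
}
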
|
@ -2082,8 +2085,6 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
|
|||
extern const char *__bdevname(dev_t, char *buffer);
|
||||
extern const char *bdevname(struct block_device *bdev, char *buffer);
|
||||
extern struct block_device *lookup_bdev(const char *);
|
||||
extern struct block_device *open_bdev_exclusive(const char *, fmode_t, void *);
|
||||
extern void close_bdev_exclusive(struct block_device *, fmode_t);
|
||||
extern void blkdev_show(struct seq_file *,off_t);
|
||||
|
||||
#else
|
||||
|
|
|
@@ -223,7 +223,7 @@ static int swsusp_swap_check(void)
		return res;

	root_swap = res;
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE);
	res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
	if (res)
		return res;

@@ -907,7 +907,8 @@ int swsusp_check(void)
{
	int error;

	hib_resume_bdev = open_by_devnum(swsusp_resume_device, FMODE_READ);
	hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
					    FMODE_READ, NULL);
	if (!IS_ERR(hib_resume_bdev)) {
		set_blocksize(hib_resume_bdev, PAGE_SIZE);
		clear_page(swsusp_header);

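For hibernation the change is mechanical: open_by_devnum(dev, mode) becomes blkdev_get_by_dev(dev, mode, NULL), with the same ERR_PTR convention and a NULL holder because no exclusive claim is taken. A small sketch (the helper name is invented here):

#include <linux/fs.h>
#include <linux/err.h>

static struct block_device *example_open_resume_dev(dev_t resume_dev)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(resume_dev, FMODE_READ, NULL);
	if (!IS_ERR(bdev))
		set_blocksize(bdev, PAGE_SIZE);

	/* caller must balance with blkdev_put(bdev, FMODE_READ) */
	return bdev;
}
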
@@ -1677,7 +1677,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
	if (S_ISBLK(inode->i_mode)) {
		struct block_device *bdev = I_BDEV(inode);
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	} else {
		mutex_lock(&inode->i_mutex);
		inode->i_flags &= ~S_SWAPFILE;

@@ -1939,7 +1939,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
	error = -EINVAL;
	if (S_ISBLK(inode->i_mode)) {
		bdev = I_BDEV(inode);
		error = bd_claim(bdev, sys_swapon);
		error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				   sys_swapon);
		if (error < 0) {
			bdev = NULL;
			error = -EINVAL;

@@ -2136,7 +2137,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
bad_swap:
	if (bdev) {
		set_blocksize(bdev, p->old_block_size);
		bd_release(bdev);
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	}
	destroy_swap_extents(p);
	swap_cgroup_swapoff(type);

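The swap code shows the bd_claim()/bd_release() replacement most directly: the claim on an already-open swap block device becomes an extra blkdev_get() carrying FMODE_EXCL plus a holder cookie (sys_swapon in the patch), and both swapoff and the bad_swap error path undo it with a blkdev_put() of the same mode. A sketch of that pairing, with illustrative helper names:

#include <linux/fs.h>

static int example_claim_swap_bdev(struct inode *inode, void *claim_cookie)
{
	struct block_device *bdev = I_BDEV(inode);

	/* replaces bd_claim(bdev, claim_cookie) */
	return blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
			  claim_cookie);
}

static void example_release_swap_bdev(struct inode *inode, int old_block_size)
{
	struct block_device *bdev = I_BDEV(inode);

	set_blocksize(bdev, old_block_size);
	/* replaces bd_release(bdev) */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
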