2005-04-17 06:20:36 +08:00
|
|
|
/*
|
2010-08-09 03:58:20 +08:00
|
|
|
* Interface to Linux block layer for MTD 'translation layers'.
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
2010-08-09 03:58:20 +08:00
|
|
|
* Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software
|
|
|
|
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
2005-04-17 06:20:36 +08:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/list.h>
|
|
|
|
#include <linux/fs.h>
|
|
|
|
#include <linux/mtd/blktrans.h>
|
|
|
|
#include <linux/mtd/mtd.h>
|
|
|
|
#include <linux/blkdev.h>
|
2018-10-16 22:09:58 +08:00
|
|
|
#include <linux/blk-mq.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/blkpg.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/hdreg.h>
|
2006-03-31 18:29:41 +08:00
|
|
|
#include <linux/mutex.h>
|
2016-12-25 03:46:01 +08:00
|
|
|
#include <linux/uaccess.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-05-29 03:28:34 +08:00
|
|
|
#include "mtdcore.h"
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2007-05-29 03:28:34 +08:00
|
|
|
static LIST_HEAD(blktrans_majors);
|
2010-02-23 02:39:30 +08:00
|
|
|
static DEFINE_MUTEX(blktrans_ref_mutex);
|
|
|
|
|
2011-01-12 08:46:10 +08:00
|
|
|
/*
 * Final teardown of a blktrans device, invoked via kref_put() once the
 * last reference is dropped (caller holds blktrans_ref_mutex — see
 * blktrans_dev_put()).  Frees the request queue, tag set, gendisk and
 * the device itself.
 */
static void blktrans_dev_release(struct kref *kref)
{
	struct mtd_blktrans_dev *dev =
		container_of(kref, struct mtd_blktrans_dev, ref);

	/* Detach from the gendisk first so blktrans_dev_get() sees NULL */
	dev->disk->private_data = NULL;
	blk_cleanup_queue(dev->rq);
	blk_mq_free_tag_set(dev->tag_set);
	kfree(dev->tag_set);
	put_disk(dev->disk);
	list_del(&dev->list);
	kfree(dev);
}
|
|
|
|
|
|
|
|
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
|
|
|
|
{
|
|
|
|
struct mtd_blktrans_dev *dev;
|
|
|
|
|
|
|
|
mutex_lock(&blktrans_ref_mutex);
|
|
|
|
dev = disk->private_data;
|
|
|
|
|
|
|
|
if (!dev)
|
|
|
|
goto unlock;
|
|
|
|
kref_get(&dev->ref);
|
|
|
|
unlock:
|
|
|
|
mutex_unlock(&blktrans_ref_mutex);
|
|
|
|
return dev;
|
|
|
|
}
|
|
|
|
|
2011-01-12 08:46:10 +08:00
|
|
|
/*
 * Drop a reference taken by blktrans_dev_get(); the last put runs
 * blktrans_dev_release() under blktrans_ref_mutex.
 */
static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
	mutex_lock(&blktrans_ref_mutex);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&blktrans_ref_mutex);
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
|
2017-06-03 15:38:04 +08:00
|
|
|
/*
 * Service one block request by translating it into per-sector calls on
 * the translation layer's readsect/writesect/discard/flush hooks.
 * Called with dev->lock held (see mtd_blktrans_work()).
 *
 * Returns BLK_STS_OK on success or BLK_STS_IOERR on any failure.
 */
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
			       struct mtd_blktrans_dev *dev,
			       struct request *req)
{
	unsigned long block, nsect;
	char *buf;

	/* Convert 512-byte sector position/length into tr-sized blocks */
	block = blk_rq_pos(req) << 9 >> tr->blkshift;
	nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

	if (req_op(req) == REQ_OP_FLUSH) {
		if (tr->flush(dev))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	}

	/* Reject requests that run past the end of the device */
	if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
	    get_capacity(req->rq_disk))
		return BLK_STS_IOERR;

	switch (req_op(req)) {
	case REQ_OP_DISCARD:
		if (tr->discard(dev, block, nsect))
			return BLK_STS_IOERR;
		return BLK_STS_OK;
	case REQ_OP_READ:
		/* Map the bio's page; buf walks it one tr block at a time */
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->readsect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		/* Make freshly-read data visible to userspace mappings */
		rq_flush_dcache_pages(req);
		return BLK_STS_OK;
	case REQ_OP_WRITE:
		/* writesect is optional; without it the device is read-only */
		if (!tr->writesect)
			return BLK_STS_IOERR;

		/* Flush dcache before reading the page contents to write out */
		rq_flush_dcache_pages(req);
		buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
		for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
			if (tr->writesect(dev, block, buf)) {
				kunmap(bio_page(req->bio));
				return BLK_STS_IOERR;
			}
		}
		kunmap(bio_page(req->bio));
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}
|
|
|
|
|
2011-02-14 22:16:09 +08:00
|
|
|
/*
 * Called by a translation layer's background() hook to check whether it
 * should yield: returns non-zero once new I/O has arrived (bg_stop is
 * set elsewhere when the queue gains work).
 */
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
	return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);
|
|
|
|
|
2018-10-16 22:09:58 +08:00
|
|
|
static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
|
|
|
|
{
|
|
|
|
struct request *rq;
|
|
|
|
|
|
|
|
rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
|
|
|
|
if (rq) {
|
|
|
|
list_del_init(&rq->queuelist);
|
|
|
|
blk_mq_start_request(rq);
|
|
|
|
return rq;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Drain dev->rq_list, issuing each request through do_blktrans_request()
 * and running the translation layer's optional background() hook once
 * per idle period.  Entered and exited with dev->queue_lock held, but
 * the lock is dropped around every sleeping operation (background work,
 * dev->lock, the request itself) — hence the sparse annotations.
 */
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
	__releases(&dev->queue_lock)
	__acquires(&dev->queue_lock)
{
	struct mtd_blktrans_ops *tr = dev->tr;
	struct request *req = NULL;
	int background_done = 0;

	while (1) {
		blk_status_t res;

		dev->bg_stop = false;
		if (!req && !(req = mtd_next_request(dev))) {
			/* Queue is empty: give background processing a turn */
			if (tr->background && !background_done) {
				/* background() may sleep; drop the spinlock */
				spin_unlock_irq(&dev->queue_lock);
				mutex_lock(&dev->lock);
				tr->background(dev);
				mutex_unlock(&dev->lock);
				spin_lock_irq(&dev->queue_lock);
				/*
				 * Do background processing just once per idle
				 * period.
				 */
				background_done = !dev->bg_stop;
				continue;
			}
			break;
		}

		spin_unlock_irq(&dev->queue_lock);

		/* dev->lock serialises access to the underlying MTD */
		mutex_lock(&dev->lock);
		res = do_blktrans_request(dev->tr, dev, req);
		mutex_unlock(&dev->lock);

		/* Returns false once all bytes of the request are done */
		if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
			__blk_mq_end_request(req, res);
			req = NULL;
		}

		/* New work arrived; re-arm background for the next idle gap */
		background_done = 0;
		spin_lock_irq(&dev->queue_lock);
	}
}
|
|
|
|
|
2018-10-16 22:09:58 +08:00
|
|
|
/*
 * blk-mq ->queue_rq() entry point: append the incoming request to the
 * device's private list and process the list synchronously via
 * mtd_blktrans_work().  If the queue has already been detached from its
 * device (queuedata cleared), fail the request immediately.
 */
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct mtd_blktrans_dev *dev;

	dev = hctx->queue->queuedata;
	if (!dev) {
		/* Must start the request before we can end it with an error */
		blk_mq_start_request(bd->rq);
		return BLK_STS_IOERR;
	}

	spin_lock_irq(&dev->queue_lock);
	list_add_tail(&bd->rq->queuelist, &dev->rq_list);
	mtd_blktrans_work(dev);
	spin_unlock_irq(&dev->queue_lock);

	return BLK_STS_OK;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-03-02 23:35:06 +08:00
|
|
|
/*
 * Block-device ->open().  On the first open, pin the module and the
 * underlying MTD device and call the translation layer's open() hook;
 * subsequent opens only bump dev->open.
 *
 * Lock ordering: mtd_table_mutex MUST be taken before dev->lock —
 * del_mtd_blktrans_dev() uses the same order, and reversing it here
 * creates an ABBA deadlock against device removal.
 */
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = 0;

	if (!dev)
		return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	/* Already open: just account another opener below */
	if (dev->open)
		goto unlock;

	kref_get(&dev->ref);
	__module_get(dev->tr->owner);

	/* MTD already gone (device being removed): open "succeeds" empty */
	if (!dev->mtd)
		goto unlock;

	if (dev->tr->open) {
		ret = dev->tr->open(dev);
		if (ret)
			goto error_put;
	}

	ret = __get_mtd_device(dev->mtd);
	if (ret)
		goto error_release;
	dev->file_mode = mode;

unlock:
	dev->open++;
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;

error_release:
	if (dev->tr->release)
		dev->tr->release(dev);
error_put:
	module_put(dev->tr->owner);
	kref_put(&dev->ref, blktrans_dev_release);
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
	return ret;
}
|
|
|
|
|
2013-05-06 09:52:57 +08:00
|
|
|
/*
 * Block-device ->release().  Drops one opener; on the last close,
 * releases the reference/module pin taken in blktrans_open() and calls
 * the translation layer's release() hook.
 *
 * Lock ordering mirrors blktrans_open(): mtd_table_mutex before
 * dev->lock, to avoid an ABBA deadlock with device removal.
 */
static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

	if (!dev)
		return;

	mutex_lock(&mtd_table_mutex);
	mutex_lock(&dev->lock);

	/* Not the last close: nothing more to tear down */
	if (--dev->open)
		goto unlock;

	kref_put(&dev->ref, blktrans_dev_release);
	module_put(dev->tr->owner);

	/* dev->mtd may already be NULL if the MTD was removed underneath us */
	if (dev->mtd) {
		if (dev->tr->release)
			dev->tr->release(dev);
		__put_mtd_device(dev->mtd);
	}
unlock:
	mutex_unlock(&dev->lock);
	mutex_unlock(&mtd_table_mutex);
	blktrans_dev_put(dev);
}
|
|
|
|
|
2006-01-08 17:02:50 +08:00
|
|
|
static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
|
|
|
|
{
|
2010-02-23 02:39:30 +08:00
|
|
|
struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
|
|
|
|
int ret = -ENXIO;
|
|
|
|
|
|
|
|
if (!dev)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
mutex_lock(&dev->lock);
|
2006-01-08 17:02:50 +08:00
|
|
|
|
2010-02-23 02:39:30 +08:00
|
|
|
if (!dev->mtd)
|
|
|
|
goto unlock;
|
|
|
|
|
2015-05-22 01:44:32 +08:00
|
|
|
ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
|
2010-02-23 02:39:30 +08:00
|
|
|
unlock:
|
|
|
|
mutex_unlock(&dev->lock);
|
|
|
|
blktrans_dev_put(dev);
|
|
|
|
return ret;
|
2006-01-08 17:02:50 +08:00
|
|
|
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-03-02 23:35:06 +08:00
|
|
|
/*
 * Block-device ioctl entry point.
 *
 * Only BLKFLSBUF is handled here (flush the translation layer's caches,
 * if it has a flush hook; otherwise a successful no-op).  Everything
 * else is -ENOTTY.  -ENXIO is returned once the MTD has been detached.
 */
static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
	int ret = -ENXIO;

	if (!dev)
		return ret;

	mutex_lock(&dev->lock);
	if (dev->mtd) {
		if (cmd == BLKFLSBUF)
			/* No flush hook means nothing cached: report success */
			ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
		else
			ret = -ENOTTY;
	}
	mutex_unlock(&dev->lock);

	blktrans_dev_put(dev);
	return ret;
}
|
|
|
|
|
2012-11-09 23:36:35 +08:00
|
|
|
/* Block-layer operations shared by all MTD translation-layer disks. */
static const struct block_device_operations mtd_block_ops = {
	.owner		= THIS_MODULE,
	.open		= blktrans_open,
	.release	= blktrans_release,
	.ioctl		= blktrans_ioctl,
	.getgeo		= blktrans_getgeo,
};
|
|
|
|
|
2018-10-16 22:09:58 +08:00
|
|
|
/* blk-mq dispatch table: all requests funnel through mtd_queue_rq(). */
static const struct blk_mq_ops mtd_mq_ops = {
	.queue_rq	= mtd_queue_rq,
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Register one translation-layer device: pick a device number, create
 * its gendisk and blk-mq request queue, and expose it to the block layer.
 *
 * Must be called with mtd_table_mutex held (enforced by the trylock BUG()
 * below).  Returns 0 on success or a negative errno; on failure the
 * device has been removed from the tr->devs list again.
 */
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
	struct mtd_blktrans_ops *tr = new->tr;
	struct mtd_blktrans_dev *d;
	int last_devnum = -1;	/* highest devnum seen while scanning the sorted list */
	struct gendisk *gd;
	int ret;

	/* Caller must already hold mtd_table_mutex; a successful trylock
	   proves it does not, which is a bug in the caller. */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	mutex_lock(&blktrans_ref_mutex);
	/* tr->devs is kept sorted by devnum; walk it to either validate the
	   requested number or find the first gap for an automatic one. */
	list_for_each_entry(d, &tr->devs, list) {
		if (new->devnum == -1) {
			/* Use first free number */
			if (d->devnum != last_devnum+1) {
				/* Found a free devnum. Plug it in here */
				new->devnum = last_devnum+1;
				list_add_tail(&new->list, &d->list);
				goto added;
			}
		} else if (d->devnum == new->devnum) {
			/* Required number taken */
			mutex_unlock(&blktrans_ref_mutex);
			return -EBUSY;
		} else if (d->devnum > new->devnum) {
			/* Required number was free */
			list_add_tail(&new->list, &d->list);
			goto added;
		}
		last_devnum = d->devnum;
	}

	ret = -EBUSY;
	if (new->devnum == -1)
		new->devnum = last_devnum+1;

	/* Check that the device and any partitions will get valid
	 * minor numbers and that the disk naming code below can cope
	 * with this number. */
	if (new->devnum > (MINORMASK >> tr->part_bits) ||
	    (tr->part_bits && new->devnum >= 27 * 26)) {
		/* 27*26: limit of the two-letter "%s%c%c" naming scheme below */
		mutex_unlock(&blktrans_ref_mutex);
		goto error1;
	}

	list_add_tail(&new->list, &tr->devs);
 added:
	mutex_unlock(&blktrans_ref_mutex);

	mutex_init(&new->lock);
	kref_init(&new->ref);
	/* A layer with no writesect hook cannot write: mark disk read-only */
	if (!tr->writesect)
		new->readonly = 1;

	/* Create gendisk */
	ret = -ENOMEM;
	gd = alloc_disk(1 << tr->part_bits);

	if (!gd)
		goto error2;

	new->disk = gd;
	gd->private_data = new;
	gd->major = tr->major;
	gd->first_minor = (new->devnum) << tr->part_bits;
	gd->fops = &mtd_block_ops;

	/* Partitionable devices use letter suffixes ("mtdblocka",
	   "mtdblockaa", ...); otherwise a plain decimal suffix. */
	if (tr->part_bits)
		if (new->devnum < 26)
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c", tr->name, 'a' + new->devnum);
		else
			snprintf(gd->disk_name, sizeof(gd->disk_name),
				 "%s%c%c", tr->name,
				 'a' - 1 + new->devnum / 26,
				 'a' + new->devnum % 26);
	else
		snprintf(gd->disk_name, sizeof(gd->disk_name),
			 "%s%d", tr->name, new->devnum);

	/* Capacity is in 512-byte sectors, hence the >> 9 */
	set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

	/* Create the request queue */
	spin_lock_init(&new->queue_lock);
	INIT_LIST_HEAD(&new->rq_list);

	new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
	if (!new->tag_set)
		goto error3;

	/* Single hardware queue, depth 2; BLOCKING because the MTD I/O
	   path may sleep. */
	new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
				BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
	if (IS_ERR(new->rq)) {
		ret = PTR_ERR(new->rq);
		new->rq = NULL;
		goto error4;
	}

	if (tr->flush)
		blk_queue_write_cache(new->rq, true, false);

	new->rq->queuedata = new;
	blk_queue_logical_block_size(new->rq, tr->blksize);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

	if (tr->discard) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
		blk_queue_max_discard_sectors(new->rq, UINT_MAX);
	}

	gd->queue = new->rq;

	if (new->readonly)
		set_disk_ro(gd, 1);

	device_add_disk(&new->mtd->dev, gd, NULL);

	if (new->disk_attributes) {
		ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
					new->disk_attributes);
		/* sysfs group is optional cosmetics: warn but don't fail */
		WARN_ON(ret);
	}
	return 0;
	/* Unwind in reverse order of construction */
error4:
	kfree(new->tag_set);
error3:
	put_disk(new->disk);
error2:
	list_del(&new->list);
error1:
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Tear down a translation-layer device: remove its gendisk, drain the
 * request queue, detach the MTD, and drop the registration reference.
 *
 * Must be called with mtd_table_mutex held (enforced by the trylock
 * BUG() below).  The ordering matters: stop new requests first, then
 * flush in-flight ones, and only then detach old->mtd under old->lock.
 */
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
	unsigned long flags;

	/* Caller must already hold mtd_table_mutex */
	if (mutex_trylock(&mtd_table_mutex)) {
		mutex_unlock(&mtd_table_mutex);
		BUG();
	}

	if (old->disk_attributes)
		sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
						old->disk_attributes);

	/* Stop new requests to arrive */
	del_gendisk(old->disk);

	/* Kill current requests */
	spin_lock_irqsave(&old->queue_lock, flags);
	/* NULL queuedata makes the request handler reject further work */
	old->rq->queuedata = NULL;
	spin_unlock_irqrestore(&old->queue_lock, flags);

	/* freeze+quiesce queue to ensure all requests are flushed */
	blk_mq_freeze_queue(old->rq);
	blk_mq_quiesce_queue(old->rq);
	blk_mq_unquiesce_queue(old->rq);
	blk_mq_unfreeze_queue(old->rq);

	/* If the device is currently open, tell trans driver to close it,
		then put mtd device, and don't touch it again */
	mutex_lock(&old->lock);
	if (old->open) {
		if (old->tr->release)
			old->tr->release(old);
		__put_mtd_device(old->mtd);
	}

	old->mtd = NULL;

	mutex_unlock(&old->lock);
	/* Drop the reference taken at registration; frees once last user exits */
	blktrans_dev_put(old);
	return 0;
}
|
|
|
|
|
|
|
|
static void blktrans_notify_remove(struct mtd_info *mtd)
|
|
|
|
{
|
2008-05-20 03:11:50 +08:00
|
|
|
struct mtd_blktrans_ops *tr;
|
|
|
|
struct mtd_blktrans_dev *dev, *next;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2008-05-20 03:11:50 +08:00
|
|
|
list_for_each_entry(tr, &blktrans_majors, list)
|
|
|
|
list_for_each_entry_safe(dev, next, &tr->devs, list)
|
2005-04-17 06:20:36 +08:00
|
|
|
if (dev->mtd == mtd)
|
|
|
|
tr->remove_dev(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void blktrans_notify_add(struct mtd_info *mtd)
|
|
|
|
{
|
2008-05-20 03:11:50 +08:00
|
|
|
struct mtd_blktrans_ops *tr;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (mtd->type == MTD_ABSENT)
|
|
|
|
return;
|
|
|
|
|
2008-05-20 03:11:50 +08:00
|
|
|
list_for_each_entry(tr, &blktrans_majors, list)
|
2005-04-17 06:20:36 +08:00
|
|
|
tr->add_mtd(tr, mtd);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Hooks into MTD core so translation layers track device add/remove. */
static struct mtd_notifier blktrans_notifier = {
	.add = blktrans_notify_add,
	.remove = blktrans_notify_remove,
};
|
2005-11-07 19:15:26 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/*
 * Register a translation layer (e.g. mtdblock, ftl): claim its block
 * major, then offer every already-present MTD to it.
 *
 * Returns 0 on success, or the negative errno from register_blkdev().
 */
int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_info *mtd;
	int ret;

	/* Register the notifier if/when the first device type is
	   registered, to prevent the link/init ordering from breaking
	   us. */
	if (!blktrans_notifier.list.next)
		register_mtd_user(&blktrans_notifier);


	mutex_lock(&mtd_table_mutex);

	ret = register_blkdev(tr->major, tr->name);
	if (ret < 0) {
		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
		       tr->name, tr->major, ret);
		mutex_unlock(&mtd_table_mutex);
		return ret;
	}

	/* tr->major == 0 requested a dynamic major: record the one allotted */
	if (ret)
		tr->major = ret;

	/* blksize is a power of two; cache its log2 for fast sector math */
	tr->blkshift = ffs(tr->blksize) - 1;

	INIT_LIST_HEAD(&tr->devs);
	list_add(&tr->list, &blktrans_majors);

	/* Attach every MTD that predates this translation layer */
	mtd_for_each_device(mtd)
		if (mtd->type != MTD_ABSENT)
			tr->add_mtd(tr, mtd);

	mutex_unlock(&mtd_table_mutex);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Unregister a translation layer: remove all its devices, release its
 * block major, and unlink it from the active-majors list.
 * Always returns 0.
 */
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
	struct mtd_blktrans_dev *dev, *next;

	mutex_lock(&mtd_table_mutex);

	/* Remove it from the list of active majors */
	list_del(&tr->list);

	/* _safe variant: remove_dev() unlinks dev from tr->devs */
	list_for_each_entry_safe(dev, next, &tr->devs, list)
		tr->remove_dev(dev);

	unregister_blkdev(tr->major, tr->name);
	mutex_unlock(&mtd_table_mutex);

	/* Every device must be gone by now, or the layer leaked one */
	BUG_ON(!list_empty(&tr->devs));
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Module exit: detach from the MTD core if we ever attached.
 */
static void __exit mtd_blktrans_exit(void)
{
	/* No race here -- if someone's currently in register_mtd_blktrans
	   we're screwed anyway. */
	if (!blktrans_notifier.list.next)
		return;

	unregister_mtd_user(&blktrans_notifier);
}
|
|
|
|
|
|
|
|
module_exit(mtd_blktrans_exit);
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(register_mtd_blktrans);
|
|
|
|
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
|
|
|
|
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
|
|
|
|
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);
|
|
|
|
|
|
|
|
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");
|