// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Direct MTD block device access
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2000-2003 Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <linux/mutex.h>
#include <linux/major.h>

struct mtdblk_dev {
	struct mtd_blktrans_dev mbd;
	int count;
	struct mutex cache_mutex;
	unsigned char *cache_data;
	unsigned long cache_offset;
	unsigned int cache_size;
	enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state;
};

/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */
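
/*
 * For example (illustrative numbers, not taken from this file): with a
 * 128 KiB erase block and the 512-byte sectors exported below
 * (blksize = 512), a naive implementation would erase and reprogram the
 * whole 128 KiB block for every 512-byte write.  The cache lets up to
 * 256 consecutive writes to the same erase block collapse into a single
 * erase/program cycle; the real erase size comes from mtd->erasesize at
 * open time.
 */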

static int erase_write (struct mtd_info *mtd, unsigned long pos,
			unsigned int len, const char *buf)
{
	struct erase_info erase;
	size_t retlen;
	int ret;

	/*
	 * First, let's erase the flash block.
	 */
	erase.addr = pos;
	erase.len = len;

	ret = mtd_erase(mtd, &erase);
	if (ret) {
		printk (KERN_WARNING "mtdblock: erase of region [0x%lx, 0x%x] "
				     "on \"%s\" failed\n",
			pos, len, mtd->name);
		return ret;
	}

	/*
	 * Next, write the data to flash.
	 */

	ret = mtd_write(mtd, pos, len, &retlen, buf);
	if (ret)
		return ret;
	if (retlen != len)
		return -EIO;
	return 0;
}

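/*
 * Flush the locally cached erase block back to the device (erase +
 * rewrite via erase_write()) if it is dirty.
 */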
static int write_cached_data (struct mtdblk_dev *mtdblk)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	int ret;

	if (mtdblk->cache_state != STATE_DIRTY)
		return 0;

	pr_debug("mtdblock: writing cached data for \"%s\" "
			"at 0x%lx, size 0x%x\n", mtd->name,
			mtdblk->cache_offset, mtdblk->cache_size);

	ret = erase_write (mtd, mtdblk->cache_offset,
			   mtdblk->cache_size, mtdblk->cache_data);

	/*
	 * Here we could arguably set the cache state to STATE_CLEAN.
	 * However this could lead to inconsistency since we will not
	 * be notified if this content is altered on the flash by other
	 * means. Let's declare it empty and leave buffering tasks to
	 * the buffer cache instead.
	 *
	 * If this cache_offset points to a bad block, data cannot be
	 * written to the device. Clear cache_state to avoid writing to
	 * bad blocks repeatedly.
	 */
	if (ret == 0 || ret == -EIO)
		mtdblk->cache_state = STATE_EMPTY;
	return ret;
}

static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
			    int len, const char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
		mtd->name, pos, len);

	if (!sect_size)
		return mtd_write(mtd, pos, len, &retlen, buf);

	while (len > 0) {
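		/*
		 * Work out which erase block ("sector") this request falls
		 * into: sect_start is the sect_size-aligned start of that
		 * block, offset is our position within it, and size is how
		 * much of this request fits before the block boundary.
		 */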
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		if (size == sect_size) {
			/*
			 * We are covering a whole sector. Thus there is no
			 * need to bother with the cache while it may still be
			 * useful for other partial writes.
			 */
			ret = erase_write (mtd, pos, size, buf);
			if (ret)
				return ret;
		} else {
			/* Partial sector: need to use the cache */

			if (mtdblk->cache_state == STATE_DIRTY &&
			    mtdblk->cache_offset != sect_start) {
				ret = write_cached_data(mtdblk);
				if (ret)
					return ret;
			}

			if (mtdblk->cache_state == STATE_EMPTY ||
			    mtdblk->cache_offset != sect_start) {
				/* fill the cache with the current sector */
				mtdblk->cache_state = STATE_EMPTY;
				ret = mtd_read(mtd, sect_start, sect_size,
					       &retlen, mtdblk->cache_data);
				if (ret)
					return ret;
				if (retlen != sect_size)
					return -EIO;

				mtdblk->cache_offset = sect_start;
				mtdblk->cache_size = sect_size;
				mtdblk->cache_state = STATE_CLEAN;
			}

			/* write data to our local cache */
			memcpy (mtdblk->cache_data + offset, buf, size);
			mtdblk->cache_state = STATE_DIRTY;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

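/*
 * Satisfy a read from the cached erase block when it covers the
 * requested range, otherwise read straight from the device.
 */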
static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
			   int len, char *buf)
{
	struct mtd_info *mtd = mtdblk->mbd.mtd;
	unsigned int sect_size = mtdblk->cache_size;
	size_t retlen;
	int ret;

	pr_debug("mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
			mtd->name, pos, len);

	if (!sect_size)
		return mtd_read(mtd, pos, len, &retlen, buf);

	while (len > 0) {
		unsigned long sect_start = (pos/sect_size)*sect_size;
		unsigned int offset = pos - sect_start;
		unsigned int size = sect_size - offset;
		if (size > len)
			size = len;

		/*
		 * Check if the requested data is already cached.
		 * Read the requested amount of data from our internal
		 * cache if it contains what we want; otherwise read the
		 * data directly from flash.
		 */
		if (mtdblk->cache_state != STATE_EMPTY &&
		    mtdblk->cache_offset == sect_start) {
			memcpy (buf, mtdblk->cache_data + offset, size);
		} else {
			ret = mtd_read(mtd, pos, size, &retlen, buf);
			if (ret)
				return ret;
			if (retlen != size)
				return -EIO;
		}

		buf += size;
		pos += size;
		len -= size;
	}

	return 0;
}

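/*
 * blktrans sector interface: each sector is 512 bytes (blksize below),
 * so block << 9 converts a sector number into a byte offset.
 */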
static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	return do_cached_read(mtdblk, block<<9, 512, buf);
}

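/*
 * The erase-block-sized cache buffer is allocated lazily here, on the
 * first write to the device, rather than at open time.
 */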
static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
			      unsigned long block, char *buf)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
		mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize);
		if (!mtdblk->cache_data)
			return -EINTR;
		/* -EINTR is not really correct, but it is the best match
		 * documented in man 2 write for all cases.  We could also
		 * return -EAGAIN sometimes, but why bother?
		 */
	}
	return do_cached_write(mtdblk, block<<9, 512, buf);
}

static int mtdblock_open(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_open\n");

	if (mtdblk->count) {
		mtdblk->count++;
		return 0;
	}
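
	/*
	 * Warn only when something actually opens (e.g. mounts) the device
	 * on raw NAND; UBI block devices are usually the better choice
	 * there.
	 */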
	if (mtd_type_is_nand(mbd->mtd))
		pr_warn("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
			mbd->tr->name, mbd->mtd->name);

	/* OK, it's not open. Create cache info for it */
	mtdblk->count = 1;
	mutex_init(&mtdblk->cache_mutex);
	mtdblk->cache_state = STATE_EMPTY;
	if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
		mtdblk->cache_size = mbd->mtd->erasesize;
		mtdblk->cache_data = NULL;
	}

	pr_debug("ok\n");

	return 0;
}

static void mtdblock_release(struct mtd_blktrans_dev *mbd)
{
	struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd);

	pr_debug("mtdblock_release\n");

	mutex_lock(&mtdblk->cache_mutex);
	write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);

	if (!--mtdblk->count) {
		/*
		 * It was the last usage. Free the cache, but only sync if
		 * opened for writing.
		 */
		if (mbd->file_mode & FMODE_WRITE)
			mtd_sync(mbd->mtd);
		vfree(mtdblk->cache_data);
	}

	pr_debug("ok\n");
}

static int mtdblock_flush(struct mtd_blktrans_dev *dev)
{
	struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd);
	int ret;

	mutex_lock(&mtdblk->cache_mutex);
	ret = write_cached_data(mtdblk);
	mutex_unlock(&mtdblk->cache_mutex);
	mtd_sync(dev->mtd);
	return ret;
}

static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
{
	struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return;

	dev->mbd.mtd = mtd;
	dev->mbd.devnum = mtd->index;

	dev->mbd.size = mtd->size >> 9;
	dev->mbd.tr = tr;

	if (!(mtd->flags & MTD_WRITEABLE))
		dev->mbd.readonly = 1;

	if (add_mtd_blktrans_dev(&dev->mbd))
		kfree(dev);
}

static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev)
{
	del_mtd_blktrans_dev(dev);
}

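/*
 * blksize = 512 matches the 512-byte sectors assumed by the readsect
 * and writesect handlers above (block << 9).
 */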
static struct mtd_blktrans_ops mtdblock_tr = {
	.name		= "mtdblock",
	.major		= MTD_BLOCK_MAJOR,
	.part_bits	= 0,
	.blksize	= 512,
	.open		= mtdblock_open,
	.flush		= mtdblock_flush,
	.release	= mtdblock_release,
	.readsect	= mtdblock_readsect,
	.writesect	= mtdblock_writesect,
	.add_mtd	= mtdblock_add_mtd,
	.remove_dev	= mtdblock_remove_dev,
	.owner		= THIS_MODULE,
};

module_mtd_blktrans(mtdblock_tr);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net> et al.");
MODULE_DESCRIPTION("Caching read/erase/writeback block device emulation access to MTD devices");