/*
 * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
 * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
 *
 * Copyright (C) 2005, Intec Automation Inc.
 * Copyright (C) 2014, Freescale Semiconductor, Inc.
 *
 * This code is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/math64.h>
#include <linux/sizes.h>

#include <linux/mtd/mtd.h>
#include <linux/of_platform.h>
#include <linux/spi/flash.h>
#include <linux/mtd/spi-nor.h>

/* Define max times to check status register before we give up. */

/*
 * For everything but full-chip erase; probably could be much smaller, but kept
 * around for safety for now
 */
#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ)

/*
 * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
 * for larger flash
 */
#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ)
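
/*
 * The chip-erase wait in spi_nor_erase() scales this value linearly at 40s
 * per 2MB, e.g. a 16MB part is allowed 8 * 40s = 320s. Datasheet chip-erase
 * maxima (roughly 200s for a 16MB w25q128fw, ~512s for a 32MB s25fl256s)
 * suggest that is a comfortable upper bound rather than a tight one.
 */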
#define SPI_NOR_MAX_ID_LEN 6
#define SPI_NOR_MAX_ADDR_WIDTH 4
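
/*
 * Describes one supported flash part: its JEDEC ID bytes, erase/program
 * geometry and feature flags. The flash ID table is built from entries
 * of this type.
 */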
struct flash_info {
        char *name;

        /*
         * This array stores the ID bytes.
         * The first three bytes are the JEDEC ID.
         * JEDEC ID zero means "no ID" (mostly older chips).
         */
        u8 id[SPI_NOR_MAX_ID_LEN];
        u8 id_len;

        /* The size listed here is what works with SPINOR_OP_SE, which isn't
         * necessarily called a "sector" by the vendor.
         */
        unsigned sector_size;
        u16 n_sectors;

        u16 page_size;
        u16 addr_width;

        u16 flags;
#define SECT_4K BIT(0) /* SPINOR_OP_BE_4K works uniformly */
#define SPI_NOR_NO_ERASE BIT(1) /* No erase command needed */
#define SST_WRITE BIT(2) /* use SST byte programming */
#define SPI_NOR_NO_FR BIT(3) /* Can't do fastread */
#define SECT_4K_PMC BIT(4) /* SPINOR_OP_BE_4K_PMC works uniformly */
#define SPI_NOR_DUAL_READ BIT(5) /* Flash supports Dual Read */
#define SPI_NOR_QUAD_READ BIT(6) /* Flash supports Quad Read */
#define USE_FSR BIT(7) /* use flag status register */
#define SPI_NOR_HAS_LOCK BIT(8) /* Flash supports lock/unlock via SR */
};

#define JEDEC_MFR(info) ((info)->id[0])

static const struct flash_info *spi_nor_match_id(const char *name);

/*
 * Read the status register, returning its value.
 * Returns a negative error code if the read failed.
 */
static int read_sr(struct spi_nor *nor)
{
        int ret;
        u8 val;

        ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
        if (ret < 0) {
                pr_err("error %d reading SR\n", (int) ret);
                return ret;
        }

        return val;
}

/*
 * Read the flag status register, returning its value.
 * Returns a negative error code if the read failed.
 */
static int read_fsr(struct spi_nor *nor)
{
        int ret;
        u8 val;

        ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
        if (ret < 0) {
                pr_err("error %d reading FSR\n", ret);
                return ret;
        }

        return val;
}

/*
 * Read the configuration register, returning its value.
 * Returns a negative error code if the read failed.
 */
static int read_cr(struct spi_nor *nor)
{
        int ret;
        u8 val;

        ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
        if (ret < 0) {
                dev_err(nor->dev, "error %d reading CR\n", ret);
                return ret;
        }

        return val;
}

/*
 * Dummy cycle calculation for different types of read.
 * It can be used to support more commands with
 * different dummy cycle requirements.
 */
static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor)
{
        switch (nor->flash_read) {
        case SPI_NOR_FAST:
        case SPI_NOR_DUAL:
        case SPI_NOR_QUAD:
                return 8;
        case SPI_NOR_NORMAL:
                return 0;
        }
        return 0;
}

/*
 * Write status register 1 byte
 * Returns negative if error occurred.
 */
static inline int write_sr(struct spi_nor *nor, u8 val)
{
        nor->cmd_buf[0] = val;
        return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
}

/*
 * Set write enable latch with Write Enable command.
 * Returns negative if error occurred.
 */
static inline int write_enable(struct spi_nor *nor)
{
        return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
}

/*
 * Send write disable instruction to the chip.
 */
static inline int write_disable(struct spi_nor *nor)
{
        return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
}
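
/* Retrieve the spi_nor that owns this MTD device (stored in mtd->priv). */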
static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
{
        return mtd->priv;
}

/* Enable/disable 4-byte addressing mode. */
static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
                            int enable)
{
        int status;
        bool need_wren = false;
        u8 cmd;

        switch (JEDEC_MFR(info)) {
        case SNOR_MFR_MICRON:
                /* Some Micron need WREN command; all will accept it */
                need_wren = true;
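                /* fall through - Micron shares the EN4B/EX4B handling below */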
        case SNOR_MFR_MACRONIX:
        case SNOR_MFR_WINBOND:
                if (need_wren)
                        write_enable(nor);

                cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
                status = nor->write_reg(nor, cmd, NULL, 0);
                if (need_wren)
                        write_disable(nor);

                return status;
        default:
                /* Spansion style */
                nor->cmd_buf[0] = enable << 7;
                return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
        }
}
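
/* SR-based readiness check: the WIP (write in progress) bit clears once a program/erase has finished. */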
static inline int spi_nor_sr_ready(struct spi_nor *nor)
{
        int sr = read_sr(nor);
        if (sr < 0)
                return sr;
        else
                return !(sr & SR_WIP);
}

static inline int spi_nor_fsr_ready(struct spi_nor *nor)
{
        int fsr = read_fsr(nor);
        if (fsr < 0)
                return fsr;
        else
                return fsr & FSR_READY;
}
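
/*
 * Combined readiness check: the status register must report idle and, on
 * parts flagged with SNOR_F_USE_FSR, the flag status register must too.
 */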
static int spi_nor_ready(struct spi_nor *nor)
{
        int sr, fsr;
        sr = spi_nor_sr_ready(nor);
        if (sr < 0)
                return sr;
        fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
        if (fsr < 0)
                return fsr;
        return sr && fsr;
}

/*
 * Service routine to read status register until ready, or timeout occurs.
 * Returns non-zero if error.
 */
static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
                                                unsigned long timeout_jiffies)
{
        unsigned long deadline;
        int timeout = 0, ret;

        deadline = jiffies + timeout_jiffies;

        while (!timeout) {
                if (time_after_eq(jiffies, deadline))
                        timeout = 1;

                ret = spi_nor_ready(nor);
                if (ret < 0)
                        return ret;
                if (ret)
                        return 0;

                cond_resched();
        }

        dev_err(nor->dev, "flash operation timed out\n");

        return -ETIMEDOUT;
}
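
/* Wait using the default 40s limit; the full-chip erase path uses a timeout scaled to the flash size instead. */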
static int spi_nor_wait_till_ready(struct spi_nor *nor)
{
        return spi_nor_wait_till_ready_with_timeout(nor,
                                                    DEFAULT_READY_WAIT_JIFFIES);
}

/*
 * Erase the whole flash memory
 *
 * Returns 0 if successful, non-zero otherwise.
 */
static int erase_chip(struct spi_nor *nor)
{
        dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));

        return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
}
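
/* Take the chip mutex and run the driver's optional prepare hook before an operation. */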
static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
{
        int ret = 0;

        mutex_lock(&nor->lock);

        if (nor->prepare) {
                ret = nor->prepare(nor, ops);
                if (ret) {
                        dev_err(nor->dev, "failed in the preparation.\n");
                        mutex_unlock(&nor->lock);
                        return ret;
                }
        }
        return ret;
}
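
/* Undo spi_nor_lock_and_prep(): run the optional unprepare hook and release the mutex. */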
static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
{
        if (nor->unprepare)
                nor->unprepare(nor, ops);
        mutex_unlock(&nor->lock);
}

/*
 * Initiate the erasure of a single sector
 */
static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
{
        u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
        int i;

        if (nor->erase)
                return nor->erase(nor, addr);

        /*
         * Default implementation, if driver doesn't have a specialized HW
         * control
         */
        for (i = nor->addr_width - 1; i >= 0; i--) {
                buf[i] = addr & 0xff;
                addr >>= 8;
        }

        return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
}

/*
 * Erase an address range on the nor chip. The address range may span
 * one or more erase sectors. Return an error if there is a problem erasing.
 */
static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        struct spi_nor *nor = mtd_to_spi_nor(mtd);
        u32 addr, len;
        uint32_t rem;
        int ret;

        dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
                        (long long)instr->len);

        div_u64_rem(instr->len, mtd->erasesize, &rem);
        if (rem)
                return -EINVAL;

        addr = instr->addr;
        len = instr->len;

        ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
        if (ret)
                return ret;

        /* whole-chip erase? */
        if (len == mtd->size) {
                unsigned long timeout;

                write_enable(nor);

                if (erase_chip(nor)) {
                        ret = -EIO;
                        goto erase_err;
                }

                /*
                 * Scale the timeout linearly with the size of the flash, with
                 * a minimum calibrated to an old 2MB flash. We could try to
                 * pull these from CFI/SFDP, but these values should be good
                 * enough for now.
                 */
                timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
                              CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
                              (unsigned long)(mtd->size / SZ_2M));
                ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
                if (ret)
                        goto erase_err;

        /* REVISIT in some cases we could speed up erasing large regions
         * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up
         * to use "small sector erase", but that's not always optimal.
         */

        /* "sector"-at-a-time erase */
        } else {
                while (len) {
                        write_enable(nor);

                        ret = spi_nor_erase_sector(nor, addr);
                        if (ret)
                                goto erase_err;

                        addr += mtd->erasesize;
                        len -= mtd->erasesize;

                        ret = spi_nor_wait_till_ready(nor);
                        if (ret)
                                goto erase_err;
                }
        }

        write_disable(nor);

erase_err:
        spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);

        instr->state = ret ? MTD_ERASE_FAILED : MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return ret;
}
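
/*
 * Translate the BP[2:0] bits of @sr into the (offset, length) of the region
 * they currently protect. With TB=0 the protected region always sits at the
 * top of the chip.
 */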
static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
                                 uint64_t *len)
{
        struct mtd_info *mtd = &nor->mtd;
        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
        int shift = ffs(mask) - 1;
        int pow;

        if (!(sr & mask)) {
                /* No protection */
                *ofs = 0;
                *len = 0;
        } else {
                pow = ((sr & mask) ^ mask) >> shift;
                *len = mtd->size >> pow;
                *ofs = mtd->size - *len;
        }
}

/*
 * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
 * @locked is false); 0 otherwise
 */
static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
                                    u8 sr, bool locked)
{
        loff_t lock_offs;
        uint64_t lock_len;

        if (!len)
                return 1;

        stm_get_locked_range(nor, sr, &lock_offs, &lock_len);

        if (locked)
                /* Requested range is a sub-range of locked range */
                return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
        else
                /* Requested range does not overlap with locked range */
                return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
}
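
/* True if the whole [ofs, ofs + len) range is currently locked according to @sr. */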
static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
                            u8 sr)
{
        return stm_check_lock_status_sr(nor, ofs, len, sr, true);
}
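
/* True if the whole [ofs, ofs + len) range is currently unlocked according to @sr. */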
static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
                              u8 sr)
{
        return stm_check_lock_status_sr(nor, ofs, len, sr, false);
}

/*
 * Lock a region of the flash. Compatible with ST Micro and similar flash.
 * Supports only the block protection bits BP{0,1,2} in the status register
 * (SR). Does not support these features found in newer SR bitfields:
 *   - TB: top/bottom protect - only handle TB=0 (top protect)
 *   - SEC: sector/block protect - only handle SEC=0 (block protect)
 *   - CMP: complement protect - only support CMP=0 (range is not complemented)
 *
 * Sample table portion for 8MB flash (Winbond w25q64fw):
 *
 *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
 *  --------------------------------------------------------------------------
 *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
 *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
 *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
 *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
 *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
 *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
 *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
 *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
 *
 * Returns negative on errors, 0 on success.
 */
static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
        struct mtd_info *mtd = &nor->mtd;
        int status_old, status_new;
        u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
        u8 shift = ffs(mask) - 1, pow, val;
        loff_t lock_len;
        int ret;

        status_old = read_sr(nor);
        if (status_old < 0)
                return status_old;

        /* If nothing in our range is unlocked, we don't need to do anything */
        if (stm_is_locked_sr(nor, ofs, len, status_old))
                return 0;

        /* If anything above us is unlocked, we can't use 'top' protection */
        if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
                              status_old))
                return -EINVAL;

        /* lock_len: length of region that should end up locked */
        lock_len = mtd->size - ofs;

        /*
         * Need smallest pow such that:
         *
         * 1 / (2^pow) <= (len / size)
         *
         * so (assuming power-of-2 size) we do:
         *
         * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
         */
        pow = ilog2(mtd->size) - ilog2(lock_len);
        val = mask - (pow << shift);
        if (val & ~mask)
                return -EINVAL;
        /* Don't "lock" with no region! */
        if (!(val & mask))
                return -EINVAL;

        status_new = (status_old & ~mask) | val;

        /* Disallow further writes if WP pin is asserted */
        status_new |= SR_SRWD;

        /* Don't bother if they're the same */
        if (status_new == status_old)
                return 0;

        /* Only modify protection if it will not unlock other areas */
        if ((status_new & mask) < (status_old & mask))
                return -EINVAL;

        write_enable(nor);
        ret = write_sr(nor, status_new);
        if (ret)
                return ret;
        return spi_nor_wait_till_ready(nor);
}
|
|
|
|
|
mtd: spi-nor: refactor block protection functions
This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:
* It didn't validate the length for LOCK and the offset for UNLOCK, so
we were essentially discarding half of the user-supplied data and
assuming what they wanted to lock/unlock
* It didn't do very good error checking
* It didn't make use of the fact that this operation works on
power-of-two dimensions
So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:
* More comments on how this was derived
* Notes on what is (and isn't) supported
* A more exendible function, so we could add support for other
protection ranges
* More accurate locking - e.g., suppose the top quadrant is locked (75%
to 100%); then in the following cases, case (a) will succeed but (b)
will not (return -EINVAL):
(a) user requests lock 3rd quadrant (50% to 75%)
(b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
to 73%)
Case (b) *should* fail, since we'd have to lock blocks that weren't
requested. But the old implementation didn't know the difference and
would lock the entire second half (50% to 100%)
This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2015-09-02 03:57:11 +08:00
|
|
|
/*
|
|
|
|
* Unlock a region of the flash. See stm_lock() for more info
|
|
|
|
*
|
|
|
|
* Returns negative on errors, 0 on success.
|
|
|
|
*/
|
2015-03-13 15:38:39 +08:00
|
|
|
static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
|
2014-02-24 18:37:37 +08:00
|
|
|
{
|
2015-08-14 06:46:05 +08:00
|
|
|
struct mtd_info *mtd = &nor->mtd;
|
2015-11-21 02:26:11 +08:00
|
|
|
int status_old, status_new;
|
mtd: spi-nor: refactor block protection functions
This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:
* It didn't validate the length for LOCK and the offset for UNLOCK, so
we were essentially discarding half of the user-supplied data and
assuming what they wanted to lock/unlock
* It didn't do very good error checking
* It didn't make use of the fact that this operation works on
power-of-two dimensions
So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:
* More comments on how this was derived
* Notes on what is (and isn't) supported
* A more exendible function, so we could add support for other
protection ranges
* More accurate locking - e.g., suppose the top quadrant is locked (75%
to 100%); then in the following cases, case (a) will succeed but (b)
will not (return -EINVAL):
(a) user requests lock 3rd quadrant (50% to 75%)
(b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
to 73%)
Case (b) *should* fail, since we'd have to lock blocks that weren't
requested. But the old implementation didn't know the difference and
would lock the entire second half (50% to 100%)
This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2015-09-02 03:57:11 +08:00
|
|
|
u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
|
|
|
|
u8 shift = ffs(mask) - 1, pow, val;
|
2016-01-30 03:25:32 +08:00
|
|
|
loff_t lock_len;
|
2015-12-29 04:54:51 +08:00
|
|
|
int ret;
|
2014-02-24 18:37:37 +08:00
|
|
|
|
|
|
|
status_old = read_sr(nor);
|
2015-11-21 02:26:11 +08:00
|
|
|
if (status_old < 0)
|
|
|
|
return status_old;
|
2014-02-24 18:37:37 +08:00
|
|
|
|
2016-01-30 03:25:32 +08:00
|
|
|
/* If nothing in our range is locked, we don't need to do anything */
|
|
|
|
if (stm_is_unlocked_sr(nor, ofs, len, status_old))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* If anything below us is locked, we can't use 'top' protection */
|
|
|
|
if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
|
mtd: spi-nor: refactor block protection functions
This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:
* It didn't validate the length for LOCK and the offset for UNLOCK, so
we were essentially discarding half of the user-supplied data and
assuming what they wanted to lock/unlock
* It didn't do very good error checking
* It didn't make use of the fact that this operation works on
power-of-two dimensions
So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:
* More comments on how this was derived
* Notes on what is (and isn't) supported
* A more exendible function, so we could add support for other
protection ranges
* More accurate locking - e.g., suppose the top quadrant is locked (75%
to 100%); then in the following cases, case (a) will succeed but (b)
will not (return -EINVAL):
(a) user requests lock 3rd quadrant (50% to 75%)
(b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
to 73%)
Case (b) *should* fail, since we'd have to lock blocks that weren't
requested. But the old implementation didn't know the difference and
would lock the entire second half (50% to 100%)
This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2015-09-02 03:57:11 +08:00
|
|
|
return -EINVAL;
|
2014-02-24 18:37:37 +08:00
|
|
|
|
2016-01-30 03:25:32 +08:00
|
|
|
/* lock_len: length of region that should remain locked */
|
|
|
|
lock_len = mtd->size - (ofs + len);
|
|
|
|
|
mtd: spi-nor: refactor block protection functions
This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:
* It didn't validate the length for LOCK and the offset for UNLOCK, so
we were essentially discarding half of the user-supplied data and
assuming what they wanted to lock/unlock
* It didn't do very good error checking
* It didn't make use of the fact that this operation works on
power-of-two dimensions
So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:
* More comments on how this was derived
* Notes on what is (and isn't) supported
* A more exendible function, so we could add support for other
protection ranges
* More accurate locking - e.g., suppose the top quadrant is locked (75%
to 100%); then in the following cases, case (a) will succeed but (b)
will not (return -EINVAL):
(a) user requests lock 3rd quadrant (50% to 75%)
(b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
to 73%)
Case (b) *should* fail, since we'd have to lock blocks that weren't
requested. But the old implementation didn't know the difference and
would lock the entire second half (50% to 100%)
This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2015-09-02 03:57:11 +08:00
|
|
|
/*
|
|
|
|
* Need largest pow such that:
|
|
|
|
*
|
|
|
|
* 1 / (2^pow) >= (len / size)
|
|
|
|
*
|
|
|
|
* so (assuming power-of-2 size) we do:
|
|
|
|
*
|
|
|
|
* pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
|
|
|
|
*/
|
2016-01-30 03:25:32 +08:00
|
|
|
pow = ilog2(mtd->size) - order_base_2(lock_len);
|
|
|
|
if (lock_len == 0) {
|
mtd: spi-nor: refactor block protection functions
This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:
* It didn't validate the length for LOCK and the offset for UNLOCK, so
we were essentially discarding half of the user-supplied data and
assuming what they wanted to lock/unlock
* It didn't do very good error checking
* It didn't make use of the fact that this operation works on
power-of-two dimensions
So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:
* More comments on how this was derived
* Notes on what is (and isn't) supported
* A more exendible function, so we could add support for other
protection ranges
* More accurate locking - e.g., suppose the top quadrant is locked (75%
to 100%); then in the following cases, case (a) will succeed but (b)
will not (return -EINVAL):
(a) user requests lock 3rd quadrant (50% to 75%)
(b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
to 73%)
Case (b) *should* fail, since we'd have to lock blocks that weren't
requested. But the old implementation didn't know the difference and
would lock the entire second half (50% to 100%)
This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2015-09-02 03:57:11 +08:00
|
|
|
val = 0; /* fully unlocked */
|
|
|
|
} else {
|
|
|
|
val = mask - (pow << shift);
|
|
|
|
/* Some power-of-two sizes are not supported */
|
|
|
|
if (val & ~mask)
|
|
|
|
return -EINVAL;
|
2014-02-24 18:37:37 +08:00
|
|
|
}
|
|
|
|
|
mtd: spi-nor: refactor block protection functions
This code was a bit sloppy, would produce a lot of copy-and-paste, and
did not always provide a sensible interface:
* It didn't validate the length for LOCK and the offset for UNLOCK, so
we were essentially discarding half of the user-supplied data and
assuming what they wanted to lock/unlock
* It didn't do very good error checking
* It didn't make use of the fact that this operation works on
power-of-two dimensions
So, rewrite this to do proper bit arithmetic rather than a bunch of
hard-coded condition tables. Now we have:
* More comments on how this was derived
* Notes on what is (and isn't) supported
* A more exendible function, so we could add support for other
protection ranges
* More accurate locking - e.g., suppose the top quadrant is locked (75%
to 100%); then in the following cases, case (a) will succeed but (b)
will not (return -EINVAL):
(a) user requests lock 3rd quadrant (50% to 75%)
(b) user requests lock 3rd quadrant, minus a few blocks (e.g., 50%
to 73%)
Case (b) *should* fail, since we'd have to lock blocks that weren't
requested. But the old implementation didn't know the difference and
would lock the entire second half (50% to 100%)
This refactoring work will also help enable the addition of
mtd_is_locked() support and potentially the support of bottom boot
protection (TB=1).
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
2015-09-02 03:57:11 +08:00
	status_new = (status_old & ~mask) | val;

	/* Don't protect status register if we're fully unlocked */
	if (lock_len == 0)
		status_new &= ~SR_SRWD;

	/* Don't bother if they're the same */
	if (status_new == status_old)
		return 0;

	/* Only modify protection if it will not lock other areas */
	if ((status_new & mask) > (status_old & mask))
		return -EINVAL;

	write_enable(nor);
	ret = write_sr(nor, status_new);
	if (ret)
		return ret;

	return spi_nor_wait_till_ready(nor);
}

/*
 * Check if a region of the flash is (completely) locked. See stm_lock() for
 * more info.
 *
 * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
 * negative on errors.
 */
static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
{
	int status;

	status = read_sr(nor);
	if (status < 0)
		return status;

	return stm_is_locked_sr(nor, ofs, len, status);
}

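/*
 * Illustrative example (not from the original source): with the 3-bit BP
 * encoding used above, a 16 MiB part whose status register reads BP2 | BP1
 * has its top 8 MiB protected, so stm_is_locked(nor, SZ_8M, SZ_8M) returns
 * 1 while stm_is_locked(nor, 0, SZ_16M) returns 0 because the bottom half
 * is writable.
 */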
static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
	if (ret)
		return ret;

	ret = nor->flash_lock(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
	return ret;
}

static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_unlock(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}

static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
	if (ret)
		return ret;

	ret = nor->flash_is_locked(nor, ofs, len);

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
	return ret;
}

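/*
 * These handlers back the MEMLOCK, MEMUNLOCK and MEMISLOCKED mtd ioctls.
 * A minimal user-space sketch (illustrative only, assuming /dev/mtd0 is a
 * flash handled by this driver):
 *
 *	struct erase_info_user ei = { .start = 0, .length = 0x800000 };
 *	int fd = open("/dev/mtd0", O_RDWR);
 *
 *	ioctl(fd, MEMLOCK, &ei);	// ends up in spi_nor_lock()
 *	ioctl(fd, MEMISLOCKED, &ei);	// ends up in spi_nor_is_locked()
 *	ioctl(fd, MEMUNLOCK, &ei);	// ends up in spi_nor_unlock()
 */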
/* Used when the "_ext_id" is two bytes at most */
#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
		.id = {							\
			((_jedec_id) >> 16) & 0xff,			\
			((_jedec_id) >> 8) & 0xff,			\
			(_jedec_id) & 0xff,				\
			((_ext_id) >> 8) & 0xff,			\
			(_ext_id) & 0xff,				\
			},						\
		.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),	\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),

#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
		.id = {							\
			((_jedec_id) >> 16) & 0xff,			\
			((_jedec_id) >> 8) & 0xff,			\
			(_jedec_id) & 0xff,				\
			((_ext_id) >> 16) & 0xff,			\
			((_ext_id) >> 8) & 0xff,			\
			(_ext_id) & 0xff,				\
			},						\
		.id_len = 6,						\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = 256,					\
		.flags = (_flags),

#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags)	\
		.sector_size = (_sector_size),				\
		.n_sectors = (_n_sectors),				\
		.page_size = (_page_size),				\
		.addr_width = (_addr_width),				\
		.flags = (_flags),

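/*
 * Expansion example (illustrative, not part of the driver): the table entry
 *
 *	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
 *
 * initializes .id = { 0xef, 0x40, 0x18, 0x00, 0x00 }, .id_len = 3 (no
 * extended ID), .sector_size = 64 KiB, .n_sectors = 256 and .page_size = 256,
 * i.e. a 16 MiB part erasable in 4 KiB units.
 */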
/* NOTE: double check command sets and memory organization when you add
 * more nor chips.  This current list focusses on newer chips, which
 * have been converging on command sets which include JEDEC ID.
 *
 * All newly added entries should describe *hardware* and should use SECT_4K
 * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage
 * scenarios excluding small sectors there is a config option that can be
 * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
 * For historical (and compatibility) reasons (before we got the above config)
 * some old entries may be missing the 4K flag.
 */
static const struct flash_info spi_nor_ids[] = {
	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
	{ "at25fs010",  INFO(0x1f6601, 0, 32 * 1024,   4, SECT_4K) },
	{ "at25fs040",  INFO(0x1f6604, 0, 64 * 1024,   8, SECT_4K) },

	{ "at25df041a", INFO(0x1f4401, 0, 64 * 1024,   8, SECT_4K) },
	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024,  64, SECT_4K) },
	{ "at25df641",  INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) },

	{ "at26f004",   INFO(0x1f0400, 0, 64 * 1024,  8, SECT_4K) },
	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) },
	{ "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) },
	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },

	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) },

	/* EON -- en25xxx */
	{ "en25f32",    INFO(0x1c3116, 0, 64 * 1024,   64, SECT_4K) },
	{ "en25p32",    INFO(0x1c2016, 0, 64 * 1024,   64, 0) },
	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
	{ "en25p64",    INFO(0x1c2017, 0, 64 * 1024,  128, 0) },
	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
	{ "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
	{ "en25qh256",  INFO(0x1c7019, 0, 64 * 1024,  512, 0) },
	{ "en25s64",    INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },

	/* ESMT */
	{ "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) },

	/* Everspin */
	{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "mr25h10",  CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },

	/* Fujitsu */
	{ "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) },

	/* GigaDevice */
	{ "gd25q32",  INFO(0xc84016, 0, 64 * 1024,  64, SECT_4K) },
	{ "gd25q64",  INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
	{ "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) },

	/* Intel/Numonyx -- xxxs33b */
	{ "160s33b",  INFO(0x898911, 0, 64 * 1024,  32, 0) },
	{ "320s33b",  INFO(0x898912, 0, 64 * 1024,  64, 0) },
	{ "640s33b",  INFO(0x898913, 0, 64 * 1024, 128, 0) },

	/* ISSI */
	{ "is25cd512", INFO(0x7f9d20, 0, 32 * 1024,   2, SECT_4K) },

	/* Macronix */
	{ "mx25l512e",   INFO(0xc22010, 0, 64 * 1024,   1, SECT_4K) },
	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
	{ "mx25l3255e",  INFO(0xc29e16, 0, 64 * 1024,  64, SECT_4K) },
	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) },
	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) },
	{ "mx66l1g55g",  INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) },

	/* Micron */
	{ "n25q032",    INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
	{ "n25q032a",   INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
	{ "n25q064",    INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q064a",   INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a11", INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q128a13", INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q256a",   INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "n25q512a",   INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
	{ "n25q00",     INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },

	/* PMC */
	{ "pm25lv512", INFO(0,        0, 32 * 1024,  2, SECT_4K_PMC) },
	{ "pm25lv010", INFO(0,        0, 32 * 1024,  4, SECT_4K_PMC) },
	{ "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) },

	/* Spansion -- single (large) sector size only, at least
	 * for the chips listed here (without boot sectors).
	 */
	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
	{ "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl512s",  INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s70fl01gs",  INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) },
	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
	{ "s25fl128s",  INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
	{ "s25fl004k",  INFO(0xef4013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl008k",  INFO(0xef4014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl016k",  INFO(0xef4015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl064k",  INFO(0xef4017,      0,  64 * 1024, 128, SECT_4K) },
	{ "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "s25fl132k",  INFO(0x014016,      0,  64 * 1024,  64, SECT_4K) },
	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
	{ "s25fl204k",  INFO(0x014013,      0,  64 * 1024,   8, SECT_4K | SPI_NOR_DUAL_READ) },

	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,   8, SECT_4K | SST_WRITE) },
	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024,  16, SECT_4K | SST_WRITE) },
	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024,  32, SECT_4K | SST_WRITE) },
	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024,  64, SECT_4K | SST_WRITE) },
	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,   1, SECT_4K | SST_WRITE) },
	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,   2, SECT_4K | SST_WRITE) },
	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,   4, SECT_4K | SST_WRITE) },
	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024,   4, SECT_4K) },
	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024,   8, SECT_4K) },
	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,   8, SECT_4K | SST_WRITE) },
	{ "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024,  16, SECT_4K | SST_WRITE) },

	/* ST Microelectronics -- newer production may have feature updates */
	{ "m25p05",  INFO(0x202010,  0,  32 * 1024,   2, 0) },
	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },

	{ "m25p05-nonjedec",  INFO(0, 0,  32 * 1024,   2, 0) },
	{ "m25p10-nonjedec",  INFO(0, 0,  32 * 1024,   4, 0) },
	{ "m25p20-nonjedec",  INFO(0, 0,  64 * 1024,   4, 0) },
	{ "m25p40-nonjedec",  INFO(0, 0,  64 * 1024,   8, 0) },
	{ "m25p80-nonjedec",  INFO(0, 0,  64 * 1024,  16, 0) },
	{ "m25p16-nonjedec",  INFO(0, 0,  64 * 1024,  32, 0) },
	{ "m25p32-nonjedec",  INFO(0, 0,  64 * 1024,  64, 0) },
	{ "m25p64-nonjedec",  INFO(0, 0,  64 * 1024, 128, 0) },
	{ "m25p128-nonjedec", INFO(0, 0, 256 * 1024,  64, 0) },

	{ "m45pe10", INFO(0x204011,  0, 64 * 1024,    2, 0) },
	{ "m45pe80", INFO(0x204014,  0, 64 * 1024,   16, 0) },
	{ "m45pe16", INFO(0x204015,  0, 64 * 1024,   32, 0) },

	{ "m25pe20", INFO(0x208012,  0, 64 * 1024,  4,       0) },
	{ "m25pe80", INFO(0x208014,  0, 64 * 1024, 16,       0) },
	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },

	{ "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K) },
	{ "m25px32",    INFO(0x207116,  0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s0", INFO(0x207316,  0, 64 * 1024, 64, SECT_4K) },
	{ "m25px32-s1", INFO(0x206316,  0, 64 * 1024, 64, SECT_4K) },
	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
	{ "m25px80",    INFO(0x207114,  0, 64 * 1024, 16, 0) },

	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
	{ "w25x05",    INFO(0xef3010, 0, 64 * 1024,   1, SECT_4K) },
	{ "w25x10",    INFO(0xef3011, 0, 64 * 1024,   2, SECT_4K) },
	{ "w25x20",    INFO(0xef3012, 0, 64 * 1024,   4, SECT_4K) },
	{ "w25x40",    INFO(0xef3013, 0, 64 * 1024,   8, SECT_4K) },
	{ "w25x80",    INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
	{ "w25x16",    INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
	{ "w25x32",    INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
	{ "w25q32",    INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
	{ "w25q32dw",  INFO(0xef6016, 0, 64 * 1024,  64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25x64",    INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64",    INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
	{ "w25q64dw",  INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
	{ "w25q80",    INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
	{ "w25q80bl",  INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
	{ "w25q128",   INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
	{ "w25q256",   INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) },

	/* Catalyst / On Semiconductor -- non-JEDEC */
	{ "cat25c11", CAT25_INFO(  16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c03", CAT25_INFO(  32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
	{ },
};

static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
{
	int tmp;
	u8 id[SPI_NOR_MAX_ID_LEN];
	const struct flash_info *info;

	tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
	if (tmp < 0) {
		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
		return ERR_PTR(tmp);
	}

	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
		info = &spi_nor_ids[tmp];
		if (info->id_len) {
			if (!memcmp(info->id, id, info->id_len))
				return &spi_nor_ids[tmp];
		}
	}
	dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
		id[0], id[1], id[2]);
	return ERR_PTR(-ENODEV);
}

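/*
 * Matching example (illustrative): a Winbond W25Q128FW answers the RDID
 * command (SPINOR_OP_RDID, 0x9f) with the bytes ef 60 18. The loop above
 * stops at the "w25q128fw" table entry, whose INFO(0xef6018, ...) encoding
 * gives id_len = 3 and id = { 0xef, 0x60, 0x18, ... }, so memcmp() matches
 * on the first three bytes.
 */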
static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
			size_t *retlen, u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	int ret;

	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
	if (ret)
		return ret;

	ret = nor->read(nor, from, len, retlen, buf);

	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
	return ret;
}

static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
		size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	size_t actual;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	write_enable(nor);

	nor->sst_write_second = false;

	actual = to % 2;
	/* Start write from odd address. */
	if (actual) {
		nor->program_opcode = SPINOR_OP_BP;

		/* write one byte. */
		nor->write(nor, to, 1, retlen, buf);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto time_out;
	}
	to += actual;

	/* Write out most of the data here. */
	for (; actual < len - 1; actual += 2) {
		nor->program_opcode = SPINOR_OP_AAI_WP;

		/* write two bytes. */
		nor->write(nor, to, 2, retlen, buf + actual);
		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto time_out;
		to += 2;
		nor->sst_write_second = true;
	}
	nor->sst_write_second = false;

	write_disable(nor);
	ret = spi_nor_wait_till_ready(nor);
	if (ret)
		goto time_out;

	/* Write out trailing byte if it exists. */
	if (actual != len) {
		write_enable(nor);

		nor->program_opcode = SPINOR_OP_BP;
		nor->write(nor, to, 1, retlen, buf + actual);

		ret = spi_nor_wait_till_ready(nor);
		if (ret)
			goto time_out;
		write_disable(nor);
	}
time_out:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}

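/*
 * Sequence example (illustrative): writing 5 bytes starting at an odd
 * address first issues a single byte-program (SPINOR_OP_BP) for buf[0],
 * then two AAI word-program cycles (SPINOR_OP_AAI_WP) for buf[1..2] and
 * buf[3..4]; starting at an even address it issues two AAI cycles covering
 * buf[0..3] and finishes with a byte-program for the trailing buf[4].
 * Each cycle is followed by a status poll via spi_nor_wait_till_ready().
 */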
/*
 * Write an address range to the nor chip.  Data must be written in
 * FLASH_PAGESIZE chunks.  The address range may be any size provided
 * it is within the physical boundaries.
 */
static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
	size_t *retlen, const u_char *buf)
{
	struct spi_nor *nor = mtd_to_spi_nor(mtd);
	u32 page_offset, page_size, i;
	int ret;

	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);

	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
	if (ret)
		return ret;

	write_enable(nor);

	page_offset = to & (nor->page_size - 1);

	/* do all the bytes fit onto one page? */
	if (page_offset + len <= nor->page_size) {
		nor->write(nor, to, len, retlen, buf);
	} else {
		/* the size of data remaining on the first page */
		page_size = nor->page_size - page_offset;
		nor->write(nor, to, page_size, retlen, buf);

		/* write everything in nor->page_size chunks */
		for (i = page_size; i < len; i += page_size) {
			page_size = len - i;
			if (page_size > nor->page_size)
				page_size = nor->page_size;

			ret = spi_nor_wait_till_ready(nor);
			if (ret)
				goto write_err;

			write_enable(nor);

			nor->write(nor, to + i, page_size, retlen, buf + i);
		}
	}

	ret = spi_nor_wait_till_ready(nor);
write_err:
	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
	return ret;
}

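/*
 * Chunking example (illustrative): with the default 256-byte page size, a
 * 600-byte write to offset 0x1f0 starts at page_offset 0xf0, so the first
 * program covers the 16 bytes left on that page, the loop then issues two
 * full 256-byte page programs and a final 72-byte program (16 + 256 + 256 +
 * 72 = 600), waiting for the previous program to finish before each chunk.
 */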
static int macronix_quad_enable(struct spi_nor *nor)
{
	int ret, val;

	val = read_sr(nor);
	if (val < 0)
		return val;
	write_enable(nor);

	write_sr(nor, val | SR_QUAD_EN_MX);

	if (spi_nor_wait_till_ready(nor))
		return 1;

	ret = read_sr(nor);
	if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
		dev_err(nor->dev, "Macronix Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Write status register and configuration register with 2 bytes.
 * The first byte will be written to the status register, while the
 * second byte will be written to the configuration register.
 * Return negative if error occurred.
 */
static int write_sr_cr(struct spi_nor *nor, u16 val)
{
	nor->cmd_buf[0] = val & 0xff;
	nor->cmd_buf[1] = (val >> 8);

	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2);
}

static int spansion_quad_enable(struct spi_nor *nor)
{
	int ret;
	int quad_en = CR_QUAD_EN_SPAN << 8;

	write_enable(nor);

	ret = write_sr_cr(nor, quad_en);
	if (ret < 0) {
		dev_err(nor->dev,
			"error while writing configuration register\n");
		return -EINVAL;
	}

	/* read back and check it */
	ret = read_cr(nor);
	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
		dev_err(nor->dev, "Spansion Quad bit not set\n");
		return -EINVAL;
	}

	return 0;
}

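/*
 * Register layout example (illustrative): CR_QUAD_EN_SPAN is bit 1 of the
 * configuration register, so the write above sends WRSR with cmd_buf[0] =
 * 0x00 (status register) and cmd_buf[1] = 0x02 (configuration register).
 * Macronix parts instead keep their QE bit in the status register itself
 * (SR_QUAD_EN_MX, bit 6), which is why macronix_quad_enable() uses a plain
 * write_sr().
 */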
static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
{
	int status;

	switch (JEDEC_MFR(info)) {
	case SNOR_MFR_MACRONIX:
		status = macronix_quad_enable(nor);
		if (status) {
			dev_err(nor->dev, "Macronix quad-read not enabled\n");
			return -EINVAL;
		}
		return status;
	case SNOR_MFR_MICRON:
		return 0;
	default:
		status = spansion_quad_enable(nor);
		if (status) {
			dev_err(nor->dev, "Spansion quad-read not enabled\n");
			return -EINVAL;
		}
		return status;
	}
}

static int spi_nor_check(struct spi_nor *nor)
{
	if (!nor->dev || !nor->read || !nor->write ||
		!nor->read_reg || !nor->write_reg) {
		pr_err("spi-nor: please fill all the necessary fields!\n");
		return -EINVAL;
	}

	return 0;
}

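/*
 * Registration sketch (illustrative only; my_read/my_write/my_read_reg/
 * my_write_reg are placeholders, and the exact hook signatures are those of
 * struct spi_nor in <linux/mtd/spi-nor.h>): a controller driver such as
 * m25p80 typically fills in the callbacks checked above and then hands the
 * device over to this framework, roughly:
 *
 *	nor->dev = &spi->dev;
 *	nor->read = my_read;
 *	nor->write = my_write;
 *	nor->read_reg = my_read_reg;
 *	nor->write_reg = my_write_reg;
 *	spi_nor_set_flash_node(nor, spi->dev.of_node);
 *
 *	ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD);
 *	if (!ret)
 *		ret = mtd_device_register(&nor->mtd, NULL, 0);
 */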
int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
{
	const struct flash_info *info = NULL;
	struct device *dev = nor->dev;
	struct mtd_info *mtd = &nor->mtd;
	struct device_node *np = spi_nor_get_flash_node(nor);
	int ret;
	int i;

	ret = spi_nor_check(nor);
	if (ret)
		return ret;

	if (name)
		info = spi_nor_match_id(name);
	/* Try to auto-detect if chip name wasn't specified or not found */
	if (!info)
		info = spi_nor_read_id(nor);
	if (IS_ERR_OR_NULL(info))
		return -ENOENT;

	/*
	 * If caller has specified name of flash model that can normally be
	 * detected using JEDEC, let's verify it.
	 */
	if (name && info->id_len) {
		const struct flash_info *jinfo;

		jinfo = spi_nor_read_id(nor);
		if (IS_ERR(jinfo)) {
			return PTR_ERR(jinfo);
		} else if (jinfo != info) {
			/*
			 * JEDEC knows better, so overwrite platform ID. We
			 * can't trust partitions any longer, but we'll let
			 * mtd apply them anyway, since some partitions may be
			 * marked read-only, and we don't want to lose that
			 * information, even if it's not 100% accurate.
			 */
			dev_warn(dev, "found %s, expected %s\n",
				 jinfo->name, info->name);
			info = jinfo;
		}
	}

	mutex_init(&nor->lock);

	/*
	 * Atmel, SST, Intel/Numonyx, and other serial NOR chips tend to
	 * power up with the software protection bits set.
	 */
	if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
	    JEDEC_MFR(info) == SNOR_MFR_INTEL ||
	    JEDEC_MFR(info) == SNOR_MFR_SST ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		write_enable(nor);
		write_sr(nor, 0);
		spi_nor_wait_till_ready(nor);
	}

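	/*
	 * Note (illustrative): writing 0 to the status register above clears
	 * the block-protect (BP) bits, so a chip that powers up with its
	 * array protected becomes writable before the mtd device is
	 * registered; chips that opt in via SPI_NOR_HAS_LOCK get the same
	 * treatment even if their manufacturer is not listed here.
	 */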
	if (!mtd->name)
		mtd->name = dev_name(dev);
	mtd->priv = nor;
	mtd->type = MTD_NORFLASH;
	mtd->writesize = 1;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->size = info->sector_size * info->n_sectors;
	mtd->_erase = spi_nor_erase;
	mtd->_read = spi_nor_read;

	/* NOR protection support for STmicro/Micron chips and similar */
	if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
	    info->flags & SPI_NOR_HAS_LOCK) {
		nor->flash_lock = stm_lock;
		nor->flash_unlock = stm_unlock;
		nor->flash_is_locked = stm_is_locked;
	}

	if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
		mtd->_lock = spi_nor_lock;
		mtd->_unlock = spi_nor_unlock;
		mtd->_is_locked = spi_nor_is_locked;
	}

	/* sst nor chips use AAI word program */
	if (info->flags & SST_WRITE)
		mtd->_write = sst_write;
	else
		mtd->_write = spi_nor_write;

	if (info->flags & USE_FSR)
		nor->flags |= SNOR_F_USE_FSR;

#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
	/* prefer "small sector" erase if possible */
	if (info->flags & SECT_4K) {
		nor->erase_opcode = SPINOR_OP_BE_4K;
		mtd->erasesize = 4096;
	} else if (info->flags & SECT_4K_PMC) {
		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
		mtd->erasesize = 4096;
	} else
#endif
	{
		nor->erase_opcode = SPINOR_OP_SE;
		mtd->erasesize = info->sector_size;
	}

	if (info->flags & SPI_NOR_NO_ERASE)
		mtd->flags |= MTD_NO_ERASE;

	mtd->dev.parent = dev;
	nor->page_size = info->page_size;
	mtd->writebufsize = nor->page_size;

	if (np) {
		/* If we were instantiated by DT, use it */
		if (of_property_read_bool(np, "m25p,fast-read"))
			nor->flash_read = SPI_NOR_FAST;
		else
			nor->flash_read = SPI_NOR_NORMAL;
	} else {
		/* If we weren't instantiated by DT, default to fast-read */
		nor->flash_read = SPI_NOR_FAST;
	}

	/* Some devices cannot do fast-read, no matter what DT tells us */
	if (info->flags & SPI_NOR_NO_FR)
		nor->flash_read = SPI_NOR_NORMAL;

	/* Quad/Dual-read mode takes precedence over fast/normal */
	if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) {
		ret = set_quad_mode(nor, info);
		if (ret) {
			dev_err(dev, "quad mode not supported\n");
			return ret;
		}
		nor->flash_read = SPI_NOR_QUAD;
	} else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) {
		nor->flash_read = SPI_NOR_DUAL;
	}

	/* Default commands */
	switch (nor->flash_read) {
	case SPI_NOR_QUAD:
		nor->read_opcode = SPINOR_OP_READ_1_1_4;
		break;
	case SPI_NOR_DUAL:
		nor->read_opcode = SPINOR_OP_READ_1_1_2;
		break;
	case SPI_NOR_FAST:
		nor->read_opcode = SPINOR_OP_READ_FAST;
		break;
	case SPI_NOR_NORMAL:
		nor->read_opcode = SPINOR_OP_READ;
		break;
	default:
		dev_err(dev, "No Read opcode defined\n");
		return -EINVAL;
	}

	nor->program_opcode = SPINOR_OP_PP;

	if (info->addr_width)
		nor->addr_width = info->addr_width;
	else if (mtd->size > 0x1000000) {
		/* enable 4-byte addressing if the device exceeds 16MiB */
		nor->addr_width = 4;
		if (JEDEC_MFR(info) == SNOR_MFR_SPANSION) {
			/* Dedicated 4-byte command set */
			switch (nor->flash_read) {
			case SPI_NOR_QUAD:
				nor->read_opcode = SPINOR_OP_READ4_1_1_4;
				break;
			case SPI_NOR_DUAL:
				nor->read_opcode = SPINOR_OP_READ4_1_1_2;
				break;
			case SPI_NOR_FAST:
				nor->read_opcode = SPINOR_OP_READ4_FAST;
				break;
			case SPI_NOR_NORMAL:
				nor->read_opcode = SPINOR_OP_READ4;
				break;
			}
			nor->program_opcode = SPINOR_OP_PP_4B;
			/* No small sector erase for 4-byte command set */
			nor->erase_opcode = SPINOR_OP_SE_4B;
			mtd->erasesize = info->sector_size;
		} else
			set_4byte(nor, info, 1);
	} else {
		nor->addr_width = 3;
	}

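	/*
	 * Example (illustrative): a 32 MiB w25q256 (512 x 64 KiB sectors)
	 * exceeds the 16 MiB reachable with 3-byte addresses, so addr_width
	 * becomes 4 and, since it is not a Spansion part, set_4byte() is
	 * used to switch the chip into 4-byte address mode instead of the
	 * dedicated 4-byte opcodes above.
	 */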
	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
		dev_err(dev, "address width is too large: %u\n",
			nor->addr_width);
		return -EINVAL;
	}

	nor->read_dummy = spi_nor_read_dummy_cycles(nor);

	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
			(long long)mtd->size >> 10);

	dev_dbg(dev,
		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);

	if (mtd->numeraseregions)
		for (i = 0; i < mtd->numeraseregions; i++)
			dev_dbg(dev,
				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
				".erasesize = 0x%.8x (%uKiB), "
				".numblocks = %d }\n",
				i, (long long)mtd->eraseregions[i].offset,
				mtd->eraseregions[i].erasesize,
				mtd->eraseregions[i].erasesize / 1024,
				mtd->eraseregions[i].numblocks);
	return 0;
}
EXPORT_SYMBOL_GPL(spi_nor_scan);

static const struct flash_info *spi_nor_match_id(const char *name)
{
	const struct flash_info *id = spi_nor_ids;

	while (id->name) {
		if (!strcmp(name, id->name))
			return id;
		id++;
	}
	return NULL;
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
MODULE_AUTHOR("Mike Lavender");
MODULE_DESCRIPTION("framework for SPI NOR");