-rw-r--r--  drivers/mtd/Kconfig | 11
-rw-r--r--  drivers/mtd/Makefile | 1
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c | 78
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 14
-rw-r--r--  drivers/mtd/chips/cfi_probe.c | 12
-rw-r--r--  drivers/mtd/chips/jedec_probe.c | 1376
-rw-r--r--  drivers/mtd/cmdlinepart.c | 9
-rw-r--r--  drivers/mtd/devices/lart.c | 2
-rw-r--r--  drivers/mtd/devices/mtd_dataflash.c | 2
-rw-r--r--  drivers/mtd/maps/Kconfig | 9
-rw-r--r--  drivers/mtd/maps/Makefile | 1
-rw-r--r--  drivers/mtd/maps/physmap_of.c | 88
-rw-r--r--  drivers/mtd/maps/pnc2000.c | 93
-rw-r--r--  drivers/mtd/maps/scb2_flash.c | 2
-rw-r--r--  drivers/mtd/mtd_blkdevs.c | 2
-rw-r--r--  drivers/mtd/mtdchar.c | 8
-rw-r--r--  drivers/mtd/mtdcore.c | 2
-rw-r--r--  drivers/mtd/mtdoops.c | 170
-rw-r--r--  drivers/mtd/nand/Kconfig | 17
-rw-r--r--  drivers/mtd/nand/Makefile | 2
-rw-r--r--  drivers/mtd/nand/at91_nand.c | 12
-rw-r--r--  drivers/mtd/nand/bf5xx_nand.c | 20
-rw-r--r--  drivers/mtd/nand/cafe_nand.c | 19
-rw-r--r--  drivers/mtd/nand/nand_base.c | 8
-rw-r--r--  drivers/mtd/nand/orion_nand.c | 171
-rw-r--r--  drivers/mtd/nand/pasemi_nand.c | 243
-rw-r--r--  drivers/mtd/nand/s3c2410.c | 48
-rw-r--r--  drivers/mtd/ofpart.c | 74
-rw-r--r--  drivers/mtd/onenand/onenand_base.c | 32
-rw-r--r--  drivers/mtd/redboot.c | 25
-rw-r--r--  drivers/mtd/ubi/build.c | 633
-rw-r--r--  drivers/mtd/ubi/cdev.c | 164
-rw-r--r--  drivers/mtd/ubi/debug.h | 21
-rw-r--r--  drivers/mtd/ubi/eba.c | 321
-rw-r--r--  drivers/mtd/ubi/gluebi.c | 9
-rw-r--r--  drivers/mtd/ubi/io.c | 10
-rw-r--r--  drivers/mtd/ubi/kapi.c | 171
-rw-r--r--  drivers/mtd/ubi/misc.c | 2
-rw-r--r--  drivers/mtd/ubi/scan.c | 2
-rw-r--r--  drivers/mtd/ubi/ubi.h | 118
-rw-r--r--  drivers/mtd/ubi/upd.c | 11
-rw-r--r--  drivers/mtd/ubi/vmt.c | 196
-rw-r--r--  drivers/mtd/ubi/vtbl.c | 24
-rw-r--r--  drivers/mtd/ubi/wl.c | 339
-rw-r--r--  fs/jffs2/acl.c | 6
-rw-r--r--  fs/jffs2/acl.h | 2
-rw-r--r--  fs/jffs2/fs.c | 6
-rw-r--r--  fs/jffs2/nodelist.c | 9
-rw-r--r--  fs/jffs2/readinode.c | 31
-rw-r--r--  fs/jffs2/write.c | 28
-rw-r--r--  include/linux/mtd/cfi.h | 12
-rw-r--r--  include/linux/mtd/mtdram.h | 8
-rw-r--r--  include/linux/mtd/partitions.h | 9
-rw-r--r--  include/linux/mtd/ubi.h | 1
-rw-r--r--  include/mtd/mtd-abi.h | 2
-rw-r--r--  include/mtd/ubi-user.h | 80
56 files changed, 2911 insertions(+), 1855 deletions(-)
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 8848e8ac705..e8503341e3b 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -150,6 +150,14 @@ config MTD_AFS_PARTS
for your particular device. It won't happen automatically. The
'armflash' map driver (CONFIG_MTD_ARMFLASH) does this, for example.
+config MTD_OF_PARTS
+ tristate "Flash partition map based on OF description"
+ depends on PPC_OF && MTD_PARTITIONS
+ help
+ This provides a partition parsing function which derives
+ the partition map from the children of the flash node,
+ as described in Documentation/powerpc/booting-without-of.txt.
+
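(Illustrative note, not part of the patch: the flash node layout that MTD_OF_PARTS parses follows Documentation/powerpc/booting-without-of.txt, with one child node per partition described by a reg = <offset size> pair, an optional label and an optional read-only flag. A minimal sketch, with made-up node names, offsets and sizes:

	flash@ff000000 {
		compatible = "cfi-flash";	/* assumed binding string */
		reg = <0xff000000 0x01000000>;
		bank-width = <2>;
		#address-cells = <1>;
		#size-cells = <1>;

		/* each child node becomes one MTD partition */
		partition@0 {
			label = "u-boot";
			reg = <0x0 0x40000>;
			read-only;
		};
		partition@40000 {
			label = "kernel";
			reg = <0x40000 0x3c0000>;
		};
	};
)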
comment "User Modules And Translation Layers"
config MTD_CHAR
@@ -286,6 +294,9 @@ config MTD_OOPS
buffer in a flash partition where it can be read back at some
later point.
+ To use, add console=ttyMTDx to the kernel command line,
+ where x is the MTD device number to use.
+
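(Illustrative usage, not part of the patch: booting with console=ttyMTD5 selects MTD device 5 as the mtdoops target; the index 5 is made up and depends on the board's partition layout.)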
source "drivers/mtd/chips/Kconfig"
source "drivers/mtd/maps/Kconfig"
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 7f0b04b4caa..538e33d11d4 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_CONCAT) += mtdconcat.o
obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
+obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1707f98c322..47794d23a42 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -50,6 +50,7 @@
#define I82802AC 0x00ac
#define MANUFACTURER_ST 0x0020
#define M50LPW080 0x002F
+#define AT49BV640D 0x02de
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -157,6 +158,47 @@ static void cfi_tell_features(struct cfi_pri_intelext *extp)
}
#endif
+/* Atmel chips don't use the same PRI format as Intel chips */
+static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
+{
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+ struct cfi_pri_atmel atmel_pri;
+ uint32_t features = 0;
+
+ /* Reverse byteswapping */
+ extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
+ extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
+ extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
+
+ memcpy(&atmel_pri, extp, sizeof(atmel_pri));
+ memset((char *)extp + 5, 0, sizeof(*extp) - 5);
+
+ printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
+
+ if (atmel_pri.Features & 0x01) /* chip erase supported */
+ features |= (1<<0);
+ if (atmel_pri.Features & 0x02) /* erase suspend supported */
+ features |= (1<<1);
+ if (atmel_pri.Features & 0x04) /* program suspend supported */
+ features |= (1<<2);
+ if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
+ features |= (1<<9);
+ if (atmel_pri.Features & 0x20) /* page mode read supported */
+ features |= (1<<7);
+ if (atmel_pri.Features & 0x40) /* queued erase supported */
+ features |= (1<<4);
+ if (atmel_pri.Features & 0x80) /* Protection bits supported */
+ features |= (1<<6);
+
+ extp->FeatureSupport = features;
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
+}
+
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
@@ -227,13 +269,20 @@ static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
/*
* Some chips power-up with all sectors locked by default.
*/
-static void fixup_use_powerup_lock(struct mtd_info *mtd, void *param)
+static void fixup_unlock_powerup_lock(struct mtd_info *mtd, void *param)
{
- printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
- mtd->flags |= MTD_STUPID_LOCK;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
+
+ if (cfip->FeatureSupport&32) {
+ printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
+ mtd->flags |= MTD_POWERUP_LOCK;
+ }
}
static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
@@ -245,7 +294,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
#endif
{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
- { MANUFACTURER_INTEL, 0x891c, fixup_use_powerup_lock, NULL, },
+ { MANUFACTURER_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock, NULL, },
{ 0, 0, NULL, NULL }
};
@@ -277,7 +326,7 @@ read_pri_intelext(struct map_info *map, __u16 adr)
return NULL;
if (extp->MajorVersion != '1' ||
- (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
+ (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
"version %c.%c.\n", extp->MajorVersion,
extp->MinorVersion);
@@ -752,6 +801,7 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
int ret;
+ DECLARE_WAITQUEUE(wait, current);
retry:
if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING
@@ -808,6 +858,20 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
spin_unlock(contender->mutex);
}
+ /* Check if we already have suspended erase
+ * on this chip. Sleep. */
+ if (mode == FL_ERASING && shared->erasing
+ && shared->erasing->oldstate == FL_ERASING) {
+ spin_unlock(&shared->lock);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ spin_lock(chip->mutex);
+ goto retry;
+ }
+
/* We now own it */
shared->writing = chip;
if (mode == FL_ERASING)
@@ -2294,7 +2358,7 @@ static int cfi_intelext_suspend(struct mtd_info *mtd)
struct flchip *chip;
int ret = 0;
- if ((mtd->flags & MTD_STUPID_LOCK)
+ if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_save_locks(mtd);
@@ -2405,7 +2469,7 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
spin_unlock(chip->mutex);
}
- if ((mtd->flags & MTD_STUPID_LOCK)
+ if ((mtd->flags & MTD_POWERUP_LOCK)
&& extp && (extp->FeatureSupport & (1 << 5)))
cfi_intelext_restore_locks(mtd);
}
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 389acc600f5..d072e87ce4e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -185,6 +185,10 @@ static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
extp->TopBottom = 2;
else
extp->TopBottom = 3;
+
+ /* burst write mode not supported */
+ cfi->cfiq->BufWriteTimeoutTyp = 0;
+ cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
@@ -213,10 +217,11 @@ static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
mtd->lock = cfi_atmel_lock;
mtd->unlock = cfi_atmel_unlock;
- mtd->flags |= MTD_STUPID_LOCK;
+ mtd->flags |= MTD_POWERUP_LOCK;
}
static struct cfi_fixup cfi_fixup_table[] = {
+ { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
@@ -229,7 +234,6 @@ static struct cfi_fixup cfi_fixup_table[] = {
#if !FORCE_WORD_WRITE
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
- { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
{ 0, 0, NULL, NULL }
};
static struct cfi_fixup jedec_fixup_table[] = {
@@ -338,10 +342,12 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
/* Modify the unlock address if we are in compatibility mode */
if ( /* x16 in x8 mode */
((cfi->device_type == CFI_DEVICETYPE_X8) &&
- (cfi->cfiq->InterfaceDesc == 2)) ||
+ (cfi->cfiq->InterfaceDesc ==
+ CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
/* x32 in x16 mode */
((cfi->device_type == CFI_DEVICETYPE_X16) &&
- (cfi->cfiq->InterfaceDesc == 4)))
+ (cfi->cfiq->InterfaceDesc ==
+ CFI_INTERFACE_X16_BY_X32_ASYNC)))
{
cfi->addr_unlock1 = 0xaaa;
cfi->addr_unlock2 = 0x555;
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index 60e11a0ada9..f651b6ef1c5 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -370,27 +370,27 @@ static void print_cfi_ident(struct cfi_ident *cfip)
printk("Device size: 0x%X bytes (%d MiB)\n", 1 << cfip->DevSize, 1<< (cfip->DevSize - 20));
printk("Flash Device Interface description: 0x%4.4X\n", cfip->InterfaceDesc);
switch(cfip->InterfaceDesc) {
- case 0:
+ case CFI_INTERFACE_X8_ASYNC:
printk(" - x8-only asynchronous interface\n");
break;
- case 1:
+ case CFI_INTERFACE_X16_ASYNC:
printk(" - x16-only asynchronous interface\n");
break;
- case 2:
+ case CFI_INTERFACE_X8_BY_X16_ASYNC:
printk(" - supports x8 and x16 via BYTE# with asynchronous interface\n");
break;
- case 3:
+ case CFI_INTERFACE_X32_ASYNC:
printk(" - x32-only asynchronous interface\n");
break;
- case 4:
+ case CFI_INTERFACE_X16_BY_X32_ASYNC:
printk(" - supports x16 and x32 via Word# with asynchronous interface\n");
break;
- case 65535:
+ case CFI_INTERFACE_NOT_ALLOWED:
printk(" - Not Allowed / Reserved\n");
break;
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index a67b23b87fc..4be51a86a85 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -194,8 +194,8 @@ enum uaddr {
struct unlock_addr {
- u32 addr1;
- u32 addr2;
+ uint32_t addr1;
+ uint32_t addr2;
};
@@ -246,16 +246,16 @@ static const struct unlock_addr unlock_addrs[] = {
}
};
-
struct amd_flash_info {
- const __u16 mfr_id;
- const __u16 dev_id;
const char *name;
- const int DevSize;
- const int NumEraseRegions;
- const int CmdSet;
- const __u8 uaddr[4]; /* unlock addrs for 8, 16, 32, 64 */
- const ulong regions[6];
+ const uint16_t mfr_id;
+ const uint16_t dev_id;
+ const uint8_t dev_size;
+ const uint8_t nr_regions;
+ const uint16_t cmd_set;
+ const uint32_t regions[6];
+ const uint8_t devtypes; /* Bitmask for x8, x16 etc. */
+ const uint8_t uaddr; /* unlock addrs for 8, 16, 32, 64 */
};
#define ERASEINFO(size,blocks) (size<<8)|(blocks-1)
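As a worked example of the region encoding above (values taken from the table below, expansion computed here, not part of the patch): ERASEINFO() packs the erase-block size into the upper bits and the block count minus one into the low eight bits.

	/* ERASEINFO(size, blocks) = (size << 8) | (blocks - 1)        */
	/* ERASEINFO(0x10000, 64)  = (0x10000 << 8) | 63 = 0x0100003F  */
	/* i.e. one region of 64 erase blocks of 64 KiB (0x10000) each */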
@@ -280,12 +280,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F032B,
.name = "AMD AM29F032B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,64)
}
@@ -293,13 +292,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV160DT,
.name = "AMD AM29LV160DT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -310,13 +307,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV160DB,
.name = "AMD AM29LV160DB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -327,13 +322,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV400BB,
.name = "AMD AM29LV400BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -344,13 +337,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV400BT,
.name = "AMD AM29LV400BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
@@ -361,13 +352,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV800BB,
.name = "AMD AM29LV800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -379,13 +368,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29DL800BB,
.name = "AMD AM29DL800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 6,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x08000,1),
@@ -398,13 +385,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29DL800BT,
.name = "AMD AM29DL800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 6,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 6,
.regions = {
ERASEINFO(0x10000,14),
ERASEINFO(0x04000,1),
@@ -417,13 +402,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F800BB,
.name = "AMD AM29F800BB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -434,13 +417,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV800BT,
.name = "AMD AM29LV800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -451,13 +432,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F800BT,
.name = "AMD AM29F800BT",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -468,12 +447,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F017D,
.name = "AMD AM29F017D",
- .uaddr = {
- [0] = MTD_UADDR_DONT_CARE /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -481,12 +459,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F016D,
.name = "AMD AM29F016D",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -494,12 +471,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F080,
.name = "AMD AM29F080",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -507,12 +483,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F040,
.name = "AMD AM29F040",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -520,12 +495,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29LV040B,
.name = "AMD AM29LV040B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -533,12 +507,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_AMD,
.dev_id = AM29F002T,
.name = "AMD AM29F002T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
@@ -549,12 +522,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV512,
.name = "Atmel AT49BV512",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_64KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,1)
}
@@ -562,12 +534,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT29LV512,
.name = "Atmel AT29LV512",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_64KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x80,256),
ERASEINFO(0x80,256)
@@ -576,13 +547,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV16X,
.name = "Atmel AT49BV16X",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,31)
@@ -591,13 +560,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV16XT,
.name = "Atmel AT49BV16XT",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x02000,8)
@@ -606,13 +573,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV32X,
.name = "Atmel AT49BV32X",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
@@ -621,13 +586,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ATMEL,
.dev_id = AT49BV32XT,
.name = "Atmel AT49BV32XT",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x0AAA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x0AAA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x0AAA, /* ???? */
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
@@ -636,12 +599,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29F040C,
.name = "Fujitsu MBM29F040C",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8)
}
@@ -649,13 +611,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29F800BA,
.name = "Fujitsu MBM29F800BA",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -666,12 +626,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV650UE,
.name = "Fujitsu MBM29LV650UE",
- .uaddr = {
- [0] = MTD_UADDR_DONT_CARE /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_DONT_CARE,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,128)
}
@@ -679,13 +638,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV320TE,
.name = "Fujitsu MBM29LV320TE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
@@ -694,13 +651,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV320BE,
.name = "Fujitsu MBM29LV320BE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
@@ -709,13 +664,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV160TE,
.name = "Fujitsu MBM29LV160TE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -726,13 +679,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV160BE,
.name = "Fujitsu MBM29LV160BE",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -743,13 +694,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV800BA,
.name = "Fujitsu MBM29LV800BA",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -760,13 +709,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV800TA,
.name = "Fujitsu MBM29LV800TA",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -777,13 +724,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV400BC,
.name = "Fujitsu MBM29LV400BC",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -794,13 +739,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_FUJITSU,
.dev_id = MBM29LV400TC,
.name = "Fujitsu MBM29LV400TC",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
@@ -811,12 +754,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_HYUNDAI,
.dev_id = HY29F002T,
.name = "Hyundai HY29F002T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
@@ -827,12 +769,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3B,
.name = "Intel 28F004B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
@@ -841,12 +782,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F004B3T,
.name = "Intel 28F004B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
@@ -855,13 +795,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F400B3B,
.name = "Intel 28F400B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 7),
@@ -870,13 +808,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F400B3T,
.name = "Intel 28F400B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 7),
ERASEINFO(0x02000, 8),
@@ -885,12 +821,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008B3B,
.name = "Intel 28F008B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
@@ -899,12 +834,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008B3T,
.name = "Intel 28F008B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
@@ -913,12 +847,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008S5,
.name = "Intel 28F008S5",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -926,12 +859,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016S5,
.name = "Intel 28F016S5",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -939,12 +871,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F008SA,
.name = "Intel 28F008SA",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 16),
}
@@ -952,12 +883,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F800B3B,
.name = "Intel 28F800B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 15),
@@ -966,12 +896,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F800B3T,
.name = "Intel 28F800B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 15),
ERASEINFO(0x02000, 8),
@@ -980,12 +909,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016B3B,
.name = "Intel 28F016B3B",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
@@ -994,12 +922,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016S3,
.name = "Intel I28F016S3",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000, 32),
}
@@ -1007,12 +934,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F016B3T,
.name = "Intel 28F016B3T",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
@@ -1021,12 +947,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F160B3B,
.name = "Intel 28F160B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 31),
@@ -1035,12 +960,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F160B3T,
.name = "Intel 28F160B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 31),
ERASEINFO(0x02000, 8),
@@ -1049,12 +973,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F320B3B,
.name = "Intel 28F320B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 63),
@@ -1063,12 +986,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F320B3T,
.name = "Intel 28F320B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 63),
ERASEINFO(0x02000, 8),
@@ -1077,12 +999,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F640B3B,
.name = "Intel 28F640B3B",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000, 8),
ERASEINFO(0x10000, 127),
@@ -1091,12 +1012,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I28F640B3T,
.name = "Intel 28F640B3T",
- .uaddr = {
- [1] = MTD_UADDR_UNNECESSARY, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000, 127),
ERASEINFO(0x02000, 8),
@@ -1105,12 +1025,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I82802AB,
.name = "Intel 82802AB",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1118,12 +1037,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_INTEL,
.dev_id = I82802AC,
.name = "Intel 82802AC",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -1131,12 +1049,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29LV040C,
.name = "Macronix MX29LV040C",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1144,13 +1061,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29LV160T,
.name = "MXIC MX29LV160T",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -1161,13 +1076,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_NEC,
.dev_id = UPD29F064115,
.name = "NEC uPD29F064115",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 3,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 3,
.regions = {
ERASEINFO(0x2000,8),
ERASEINFO(0x10000,126),
@@ -1177,13 +1090,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29LV160B,
.name = "MXIC MX29LV160B",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1194,12 +1105,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F040,
.name = "Macronix MX29F040",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1207,12 +1117,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F016,
.name = "Macronix MX29F016",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -1220,12 +1129,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F004T,
.name = "Macronix MX29F004T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,7),
ERASEINFO(0x08000,1),
@@ -1236,12 +1144,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F004B,
.name = "Macronix MX29F004B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1252,12 +1159,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_MACRONIX,
.dev_id = MX29F002T,
.name = "Macronix MX29F002T",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,3),
ERASEINFO(0x08000,1),
@@ -1268,12 +1174,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL002,
.name = "PMC Pm49FL002",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 64 )
}
@@ -1281,12 +1186,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL004,
.name = "PMC Pm49FL004",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 128 )
}
@@ -1294,12 +1198,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_PMC,
.dev_id = PM49FL008,
.name = "PMC Pm49FL008",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO( 0x01000, 256 )
}
@@ -1307,25 +1210,23 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SHARP,
.dev_id = LH28F640BF,
.name = "LH28F640BF",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_INTEL_STD,
- .NumEraseRegions= 1,
- .regions = {
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_INTEL_STD,
+ .nr_regions = 1,
+ .regions = {
ERASEINFO(0x40000,16),
}
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF512,
.name = "SST 39LF512",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_64KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_64KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,16),
}
@@ -1333,12 +1234,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF010,
.name = "SST 39LF010",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_128KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
@@ -1346,36 +1246,33 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST29EE020,
.name = "SST 29EE020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_SST_PAGE,
- .NumEraseRegions= 1,
- .regions = {ERASEINFO(0x01000,64),
- }
- }, {
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {ERASEINFO(0x01000,64),
+ }
+ }, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST29LE020,
.name = "SST 29LE020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_SST_PAGE,
- .NumEraseRegions= 1,
- .regions = {ERASEINFO(0x01000,64),
- }
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_SST_PAGE,
+ .nr_regions = 1,
+ .regions = {ERASEINFO(0x01000,64),
+ }
}, {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF020,
.name = "SST 39LF020",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
@@ -1383,12 +1280,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39LF040,
.name = "SST 39LF040",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
@@ -1396,12 +1292,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39SF010A,
.name = "SST 39SF010A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_128KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_128KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,32),
}
@@ -1409,26 +1304,24 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST39SF020A,
.name = "SST 39SF020A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,64),
}
}, {
.mfr_id = MANUFACTURER_SST,
- .dev_id = SST49LF040B,
- .name = "SST 49LF040B",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
- .regions = {
+ .dev_id = SST49LF040B,
+ .name = "SST 49LF040B",
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
+ .regions = {
ERASEINFO(0x01000,128),
}
}, {
@@ -1436,12 +1329,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF004B,
.name = "SST 49LF004B",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
@@ -1449,12 +1341,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF008A,
.name = "SST 49LF008A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
@@ -1462,12 +1353,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF030A,
.name = "SST 49LF030A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,96),
}
@@ -1475,12 +1365,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF040A,
.name = "SST 49LF040A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,128),
}
@@ -1488,57 +1377,49 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_SST,
.dev_id = SST49LF080A,
.name = "SST 49LF080A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x01000,256),
}
}, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
- .dev_id = SST39LF160,
- .name = "SST 39LF160",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
- .regions = {
- ERASEINFO(0x1000,256),
- ERASEINFO(0x1000,256)
- }
- }, {
- .mfr_id = MANUFACTURER_SST, /* should be CFI */
- .dev_id = SST39VF1601,
- .name = "SST 39VF1601",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
- .regions = {
- ERASEINFO(0x1000,256),
- ERASEINFO(0x1000,256)
- }
-
+ .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .dev_id = SST39LF160,
+ .name = "SST 39LF160",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
+ }, {
+ .mfr_id = MANUFACTURER_SST, /* should be CFI */
+ .dev_id = SST39VF1601,
+ .name = "SST 39VF1601",
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
+ .regions = {
+ ERASEINFO(0x1000,256),
+ ERASEINFO(0x1000,256)
+ }
}, {
.mfr_id = MANUFACTURER_ST,
.dev_id = M29F800AB,
.name = "ST M29F800AB",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1549,13 +1430,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W800DT,
.name = "ST M29W800DT",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,15),
ERASEINFO(0x08000,1),
@@ -1566,13 +1445,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W800DB,
.name = "ST M29W800DB",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA, /* x8 */
- [1] = MTD_UADDR_0x5555_0x2AAA /* x16 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA, /* ???? */
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1583,13 +1460,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W160DT,
.name = "ST M29W160DT",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -1600,13 +1475,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST, /* FIXME - CFI device? */
.dev_id = M29W160DB,
.name = "ST M29W160DB",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA, /* ???? */
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1617,12 +1490,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M29W040B,
.name = "ST M29W040B",
- .uaddr = {
- [0] = MTD_UADDR_0x0555_0x02AA /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0555_0x02AA,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1630,12 +1502,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50FW040,
.name = "ST M50FW040",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_512KiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_512KiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,8),
}
@@ -1643,12 +1514,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50FW080,
.name = "ST M50FW080",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -1656,12 +1526,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50FW016,
.name = "ST M50FW016",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,32),
}
@@ -1669,12 +1538,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_ST,
.dev_id = M50LPW080,
.name = "ST M50LPW080",
- .uaddr = {
- [0] = MTD_UADDR_UNNECESSARY, /* x8 */
- },
- .DevSize = SIZE_1MiB,
- .CmdSet = P_ID_INTEL_EXT,
- .NumEraseRegions= 1,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_UNNECESSARY,
+ .dev_size = SIZE_1MiB,
+ .cmd_set = P_ID_INTEL_EXT,
+ .nr_regions = 1,
.regions = {
ERASEINFO(0x10000,16),
}
@@ -1682,13 +1550,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT160,
.name = "Toshiba TC58FVT160",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000,31),
ERASEINFO(0x08000,1),
@@ -1699,13 +1565,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVB160,
.name = "Toshiba TC58FVB160",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_2MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_2MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x04000,1),
ERASEINFO(0x02000,2),
@@ -1716,13 +1580,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVB321,
.name = "Toshiba TC58FVB321",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,63)
@@ -1731,13 +1593,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT321,
.name = "Toshiba TC58FVT321",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA /* x16 */
- },
- .DevSize = SIZE_4MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_4MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,63),
ERASEINFO(0x02000,8)
@@ -1746,13 +1606,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVB641,
.name = "Toshiba TC58FVB641",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x02000,8),
ERASEINFO(0x10000,127)
@@ -1761,13 +1619,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_TOSHIBA,
.dev_id = TC58FVT641,
.name = "Toshiba TC58FVT641",
- .uaddr = {
- [0] = MTD_UADDR_0x0AAA_0x0555, /* x8 */
- [1] = MTD_UADDR_0x0555_0x02AA, /* x16 */
- },
- .DevSize = SIZE_8MiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 2,
+ .devtypes = CFI_DEVICETYPE_X16|CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x0AAA_0x0555,
+ .dev_size = SIZE_8MiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 2,
.regions = {
ERASEINFO(0x10000,127),
ERASEINFO(0x02000,8)
@@ -1776,12 +1632,11 @@ static const struct amd_flash_info jedec_table[] = {
.mfr_id = MANUFACTURER_WINBOND,
.dev_id = W49V002A,
.name = "Winbond W49V002A",
- .uaddr = {
- [0] = MTD_UADDR_0x5555_0x2AAA /* x8 */
- },
- .DevSize = SIZE_256KiB,
- .CmdSet = P_ID_AMD_STD,
- .NumEraseRegions= 4,
+ .devtypes = CFI_DEVICETYPE_X8,
+ .uaddr = MTD_UADDR_0x5555_0x2AAA,
+ .dev_size = SIZE_256KiB,
+ .cmd_set = P_ID_AMD_STD,
+ .nr_regions = 4,
.regions = {
ERASEINFO(0x10000, 3),
ERASEINFO(0x08000, 1),
@@ -1791,15 +1646,7 @@ static const struct amd_flash_info jedec_table[] = {
}
};
-
-static int cfi_jedec_setup(struct cfi_private *p_cfi, int index);
-
-static int jedec_probe_chip(struct map_info *map, __u32 base,
- unsigned long *chip_map, struct cfi_private *cfi);
-
-static struct mtd_info *jedec_probe(struct map_info *map);
-
-static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
+static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
struct cfi_private *cfi)
{
map_word result;
@@ -1810,7 +1657,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, __u32 base,
return result.x[0] & mask;
}
-static inline u32 jedec_read_id(struct map_info *map, __u32 base,
+static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
struct cfi_private *cfi)
{
map_word result;
@@ -1821,8 +1668,7 @@ static inline u32 jedec_read_id(struct map_info *map, __u32 base,
return result.x[0] & mask;
}
-static inline void jedec_reset(u32 base, struct map_info *map,
- struct cfi_private *cfi)
+static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
{
/* Reset */
@@ -1832,7 +1678,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
* 0x2aaa, 0xF0 at 0x5555 this will not affect the AMD chips
* as they will ignore the writes and dont care what address
* the F0 is written to */
- if(cfi->addr_unlock1) {
+ if (cfi->addr_unlock1) {
DEBUG( MTD_DEBUG_LEVEL3,
"reset unlock called %x %x \n",
cfi->addr_unlock1,cfi->addr_unlock2);
@@ -1841,7 +1687,7 @@ static inline void jedec_reset(u32 base, struct map_info *map,
}
cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
- /* Some misdesigned intel chips do not respond for 0xF0 for a reset,
+ /* Some misdesigned Intel chips do not respond for 0xF0 for a reset,
* so ensure we're in read mode. Send both the Intel and the AMD command
* for this. Intel uses 0xff for this, AMD uses 0xff for NOP, so
* this should be safe.
@@ -1851,42 +1697,20 @@ static inline void jedec_reset(u32 base, struct map_info *map,
}
-static inline __u8 finfo_uaddr(const struct amd_flash_info *finfo, int device_type)
-{
- int uaddr_idx;
- __u8 uaddr = MTD_UADDR_NOT_SUPPORTED;
-
- switch ( device_type ) {
- case CFI_DEVICETYPE_X8: uaddr_idx = 0; break;
- case CFI_DEVICETYPE_X16: uaddr_idx = 1; break;
- case CFI_DEVICETYPE_X32: uaddr_idx = 2; break;
- default:
- printk(KERN_NOTICE "MTD: %s(): unknown device_type %d\n",
- __func__, device_type);
- goto uaddr_done;
- }
-
- uaddr = finfo->uaddr[uaddr_idx];
-
- if (uaddr != MTD_UADDR_NOT_SUPPORTED ) {
- /* ASSERT("The unlock addresses for non-8-bit mode
- are bollocks. We don't really need an array."); */
- uaddr = finfo->uaddr[0];
- }
-
- uaddr_done:
- return uaddr;
-}
-
-
static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
{
int i,num_erase_regions;
- __u8 uaddr;
+ uint8_t uaddr;
- printk("Found: %s\n",jedec_table[index].name);
+ if (! (jedec_table[index].devtypes & p_cfi->device_type)) {
+ DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
+ jedec_table[index].name, 4 * (1<<p_cfi->device_type));
+ return 0;
+ }
+
+ printk(KERN_INFO "Found: %s\n",jedec_table[index].name);
- num_erase_regions = jedec_table[index].NumEraseRegions;
+ num_erase_regions = jedec_table[index].nr_regions;
p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
if (!p_cfi->cfiq) {
@@ -1896,9 +1720,9 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
memset(p_cfi->cfiq,0,sizeof(struct cfi_ident));
- p_cfi->cfiq->P_ID = jedec_table[index].CmdSet;
- p_cfi->cfiq->NumEraseRegions = jedec_table[index].NumEraseRegions;
- p_cfi->cfiq->DevSize = jedec_table[index].DevSize;
+ p_cfi->cfiq->P_ID = jedec_table[index].cmd_set;
+ p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
+ p_cfi->cfiq->DevSize = jedec_table[index].dev_size;
p_cfi->cfi_mode = CFI_MODE_JEDEC;
for (i=0; i<num_erase_regions; i++){
@@ -1910,14 +1734,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
p_cfi->mfr = jedec_table[index].mfr_id;
p_cfi->id = jedec_table[index].dev_id;
- uaddr = finfo_uaddr(&jedec_table[index], p_cfi->device_type);
- if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
- kfree( p_cfi->cfiq );
- return 0;
- }
+ uaddr = jedec_table[index].uaddr;
- p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1;
- p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2;
+ /* The table has unlock addresses in _bytes_, and we try not to let
+ our brains explode when we see the datasheets talking about address
+ lines numbered from A-1 to A18. The CFI table has unlock addresses
+ in device-words according to the mode the device is connected in */
+ p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type;
+ p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type;
return 1; /* ok */
}
@@ -1930,14 +1754,14 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
* be perfect - consequently there should be some module parameters that
* could be manually specified to force the chip info.
*/
-static inline int jedec_match( __u32 base,
+static inline int jedec_match( uint32_t base,
struct map_info *map,
struct cfi_private *cfi,
const struct amd_flash_info *finfo )
{
int rc = 0; /* failure until all tests pass */
u32 mfr, id;
- __u8 uaddr;
+ uint8_t uaddr;
/*
* The IDs must match. For X16 and X32 devices operating in
@@ -1950,8 +1774,8 @@ static inline int jedec_match( __u32 base,
*/
switch (cfi->device_type) {
case CFI_DEVICETYPE_X8:
- mfr = (__u8)finfo->mfr_id;
- id = (__u8)finfo->dev_id;
+ mfr = (uint8_t)finfo->mfr_id;
+ id = (uint8_t)finfo->dev_id;
/* bjd: it seems that if we do this, we can end up
* detecting 16bit flashes as an 8bit device, even though
@@ -1964,12 +1788,12 @@ static inline int jedec_match( __u32 base,
}
break;
case CFI_DEVICETYPE_X16:
- mfr = (__u16)finfo->mfr_id;
- id = (__u16)finfo->dev_id;
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint16_t)finfo->dev_id;
break;
case CFI_DEVICETYPE_X32:
- mfr = (__u16)finfo->mfr_id;
- id = (__u32)finfo->dev_id;
+ mfr = (uint16_t)finfo->mfr_id;
+ id = (uint32_t)finfo->dev_id;
break;
default:
printk(KERN_WARNING
@@ -1984,25 +1808,25 @@ static inline int jedec_match( __u32 base,
/* the part size must fit in the memory window */
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): Check fit 0x%.8x + 0x%.8x = 0x%.8x\n",
- __func__, base, 1 << finfo->DevSize, base + (1 << finfo->DevSize) );
- if ( base + cfi_interleave(cfi) * ( 1 << finfo->DevSize ) > map->size ) {
+ __func__, base, 1 << finfo->dev_size, base + (1 << finfo->dev_size) );
+ if ( base + cfi_interleave(cfi) * ( 1 << finfo->dev_size ) > map->size ) {
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): 0x%.4x 0x%.4x %dKiB doesn't fit\n",
__func__, finfo->mfr_id, finfo->dev_id,
- 1 << finfo->DevSize );
+ 1 << finfo->dev_size );
goto match_done;
}
- uaddr = finfo_uaddr(finfo, cfi->device_type);
- if ( uaddr == MTD_UADDR_NOT_SUPPORTED ) {
+ if (! (finfo->devtypes & cfi->device_type))
goto match_done;
- }
+
+ uaddr = finfo->uaddr;
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): check unlock addrs 0x%.4x 0x%.4x\n",
__func__, cfi->addr_unlock1, cfi->addr_unlock2 );
if ( MTD_UADDR_UNNECESSARY != uaddr && MTD_UADDR_DONT_CARE != uaddr
- && ( unlock_addrs[uaddr].addr1 != cfi->addr_unlock1 ||
- unlock_addrs[uaddr].addr2 != cfi->addr_unlock2 ) ) {
+ && ( unlock_addrs[uaddr].addr1 / cfi->device_type != cfi->addr_unlock1 ||
+ unlock_addrs[uaddr].addr2 / cfi->device_type != cfi->addr_unlock2 ) ) {
DEBUG( MTD_DEBUG_LEVEL3,
"MTD %s(): 0x%.4x 0x%.4x did not match\n",
__func__,
@@ -2042,7 +1866,7 @@ static inline int jedec_match( __u32 base,
* were truly frobbing a real device.
*/
DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): return to ID mode\n", __func__ );
- if(cfi->addr_unlock1) {
+ if (cfi->addr_unlock1) {
cfi_send_gen_cmd(0xaa, cfi->addr_unlock1, base, map, cfi, cfi->device_type, NULL);
cfi_send_gen_cmd(0x55, cfi->addr_unlock2, base, map, cfi, cfi->device_type, NULL);
}
@@ -2068,8 +1892,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
if (MTD_UADDR_UNNECESSARY == uaddr_idx)
return 0;
- cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1;
- cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2;
+ cfi->addr_unlock1 = unlock_addrs[uaddr_idx].addr1 / cfi->device_type;
+ cfi->addr_unlock2 = unlock_addrs[uaddr_idx].addr2 / cfi->device_type;
}
/* Make certain we aren't probing past the end of map */
@@ -2081,19 +1905,11 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
}
/* Ensure the unlock addresses we try stay inside the map */
- probe_offset1 = cfi_build_cmd_addr(
- cfi->addr_unlock1,
- cfi_interleave(cfi),
- cfi->device_type);
- probe_offset2 = cfi_build_cmd_addr(
- cfi->addr_unlock1,
- cfi_interleave(cfi),
- cfi->device_type);
+ probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
+ probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
((base + probe_offset2 + map_bankwidth(map)) >= map->size))
- {
goto retry;
- }
/* Reset */
jedec_reset(base, map, cfi);
@@ -2128,8 +1944,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
}
goto retry;
} else {
- __u16 mfr;
- __u16 id;
+ uint16_t mfr;
+ uint16_t id;
/* Make sure it is a chip of the same manufacturer and id */
mfr = jedec_read_mfr(map, base, cfi);
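
The hunks above collapse the old per-width uaddr[] array into a single
byte-addressed unlock entry plus a devtypes mask, and then divide those byte
addresses by the device width before handing them to the CFI layer. A minimal
sketch of that conversion (the helper name is invented for illustration; the
width codes and the unlock table come from the driver):

        /* CFI_DEVICETYPE_X8/X16/X32 are the device width in bytes (1, 2, 4),
         * so dividing a byte-based unlock address by the width code yields
         * the device-word address assigned in cfi_jedec_setup() above. */
        static unsigned int jedec_uaddr_to_words(unsigned int byte_addr,
                                                 int device_type)
        {
                return byte_addr / device_type;
        }

The arithmetic reproduces the per-width pairs being deleted: the x8 entry
0x0AAA/0x0555 divided by two gives the old x16 entry 0x0555/0x02AA.
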
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index 23fab14f163..b44292abd9f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -9,7 +9,7 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * <partdef> := <size>[@offset][<name>][ro]
+ * <partdef> := <size>[@offset][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
* <name> := '(' NAME ')'
@@ -143,6 +143,13 @@ static struct mtd_partition * newpart(char *s,
s += 2;
}
+ /* if lk is found do NOT unlock the MTD partition */
+ if (strncmp(s, "lk", 2) == 0)
+ {
+ mask_flags |= MTD_POWERUP_LOCK;
+ s += 2;
+ }
+
/* test if more partitions are following */
if (*s == ',')
{
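
With the extended <partdef> grammar above, a flag-carrying entry could look
like the following (the mtd-id, sizes and partition names are made up for
illustration). "ro" marks the partition read-only as before, and the new "lk"
suffix adds MTD_POWERUP_LOCK to mask_flags so, per the comment in the hunk,
the core will not unlock that partition automatically:

        mtdparts=physmap-flash.0:256k(u-boot)rolk,2m(kernel),-(rootfs)
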
diff --git a/drivers/mtd/devices/lart.c b/drivers/mtd/devices/lart.c
index 4ea50a1dda8..99fd210feae 100644
--- a/drivers/mtd/devices/lart.c
+++ b/drivers/mtd/devices/lart.c
@@ -323,7 +323,7 @@ static int flash_probe (void)
/* put the flash back into command mode */
write32 (DATA_TO_FLASH (READ_ARRAY),0x00000000);
- return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || FLASH_DEVICE_16mbit_BOTTOM));
+ return (manufacturer == FLASH_MANUFACTURER && (devtype == FLASH_DEVICE_16mbit_TOP || devtype == FLASH_DEVICE_16mbit_BOTTOM));
}
/*
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a5ed6d232c3..b35e4813a3a 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -420,7 +420,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
status = dataflash_waitready(priv->spi);
/* Check result of the compare operation */
- if ((status & (1 << 6)) == 1) {
+ if (status & (1 << 6)) {
printk(KERN_ERR "%s: compare page %u, err %d\n",
spi->dev.bus_id, pageaddr, status);
remaining = 0;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index a592fc04cf7..12c253664eb 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -110,13 +110,6 @@ config MTD_SUN_UFLASH
Sun Microsystems boardsets. This driver will require CFI support
in the kernel, so if you did not enable CFI previously, do that now.
-config MTD_PNC2000
- tristate "CFI Flash device mapped on Photron PNC-2000"
- depends on X86 && MTD_CFI && MTD_PARTITIONS
- help
- PNC-2000 is the name of Network Camera product from PHOTRON
- Ltd. in Japan. It uses CFI-compliant flash.
-
config MTD_SC520CDP
tristate "CFI Flash device mapped on AMD SC520 CDP"
depends on X86 && MTD_CFI && MTD_CONCAT
@@ -576,7 +569,7 @@ config MTD_BAST_MAXSIZE
default "4"
config MTD_SHARP_SL
- bool "ROM mapped on Sharp SL Series"
+ tristate "ROM mapped on Sharp SL Series"
depends on ARCH_PXA
help
This enables access to the flash chip on the Sharp SL Series of PDAs.
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 316382a1401..a9cbe80f99a 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -28,7 +28,6 @@ obj-$(CONFIG_MTD_PHYSMAP) += physmap.o
obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o
obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o
obj-$(CONFIG_MTD_PMC_MSP_RAMROOT)+= pmcmsp-ramroot.o
-obj-$(CONFIG_MTD_PNC2000) += pnc2000.o
obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o
obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o
obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index aeed9ea7971..49acd417189 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -80,64 +80,6 @@ static int parse_obsolete_partitions(struct of_device *dev,
return nr_parts;
}
-
-static int __devinit parse_partitions(struct of_flash *info,
- struct of_device *dev)
-{
- const char *partname;
- static const char *part_probe_types[]
- = { "cmdlinepart", "RedBoot", NULL };
- struct device_node *dp = dev->node, *pp;
- int nr_parts, i;
-
- /* First look for RedBoot table or partitions on the command
- * line, these take precedence over device tree information */
- nr_parts = parse_mtd_partitions(info->mtd, part_probe_types,
- &info->parts, 0);
- if (nr_parts > 0) {
- add_mtd_partitions(info->mtd, info->parts, nr_parts);
- return 0;
- }
-
- /* First count the subnodes */
- nr_parts = 0;
- for (pp = dp->child; pp; pp = pp->sibling)
- nr_parts++;
-
- if (nr_parts == 0)
- return parse_obsolete_partitions(dev, info, dp);
-
- info->parts = kzalloc(nr_parts * sizeof(*info->parts),
- GFP_KERNEL);
- if (!info->parts)
- return -ENOMEM;
-
- for (pp = dp->child, i = 0; pp; pp = pp->sibling, i++) {
- const u32 *reg;
- int len;
-
- reg = of_get_property(pp, "reg", &len);
- if (!reg || (len != 2*sizeof(u32))) {
- dev_err(&dev->dev, "Invalid 'reg' on %s\n",
- dp->full_name);
- kfree(info->parts);
- info->parts = NULL;
- return -EINVAL;
- }
- info->parts[i].offset = reg[0];
- info->parts[i].size = reg[1];
-
- partname = of_get_property(pp, "label", &len);
- if (!partname)
- partname = of_get_property(pp, "name", &len);
- info->parts[i].name = (char *)partname;
-
- if (of_get_property(pp, "read-only", &len))
- info->parts[i].mask_flags = MTD_WRITEABLE;
- }
-
- return nr_parts;
-}
#else /* MTD_PARTITIONS */
#define OF_FLASH_PARTS(info) (0)
#define parse_partitions(info, dev) (0)
@@ -212,6 +154,10 @@ static struct mtd_info * __devinit obsolete_probe(struct of_device *dev,
static int __devinit of_flash_probe(struct of_device *dev,
const struct of_device_id *match)
{
+#ifdef CONFIG_MTD_PARTITIONS
+ static const char *part_probe_types[]
+ = { "cmdlinepart", "RedBoot", NULL };
+#endif
struct device_node *dp = dev->node;
struct resource res;
struct of_flash *info;
@@ -274,13 +220,33 @@ static int __devinit of_flash_probe(struct of_device *dev,
}
info->mtd->owner = THIS_MODULE;
- err = parse_partitions(info, dev);
+#ifdef CONFIG_MTD_PARTITIONS
+ /* First look for RedBoot table or partitions on the command
+ * line, these take precedence over device tree information */
+ err = parse_mtd_partitions(info->mtd, part_probe_types,
+ &info->parts, 0);
if (err < 0)
- goto err_out;
+ return err;
+
+#ifdef CONFIG_MTD_OF_PARTS
+ if (err == 0) {
+ err = of_mtd_parse_partitions(&dev->dev, info->mtd,
+ dp, &info->parts);
+ if (err < 0)
+ return err;
+ }
+#endif
+
+ if (err == 0) {
+ err = parse_obsolete_partitions(dev, info, dp);
+ if (err < 0)
+ return err;
+ }
if (err > 0)
- add_mtd_partitions(info->mtd, OF_FLASH_PARTS(info), err);
+ add_mtd_partitions(info->mtd, info->parts, err);
else
+#endif
add_mtd_device(info->mtd);
return 0;
diff --git a/drivers/mtd/maps/pnc2000.c b/drivers/mtd/maps/pnc2000.c
deleted file mode 100644
index d7e16c2d5c4..00000000000
--- a/drivers/mtd/maps/pnc2000.c
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * pnc2000.c - mapper for Photron PNC-2000 board.
- *
- * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
- *
- * This code is GPL
- *
- * $Id: pnc2000.c,v 1.18 2005/11/07 11:14:28 gleixner Exp $
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-
-#define WINDOW_ADDR 0xbf000000
-#define WINDOW_SIZE 0x00400000
-
-/*
- * MAP DRIVER STUFF
- */
-
-
-static struct map_info pnc_map = {
- .name = "PNC-2000",
- .size = WINDOW_SIZE,
- .bankwidth = 4,
- .phys = 0xFFFFFFFF,
- .virt = (void __iomem *)WINDOW_ADDR,
-};
-
-
-/*
- * MTD 'PARTITIONING' STUFF
- */
-static struct mtd_partition pnc_partitions[3] = {
- {
- .name = "PNC-2000 boot firmware",
- .size = 0x20000,
- .offset = 0
- },
- {
- .name = "PNC-2000 kernel",
- .size = 0x1a0000,
- .offset = 0x20000
- },
- {
- .name = "PNC-2000 filesystem",
- .size = 0x240000,
- .offset = 0x1c0000
- }
-};
-
-/*
- * This is the master MTD device for which all the others are just
- * auto-relocating aliases.
- */
-static struct mtd_info *mymtd;
-
-static int __init init_pnc2000(void)
-{
- printk(KERN_NOTICE "Photron PNC-2000 flash mapping: %x at %x\n", WINDOW_SIZE, WINDOW_ADDR);
-
- simple_map_init(&pnc_map);
-
- mymtd = do_map_probe("cfi_probe", &pnc_map);
- if (mymtd) {
- mymtd->owner = THIS_MODULE;
- return add_mtd_partitions(mymtd, pnc_partitions, 3);
- }
-
- return -ENXIO;
-}
-
-static void __exit cleanup_pnc2000(void)
-{
- if (mymtd) {
- del_mtd_partitions(mymtd);
- map_destroy(mymtd);
- }
-}
-
-module_init(init_pnc2000);
-module_exit(cleanup_pnc2000);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp>");
-MODULE_DESCRIPTION("MTD map driver for Photron PNC-2000 board");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index dcfb85840d1..0fc5584324e 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -79,7 +79,7 @@ scb2_fixup_mtd(struct mtd_info *mtd)
struct cfi_private *cfi = map->fldrv_priv;
/* barf if this doesn't look right */
- if (cfi->cfiq->InterfaceDesc != 1) {
+ if (cfi->cfiq->InterfaceDesc != CFI_INTERFACE_X16_ASYNC) {
printk(KERN_ERR MODNAME ": unsupported InterfaceDesc: %#x\n",
cfi->cfiq->InterfaceDesc);
return -1;
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 74d9d30edab..839eed8430a 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -248,9 +248,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
return -EBUSY;
}
- mutex_init(&new->lock);
list_add_tail(&new->list, &tr->devs);
added:
+ mutex_init(&new->lock);
if (!tr->writesect)
new->readonly = 1;
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index a0cee86464c..5d3ac512ce1 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -481,6 +481,7 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
{
struct mtd_oob_buf buf;
struct mtd_oob_ops ops;
+ uint32_t retlen;
if(!(file->f_mode & 2))
return -EPERM;
@@ -520,8 +521,11 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
buf.start &= ~(mtd->oobsize - 1);
ret = mtd->write_oob(mtd, buf.start, &ops);
- if (copy_to_user(argp + sizeof(uint32_t), &ops.oobretlen,
- sizeof(uint32_t)))
+ if (ops.oobretlen > 0xFFFFFFFFU)
+ ret = -EOVERFLOW;
+ retlen = ops.oobretlen;
+ if (copy_to_user(&((struct mtd_oob_buf *)argp)->length,
+ &retlen, sizeof(buf.length)))
ret = -EFAULT;
kfree(ops.oobbuf);
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 6c2645e2837..f7e7890e5bc 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -61,7 +61,7 @@ int add_mtd_device(struct mtd_info *mtd)
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_STUPID_LOCK) && mtd->unlock) {
+ && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
if (mtd->unlock(mtd, 0, mtd->size))
printk(KERN_WARNING
"%s: unlock failed, "
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index f8af627f0b9..34681bc9110 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -28,19 +28,24 @@
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
+#include <linux/spinlock.h>
#include <linux/mtd/mtd.h>
#define OOPS_PAGE_SIZE 4096
-static struct mtdoops_context {
+struct mtdoops_context {
int mtd_index;
- struct work_struct work;
+ struct work_struct work_erase;
+ struct work_struct work_write;
struct mtd_info *mtd;
int oops_pages;
int nextpage;
int nextcount;
void *oops_buf;
+
+ /* writecount and disabling ready are spin lock protected */
+ spinlock_t writecount_lock;
int ready;
int writecount;
} oops_cxt;
@@ -62,10 +67,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
erase.mtd = mtd;
erase.callback = mtdoops_erase_callback;
erase.addr = offset;
- if (mtd->erasesize < OOPS_PAGE_SIZE)
- erase.len = OOPS_PAGE_SIZE;
- else
- erase.len = mtd->erasesize;
+ erase.len = mtd->erasesize;
erase.priv = (u_long)&wait_q;
set_current_state(TASK_INTERRUPTIBLE);
@@ -87,7 +89,7 @@ static int mtdoops_erase_block(struct mtd_info *mtd, int offset)
return 0;
}
-static int mtdoops_inc_counter(struct mtdoops_context *cxt)
+static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
size_t retlen;
@@ -103,25 +105,30 @@ static int mtdoops_inc_counter(struct mtdoops_context *cxt)
ret = mtd->read(mtd, cxt->nextpage * OOPS_PAGE_SIZE, 4,
&retlen, (u_char *) &count);
- if ((retlen != 4) || (ret < 0)) {
+ if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
", err %d.\n", cxt->nextpage * OOPS_PAGE_SIZE,
retlen, ret);
- return 1;
+ schedule_work(&cxt->work_erase);
+ return;
}
/* See if we need to erase the next block */
- if (count != 0xffffffff)
- return 1;
+ if (count != 0xffffffff) {
+ schedule_work(&cxt->work_erase);
+ return;
+ }
printk(KERN_DEBUG "mtdoops: Ready %d, %d (no erase)\n",
cxt->nextpage, cxt->nextcount);
cxt->ready = 1;
- return 0;
}
-static void mtdoops_prepare(struct mtdoops_context *cxt)
+/* Scheduled work - when we can't proceed without erasing a block */
+static void mtdoops_workfunc_erase(struct work_struct *work)
{
+ struct mtdoops_context *cxt =
+ container_of(work, struct mtdoops_context, work_erase);
struct mtd_info *mtd = cxt->mtd;
int i = 0, j, ret, mod;
@@ -136,8 +143,14 @@ static void mtdoops_prepare(struct mtdoops_context *cxt)
cxt->nextpage = 0;
}
- while (mtd->block_isbad &&
- mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE)) {
+ while (mtd->block_isbad) {
+ ret = mtd->block_isbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ if (!ret)
+ break;
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: block_isbad failed, aborting.\n");
+ return;
+ }
badblock:
printk(KERN_WARNING "mtdoops: Bad block at %08x\n",
cxt->nextpage * OOPS_PAGE_SIZE);
@@ -154,34 +167,61 @@ badblock:
for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
ret = mtdoops_erase_block(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
- if (ret < 0) {
- if (mtd->block_markbad)
- mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
- goto badblock;
+ if (ret >= 0) {
+ printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
+ cxt->ready = 1;
+ return;
}
- printk(KERN_DEBUG "mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount);
-
- cxt->ready = 1;
+ if (mtd->block_markbad && (ret == -EIO)) {
+ ret = mtd->block_markbad(mtd, cxt->nextpage * OOPS_PAGE_SIZE);
+ if (ret < 0) {
+ printk(KERN_ERR "mtdoops: block_markbad failed, aborting.\n");
+ return;
+ }
+ }
+ goto badblock;
}
-static void mtdoops_workfunc(struct work_struct *work)
+static void mtdoops_workfunc_write(struct work_struct *work)
{
struct mtdoops_context *cxt =
- container_of(work, struct mtdoops_context, work);
+ container_of(work, struct mtdoops_context, work_write);
+ struct mtd_info *mtd = cxt->mtd;
+ size_t retlen;
+ int ret;
- mtdoops_prepare(cxt);
-}
+ if (cxt->writecount < OOPS_PAGE_SIZE)
+ memset(cxt->oops_buf + cxt->writecount, 0xff,
+ OOPS_PAGE_SIZE - cxt->writecount);
+
+ ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
+ OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+
+ cxt->writecount = 0;
+
+ if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
+ printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
+ cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
-static int find_next_position(struct mtdoops_context *cxt)
+ mtdoops_inc_counter(cxt);
+}
+
+static void find_next_position(struct mtdoops_context *cxt)
{
struct mtd_info *mtd = cxt->mtd;
- int page, maxpos = 0;
+ int ret, page, maxpos = 0;
u32 count, maxcount = 0xffffffff;
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
- mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+ ret = mtd->read(mtd, page * OOPS_PAGE_SIZE, 4, &retlen, (u_char *) &count);
+ if ((retlen != 4) || ((ret < 0) && (ret != -EUCLEAN))) {
+ printk(KERN_ERR "mtdoops: Read failure at %d (%td of 4 read)"
+ ", err %d.\n", page * OOPS_PAGE_SIZE, retlen, ret);
+ continue;
+ }
+
if (count == 0xffffffff)
continue;
if (maxcount == 0xffffffff) {
@@ -205,20 +245,19 @@ static int find_next_position(struct mtdoops_context *cxt)
cxt->ready = 1;
printk(KERN_DEBUG "mtdoops: Ready %d, %d (first init)\n",
cxt->nextpage, cxt->nextcount);
- return 0;
+ return;
}
cxt->nextpage = maxpos;
cxt->nextcount = maxcount;
- return mtdoops_inc_counter(cxt);
+ mtdoops_inc_counter(cxt);
}
static void mtdoops_notify_add(struct mtd_info *mtd)
{
struct mtdoops_context *cxt = &oops_cxt;
- int ret;
if ((mtd->index != cxt->mtd_index) || cxt->mtd_index < 0)
return;
@@ -229,14 +268,18 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
return;
}
+ if (mtd->erasesize < OOPS_PAGE_SIZE) {
+ printk(KERN_ERR "Eraseblock size of MTD partition %d too small\n",
+ mtd->index);
+ return;
+ }
+
cxt->mtd = mtd;
cxt->oops_pages = mtd->size / OOPS_PAGE_SIZE;
- ret = find_next_position(cxt);
- if (ret == 1)
- mtdoops_prepare(cxt);
+ find_next_position(cxt);
- printk(KERN_DEBUG "mtdoops: Attached to MTD device %d\n", mtd->index);
+ printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}
static void mtdoops_notify_remove(struct mtd_info *mtd)
@@ -254,31 +297,24 @@ static void mtdoops_console_sync(void)
{
struct mtdoops_context *cxt = &oops_cxt;
struct mtd_info *mtd = cxt->mtd;
- size_t retlen;
- int ret;
+ unsigned long flags;
- if (!cxt->ready || !mtd)
+ if (!cxt->ready || !mtd || cxt->writecount == 0)
return;
- if (cxt->writecount == 0)
+ /*
+ * Once ready is 0 and we've held the lock no further writes to the
+ * buffer will happen
+ */
+ spin_lock_irqsave(&cxt->writecount_lock, flags);
+ if (!cxt->ready) {
+ spin_unlock_irqrestore(&cxt->writecount_lock, flags);
return;
-
- if (cxt->writecount < OOPS_PAGE_SIZE)
- memset(cxt->oops_buf + cxt->writecount, 0xff,
- OOPS_PAGE_SIZE - cxt->writecount);
-
- ret = mtd->write(mtd, cxt->nextpage * OOPS_PAGE_SIZE,
- OOPS_PAGE_SIZE, &retlen, cxt->oops_buf);
+ }
cxt->ready = 0;
- cxt->writecount = 0;
+ spin_unlock_irqrestore(&cxt->writecount_lock, flags);
- if ((retlen != OOPS_PAGE_SIZE) || (ret < 0))
- printk(KERN_ERR "mtdoops: Write failure at %d (%td of %d written), err %d.\n",
- cxt->nextpage * OOPS_PAGE_SIZE, retlen, OOPS_PAGE_SIZE, ret);
-
- ret = mtdoops_inc_counter(cxt);
- if (ret == 1)
- schedule_work(&cxt->work);
+ schedule_work(&cxt->work_write);
}
static void
@@ -286,7 +322,7 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
{
struct mtdoops_context *cxt = co->data;
struct mtd_info *mtd = cxt->mtd;
- int i;
+ unsigned long flags;
if (!oops_in_progress) {
mtdoops_console_sync();
@@ -296,6 +332,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
if (!cxt->ready || !mtd)
return;
+ /* Locking on writecount ensures sequential writes to the buffer */
+ spin_lock_irqsave(&cxt->writecount_lock, flags);
+
+ /* Check ready status didn't change whilst waiting for the lock */
+ if (!cxt->ready)
+ return;
+
if (cxt->writecount == 0) {
u32 *stamp = cxt->oops_buf;
*stamp = cxt->nextcount;
@@ -305,10 +348,13 @@ mtdoops_console_write(struct console *co, const char *s, unsigned int count)
if ((count + cxt->writecount) > OOPS_PAGE_SIZE)
count = OOPS_PAGE_SIZE - cxt->writecount;
- for (i = 0; i < count; i++, s++)
- *((char *)(cxt->oops_buf) + cxt->writecount + i) = *s;
+ memcpy(cxt->oops_buf + cxt->writecount, s, count);
+ cxt->writecount += count;
+
+ spin_unlock_irqrestore(&cxt->writecount_lock, flags);
- cxt->writecount = cxt->writecount + count;
+ if (cxt->writecount == OOPS_PAGE_SIZE)
+ mtdoops_console_sync();
}
static int __init mtdoops_console_setup(struct console *co, char *options)
@@ -334,7 +380,6 @@ static struct console mtdoops_console = {
.write = mtdoops_console_write,
.setup = mtdoops_console_setup,
.unblank = mtdoops_console_sync,
- .flags = CON_PRINTBUFFER,
.index = -1,
.data = &oops_cxt,
};
@@ -347,11 +392,12 @@ static int __init mtdoops_console_init(void)
cxt->oops_buf = vmalloc(OOPS_PAGE_SIZE);
if (!cxt->oops_buf) {
- printk(KERN_ERR "Failed to allocate oops buffer workspace\n");
+ printk(KERN_ERR "Failed to allocate mtdoops buffer workspace\n");
return -ENOMEM;
}
- INIT_WORK(&cxt->work, mtdoops_workfunc);
+ INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
+ INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);
register_console(&mtdoops_console);
register_mtd_user(&mtdoops_notifier);
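
The writecount_lock comments in the hunks above describe the contract between
the console writer and mtdoops_console_sync(): the writer appends to oops_buf
under the lock only while ready is set, and the sync path clears ready under
the same lock before scheduling the write-out work. A condensed sketch of the
writer-side append, reusing the patch's field names (an illustration of the
protocol, not a copy of the hunk):

        unsigned long flags;

        spin_lock_irqsave(&cxt->writecount_lock, flags);
        if (cxt->ready) {       /* sync path has not claimed the buffer yet */
                memcpy(cxt->oops_buf + cxt->writecount, s, count);
                cxt->writecount += count;
        }
        spin_unlock_irqrestore(&cxt->writecount_lock, flags);
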
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 246d4512f64..0a840d5d75a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -93,7 +93,7 @@ config MTD_NAND_AU1550
config MTD_NAND_BF5XX
tristate "Blackfin on-chip NAND Flash Controller driver"
- depends on BF54x && MTD_NAND
+ depends on (BF54x || BF52x) && MTD_NAND
help
This enables the Blackfin on-chip NAND flash controller
@@ -283,6 +283,12 @@ config MTD_NAND_CM_X270
tristate "Support for NAND Flash on CM-X270 modules"
depends on MTD_NAND && MACH_ARMCORE
+config MTD_NAND_PASEMI
+ tristate "NAND support for PA Semi PWRficient"
+ depends on MTD_NAND && PPC_PASEMI
+ help
+ Enables support for NAND Flash interface on PA Semi PWRficient
+ based boards
config MTD_NAND_NANDSIM
tristate "Support for NAND Flash Simulator"
@@ -306,4 +312,13 @@ config MTD_ALAUDA
These two (and possibly other) Alauda-based cardreaders for
SmartMedia and xD allow raw flash access.
+config MTD_NAND_ORION
+ tristate "NAND Flash support for Marvell Orion SoC"
+ depends on ARCH_ORION && MTD_NAND
+ help
+ This enables the NAND flash controller on Orion machines.
+
+ No board-specific support is done by this driver; each board
+ must advertise a platform_device for the driver to attach.
+
endif # MTD_NAND
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 3ad6c0165da..e35f5ea3a7a 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -29,5 +29,7 @@ obj-$(CONFIG_MTD_NAND_CM_X270) += cmx270_nand.o
obj-$(CONFIG_MTD_NAND_BASLER_EXCITE) += excite_nandflash.o
obj-$(CONFIG_MTD_NAND_PLATFORM) += plat_nand.o
obj-$(CONFIG_MTD_ALAUDA) += alauda.o
+obj-$(CONFIG_MTD_NAND_PASEMI) += pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION) += orion_nand.o
nand-objs := nand_base.o nand_bbt.o
diff --git a/drivers/mtd/nand/at91_nand.c b/drivers/mtd/nand/at91_nand.c
index b2a5672df6e..c9fb2acf405 100644
--- a/drivers/mtd/nand/at91_nand.c
+++ b/drivers/mtd/nand/at91_nand.c
@@ -156,14 +156,14 @@ static int __init at91_nand_probe(struct platform_device *pdev)
}
#ifdef CONFIG_MTD_PARTITIONS
- if (host->board->partition_info)
- partitions = host->board->partition_info(mtd->size, &num_partitions);
#ifdef CONFIG_MTD_CMDLINE_PARTS
- else {
- mtd->name = "at91_nand";
- num_partitions = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
- }
+ mtd->name = "at91_nand";
+ num_partitions = parse_mtd_partitions(mtd, part_probes,
+ &partitions, 0);
#endif
+ if (num_partitions <= 0 && host->board->partition_info)
+ partitions = host->board->partition_info(mtd->size,
+ &num_partitions);
if ((!partitions) || (num_partitions == 0)) {
printk(KERN_ERR "at91_nand: No parititions defined, or unsupported device.\n");
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index 1657ecd7488..542850cd4c3 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -74,7 +74,22 @@ static int hardware_ecc = 1;
static int hardware_ecc;
#endif
-static unsigned short bfin_nfc_pin_req[] = {P_NAND_CE, P_NAND_RB, 0};
+static unsigned short bfin_nfc_pin_req[] =
+ {P_NAND_CE,
+ P_NAND_RB,
+ P_NAND_D0,
+ P_NAND_D1,
+ P_NAND_D2,
+ P_NAND_D3,
+ P_NAND_D4,
+ P_NAND_D5,
+ P_NAND_D6,
+ P_NAND_D7,
+ P_NAND_WE,
+ P_NAND_RE,
+ P_NAND_CLE,
+ P_NAND_ALE,
+ 0};
/*
* Data structures for bf5xx nand flash controller driver
@@ -507,12 +522,13 @@ static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
init_completion(&info->dma_completion);
+#ifdef CONFIG_BF54x
/* Setup DMAC1 channel mux for NFC which shared with SDH */
val = bfin_read_DMAC1_PERIMUX();
val &= 0xFFFE;
bfin_write_DMAC1_PERIMUX(val);
SSYNC();
-
+#endif
/* Request NFC DMA channel */
ret = request_dma(CH_NFC, "BF5XX NFC driver");
if (ret < 0) {
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 1e811715211..da6ceaa80ba 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -11,6 +11,7 @@
#undef DEBUG
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
#include <linux/rslib.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -52,6 +53,7 @@
struct cafe_priv {
struct nand_chip nand;
+ struct mtd_partition *parts;
struct pci_dev *pdev;
void __iomem *mmio;
struct rs_control *rs;
@@ -84,6 +86,10 @@ static unsigned int numtimings;
static int timing[3];
module_param_array(timing, int, &numtimings, 0644);
+#ifdef CONFIG_MTD_PARTITIONS
+static const char *part_probes[] = { "RedBoot", NULL };
+#endif
+
/* Hrm. Why isn't this already conditional on something in the struct device? */
#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
@@ -620,7 +626,9 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
{
struct mtd_info *mtd;
struct cafe_priv *cafe;
+ struct mtd_partition *parts;
uint32_t ctrl;
+ int nr_parts;
int err = 0;
/* Very old versions shared the same PCI ident for all three
@@ -787,7 +795,18 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
goto out_irq;
pci_set_drvdata(pdev, mtd);
+
+ /* We register the whole device first, separate from the partitions */
add_mtd_device(mtd);
+
+#ifdef CONFIG_MTD_PARTITIONS
+ nr_parts = parse_mtd_partitions(mtd, part_probes, &parts, 0);
+ if (nr_parts > 0) {
+ cafe->parts = parts;
+ dev_info(&cafe->pdev->dev, "%d RedBoot partitions found\n", nr_parts);
+ add_mtd_partitions(mtd, parts, nr_parts);
+ }
+#endif
goto out;
out_irq:
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index e29c1da7f56..85a7283845f 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2469,8 +2469,12 @@ int nand_scan_tail(struct mtd_info *mtd)
chip->ecc.write_oob = nand_write_oob_std;
case NAND_ECC_HW_SYNDROME:
- if (!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) {
+ if ((!chip->ecc.calculate || !chip->ecc.correct ||
+ !chip->ecc.hwctl) &&
+ (!chip->ecc.read_page ||
+ chip->ecc.read_page == nand_read_page_hwecc) ||
+ !chip->ecc.write_page ||
+ chip->ecc.write_page == nand_write_page_hwecc) {
printk(KERN_WARNING "No ECC functions supplied, "
"Hardware ECC not possible\n");
BUG();
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
new file mode 100644
index 00000000000..9162cca0182
--- /dev/null
+++ b/drivers/mtd/nand/orion_nand.c
@@ -0,0 +1,171 @@
+/*
+ * drivers/mtd/nand/orion_nand.c
+ *
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/partitions.h>
+#include <asm/io.h>
+#include <asm/sizes.h>
+#include <asm/arch/platform.h>
+#include <asm/arch/hardware.h>
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+static const char *part_probes[] = { "cmdlinepart", NULL };
+#endif
+
+static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+ struct nand_chip *nc = mtd->priv;
+ struct orion_nand_data *board = nc->priv;
+ u32 offs;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ offs = (1 << board->cle);
+ else if (ctrl & NAND_ALE)
+ offs = (1 << board->ale);
+ else
+ return;
+
+ if (nc->options & NAND_BUSWIDTH_16)
+ offs <<= 1;
+
+ writeb(cmd, nc->IO_ADDR_W + offs);
+}
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+ struct mtd_info *mtd;
+ struct nand_chip *nc;
+ struct orion_nand_data *board;
+ void __iomem *io_base;
+ int ret = 0;
+#ifdef CONFIG_MTD_PARTITIONS
+ struct mtd_partition *partitions = NULL;
+ int num_part = 0;
+#endif
+
+ nc = kzalloc(sizeof(struct nand_chip) + sizeof(struct mtd_info), GFP_KERNEL);
+ if (!nc) {
+ printk(KERN_ERR "orion_nand: failed to allocate device structure.\n");
+ ret = -ENOMEM;
+ goto no_res;
+ }
+ mtd = (struct mtd_info *)(nc + 1);
+
+ io_base = ioremap(pdev->resource[0].start,
+ pdev->resource[0].end - pdev->resource[0].start + 1);
+ if (!io_base) {
+ printk(KERN_ERR "orion_nand: ioremap failed\n");
+ ret = -EIO;
+ goto no_res;
+ }
+
+ board = pdev->dev.platform_data;
+
+ mtd->priv = nc;
+ mtd->owner = THIS_MODULE;
+
+ nc->priv = board;
+ nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
+ nc->cmd_ctrl = orion_nand_cmd_ctrl;
+ nc->ecc.mode = NAND_ECC_SOFT;
+
+ if (board->width == 16)
+ nc->options |= NAND_BUSWIDTH_16;
+
+ platform_set_drvdata(pdev, mtd);
+
+ if (nand_scan(mtd, 1)) {
+ ret = -ENXIO;
+ goto no_dev;
+ }
+
+#ifdef CONFIG_MTD_PARTITIONS
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+ mtd->name = "orion_nand";
+ num_part = parse_mtd_partitions(mtd, part_probes, &partitions, 0);
+#endif
+ /* If cmdline partitions have been passed, let them be used */
+ if (num_part <= 0) {
+ num_part = board->nr_parts;
+ partitions = board->parts;
+ }
+
+ if (partitions && num_part > 0)
+ ret = add_mtd_partitions(mtd, partitions, num_part);
+ else
+ ret = add_mtd_device(mtd);
+#else
+ ret = add_mtd_device(mtd);
+#endif
+
+ if (ret) {
+ nand_release(mtd);
+ goto no_dev;
+ }
+
+ return 0;
+
+no_dev:
+ platform_set_drvdata(pdev, NULL);
+ iounmap(io_base);
+no_res:
+ kfree(nc);
+
+ return ret;
+}
+
+static int __devexit orion_nand_remove(struct platform_device *pdev)
+{
+ struct mtd_info *mtd = platform_get_drvdata(pdev);
+ struct nand_chip *nc = mtd->priv;
+
+ nand_release(mtd);
+
+ iounmap(nc->IO_ADDR_W);
+
+ kfree(nc);
+
+ return 0;
+}
+
+static struct platform_driver orion_nand_driver = {
+ .probe = orion_nand_probe,
+ .remove = orion_nand_remove,
+ .driver = {
+ .name = "orion_nand",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init orion_nand_init(void)
+{
+ return platform_driver_register(&orion_nand_driver);
+}
+
+static void __exit orion_nand_exit(void)
+{
+ platform_driver_unregister(&orion_nand_driver);
+}
+
+module_init(orion_nand_init);
+module_exit(orion_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
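
As the Kconfig help for MTD_NAND_ORION notes, this driver only attaches to a
platform_device advertised by board code. A hypothetical board-file fragment
is sketched below; the orion_nand_data fields follow the driver's use of
board->cle, board->ale and board->width above, while the struct definition
itself is assumed to live in the Orion platform headers and the base address
is invented:

        static struct resource board_nand_resource = {
                .start  = 0xf4000000,   /* assumed NAND chip-select window */
                .end    = 0xf40003ff,
                .flags  = IORESOURCE_MEM,
        };

        static struct orion_nand_data board_nand_data = {
                .cle    = 0,    /* address bit driven for the command latch */
                .ale    = 1,    /* address bit driven for the address latch */
                .width  = 8,    /* 16 would set NAND_BUSWIDTH_16 in the probe */
        };

        static struct platform_device board_nand_device = {
                .name           = "orion_nand",
                .id             = -1,
                .dev            = {
                        .platform_data  = &board_nand_data,
                },
                .resource       = &board_nand_resource,
                .num_resources  = 1,
        };

Registering board_nand_device from the board init code is then enough for
orion_nand_probe() to find the window and the platform data.
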
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
new file mode 100644
index 00000000000..75c89903902
--- /dev/null
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR 0x00004000
+#define CLE_PIN_CTL 15
+#define ALE_PIN_CTL 14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ while (len > 0x800) {
+ memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_fromio(buf, chip->IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ while (len > 0x800) {
+ memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+ buf += 0x800;
+ len -= 0x800;
+ }
+ memcpy_toio(chip->IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ struct nand_chip *chip = mtd->priv;
+
+ if (cmd == NAND_CMD_NONE)
+ return;
+
+ if (ctrl & NAND_CLE)
+ out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+ else
+ out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+ /* Push out posted writes */
+ eieio();
+ inl(lpcctl);
+}
+
+int pasemi_device_ready(struct mtd_info *mtd)
+{
+ return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int __devinit pasemi_nand_probe(struct of_device *ofdev,
+ const struct of_device_id *match)
+{
+ struct pci_dev *pdev;
+ struct device_node *np = ofdev->node;
+ struct resource res;
+ struct nand_chip *chip;
+ int err = 0;
+
+ err = of_address_to_resource(np, 0, &res);
+
+ if (err)
+ return -EINVAL;
+
+ /* We only support one device at the moment */
+ if (pasemi_nand_mtd)
+ return -ENODEV;
+
+ pr_debug("pasemi_nand at %lx-%lx\n", res.start, res.end);
+
+ /* Allocate memory for MTD device structure and private data */
+ pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) +
+ sizeof(struct nand_chip), GFP_KERNEL);
+ if (!pasemi_nand_mtd) {
+ printk(KERN_WARNING
+ "Unable to allocate PASEMI NAND MTD device structure\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Get pointer to private data */
+ chip = (struct nand_chip *)&pasemi_nand_mtd[1];
+
+ /* Link the private data with the MTD structure */
+ pasemi_nand_mtd->priv = chip;
+ pasemi_nand_mtd->owner = THIS_MODULE;
+
+ chip->IO_ADDR_R = of_iomap(np, 0);
+ chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+ if (!chip->IO_ADDR_R) {
+ err = -EIO;
+ goto out_mtd;
+ }
+
+ pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+ if (!pdev) {
+ err = -ENODEV;
+ goto out_ior;
+ }
+
+ lpcctl = pci_resource_start(pdev, 0);
+
+ if (!request_region(lpcctl, 4, driver_name)) {
+ err = -EBUSY;
+ goto out_ior;
+ }
+
+ chip->cmd_ctrl = pasemi_hwcontrol;
+ chip->dev_ready = pasemi_device_ready;
+ chip->read_buf = pasemi_read_buf;
+ chip->write_buf = pasemi_write_buf;
+ chip->chip_delay = 0;
+ chip->ecc.mode = NAND_ECC_SOFT;
+
+ /* Enable the following for a flash based bad block table */
+ chip->options = NAND_USE_FLASH_BBT | NAND_NO_AUTOINCR;
+
+ /* Scan to find existence of the device */
+ if (nand_scan(pasemi_nand_mtd, 1)) {
+ err = -ENXIO;
+ goto out_lpc;
+ }
+
+ if (add_mtd_device(pasemi_nand_mtd)) {
+ printk(KERN_ERR "pasemi_nand: Unable to register MTD device\n");
+ err = -ENODEV;
+ goto out_lpc;
+ }
+
+ printk(KERN_INFO "PA Semi NAND flash at %08lx, control at I/O %x\n",
+ res.start, lpcctl);
+
+ return 0;
+
+ out_lpc:
+ release_region(lpcctl, 4);
+ out_ior:
+ iounmap(chip->IO_ADDR_R);
+ out_mtd:
+ kfree(pasemi_nand_mtd);
+ out:
+ return err;
+}
+
+static int __devexit pasemi_nand_remove(struct of_device *ofdev)
+{
+ struct nand_chip *chip;
+
+ if (!pasemi_nand_mtd)
+ return 0;
+
+ chip = pasemi_nand_mtd->priv;
+
+ /* Release resources, unregister device */
+ nand_release(pasemi_nand_mtd);
+
+ release_region(lpcctl, 4);
+
+ iounmap(chip->IO_ADDR_R);
+
+ /* Free the MTD device structure */
+ kfree(pasemi_nand_mtd);
+
+ pasemi_nand_mtd = NULL;
+
+ return 0;
+}
+
+static struct of_device_id pasemi_nand_match[] =
+{
+ {
+ .compatible = "pasemi,localbus-nand",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct of_platform_driver pasemi_nand_driver =
+{
+ .name = (char*)driver_name,
+ .match_table = pasemi_nand_match,
+ .probe = pasemi_nand_probe,
+ .remove = pasemi_nand_remove,
+};
+
+static int __init pasemi_nand_init(void)
+{
+ return of_register_platform_driver(&pasemi_nand_driver);
+}
+module_init(pasemi_nand_init);
+
+static void __exit pasemi_nand_exit(void)
+{
+ of_unregister_platform_driver(&pasemi_nand_driver);
+}
+module_exit(pasemi_nand_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 66f76e9618d..d31cb7b3fee 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -120,6 +120,8 @@ struct s3c2410_nand_info {
int sel_bit;
int mtd_count;
+ unsigned long save_nfconf;
+
enum s3c_cpu_type cpu_type;
};
@@ -364,23 +366,21 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
/* calculate the bit position of the error */
- bit = (diff2 >> 2) & 1;
- bit |= (diff2 >> 3) & 2;
- bit |= (diff2 >> 4) & 4;
+ bit = ((diff2 >> 3) & 1) |
+ ((diff2 >> 4) & 2) |
+ ((diff2 >> 5) & 4);
/* calculate the byte position of the error */
- byte = (diff1 << 1) & 0x80;
- byte |= (diff1 << 2) & 0x40;
- byte |= (diff1 << 3) & 0x20;
- byte |= (diff1 << 4) & 0x10;
-
- byte |= (diff0 >> 3) & 0x08;
- byte |= (diff0 >> 2) & 0x04;
- byte |= (diff0 >> 1) & 0x02;
- byte |= (diff0 >> 0) & 0x01;
-
- byte |= (diff2 << 8) & 0x100;
+ byte = ((diff2 << 7) & 0x100) |
+ ((diff1 << 0) & 0x80) |
+ ((diff1 << 1) & 0x40) |
+ ((diff1 << 2) & 0x20) |
+ ((diff1 << 3) & 0x10) |
+ ((diff0 >> 4) & 0x08) |
+ ((diff0 >> 3) & 0x04) |
+ ((diff0 >> 2) & 0x02) |
+ ((diff0 >> 1) & 0x01);
dev_dbg(info->device, "correcting error bit %d, byte %d\n",
bit, byte);
@@ -399,7 +399,7 @@ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
if ((diff0 & ~(1<<fls(diff0))) == 0)
return 1;
- return 0;
+ return -1;
}
/* ECC functions
@@ -810,6 +810,16 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
if (info) {
+ info->save_nfconf = readl(info->regs + S3C2410_NFCONF);
+
+ /* For the moment, we must ensure nFCE is high during
+ * the time we are suspended. This really should be
+ * handled by suspending the MTDs we are using, but
+ * that is currently not the case. */
+
+ writel(info->save_nfconf | info->sel_bit,
+ info->regs + S3C2410_NFCONF);
+
if (!allow_clk_stop(info))
clk_disable(info->clk);
}
@@ -820,11 +830,19 @@ static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
static int s3c24xx_nand_resume(struct platform_device *dev)
{
struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+ unsigned long nfconf;
if (info) {
clk_enable(info->clk);
s3c2410_nand_inithw(info, dev);
+ /* Restore the state of the nFCE line. */
+
+ nfconf = readl(info->regs + S3C2410_NFCONF);
+ nfconf &= ~info->sel_bit;
+ nfconf |= info->save_nfconf & info->sel_bit;
+ writel(nfconf, info->regs + S3C2410_NFCONF);
+
if (allow_clk_stop(info))
clk_disable(info->clk);
}
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
new file mode 100644
index 00000000000..f86e06934cd
--- /dev/null
+++ b/drivers/mtd/ofpart.c
@@ -0,0 +1,74 @@
+/*
+ * Flash partitions described by the OF (or flattened) device tree
+ *
+ * Copyright (C) 2006 MontaVista Software Inc.
+ * Author: Vitaly Wool <vwool@ru.mvista.com>
+ *
+ * Revised to handle newer style flash binding by:
+ * Copyright (C) 2007 David Gibson, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+int __devinit of_mtd_parse_partitions(struct device *dev,
+ struct mtd_info *mtd,
+ struct device_node *node,
+ struct mtd_partition **pparts)
+{
+ const char *partname;
+ struct device_node *pp;
+ int nr_parts, i;
+
+ /* First count the subnodes */
+ pp = NULL;
+ nr_parts = 0;
+ while ((pp = of_get_next_child(node, pp)))
+ nr_parts++;
+
+ if (nr_parts == 0)
+ return 0;
+
+ *pparts = kzalloc(nr_parts * sizeof(**pparts), GFP_KERNEL);
+ if (!*pparts)
+ return -ENOMEM;
+
+ pp = NULL;
+ i = 0;
+ while ((pp = of_get_next_child(node, pp))) {
+ const u32 *reg;
+ int len;
+
+ reg = of_get_property(pp, "reg", &len);
+ if (!reg || (len != 2 * sizeof(u32))) {
+ of_node_put(pp);
+ dev_err(dev, "Invalid 'reg' on %s\n", node->full_name);
+ kfree(*pparts);
+ *pparts = NULL;
+ return -EINVAL;
+ }
+ (*pparts)[i].offset = reg[0];
+ (*pparts)[i].size = reg[1];
+
+ partname = of_get_property(pp, "label", &len);
+ if (!partname)
+ partname = of_get_property(pp, "name", &len);
+ (*pparts)[i].name = (char *)partname;
+
+ if (of_get_property(pp, "read-only", &len))
+ (*pparts)[i].mask_flags = MTD_WRITEABLE;
+
+ i++;
+ }
+
+ return nr_parts;
+}
+EXPORT_SYMBOL(of_mtd_parse_partitions);
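The parser above returns the number of partitions it found and hands back a freshly allocated array through @pparts. A hedged sketch of how a flash map driver might consume it, assuming the driver's probe routine already has the struct device, the MTD object and the flash device-tree node at hand (the helper name and surrounding code are hypothetical; add_mtd_partitions() is the existing MTD partitioning call):

/*
 * Sketch only: register the partitions described in the device tree.
 * 'dev', 'mtd' and 'node' are assumed to come from the caller's probe code.
 */
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/of.h>

static int example_add_of_partitions(struct device *dev, struct mtd_info *mtd,
				     struct device_node *node)
{
	struct mtd_partition *parts;
	int nr_parts;

	nr_parts = of_mtd_parse_partitions(dev, mtd, node, &parts);
	if (nr_parts <= 0)
		return nr_parts;	/* error, or no partitions described */

	/* The parts array stays allocated while the partitions are registered. */
	return add_mtd_partitions(mtd, parts, nr_parts);
}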
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index 1b0b3201141..ed9f9c061ac 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -855,6 +855,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
this->command(mtd, ONENAND_CMD_READ, from, writesize);
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
}
}
@@ -913,6 +915,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
/* Now wait for load */
ret = this->wait(mtd, FL_READING);
onenand_update_bufferram(mtd, from, !ret);
+ if (ret == -EBADMSG)
+ ret = 0;
}
/*
@@ -923,12 +927,12 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
ops->retlen = read;
ops->oobretlen = oobread;
- if (mtd->ecc_stats.failed - stats.failed)
- return -EBADMSG;
-
if (ret)
return ret;
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
}
@@ -944,6 +948,7 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
struct onenand_chip *this = mtd->priv;
+ struct mtd_ecc_stats stats;
int read = 0, thislen, column, oobsize;
size_t len = ops->ooblen;
mtd_oob_mode_t mode = ops->mode;
@@ -977,6 +982,8 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
+ stats = mtd->ecc_stats;
+
while (read < len) {
cond_resched();
@@ -988,18 +995,16 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
onenand_update_bufferram(mtd, from, 0);
ret = this->wait(mtd, FL_READING);
- /* First copy data and check return value for ECC handling */
+ if (ret && ret != -EBADMSG) {
+ printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
+ break;
+ }
if (mode == MTD_OOB_AUTO)
onenand_transfer_auto_oob(mtd, buf, column, thislen);
else
this->read_bufferram(mtd, ONENAND_SPARERAM, buf, column, thislen);
- if (ret) {
- printk(KERN_ERR "onenand_read_oob_nolock: read failed = 0x%x\n", ret);
- break;
- }
-
read += thislen;
if (read == len)
@@ -1016,7 +1021,14 @@ static int onenand_read_oob_nolock(struct mtd_info *mtd, loff_t from,
}
ops->oobretlen = read;
- return ret;
+
+ if (ret)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return 0;
}
/**
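Note the common theme in the two OneNAND read paths above: an -EBADMSG from the wait function no longer aborts the transfer. The data (or OOB) is copied out first, and only afterwards do the functions report -EBADMSG if the ECC failure counter grew, or -EUCLEAN if only correctable bit-flips were seen. A hedged, caller-side sketch of how these MTD return codes are conventionally interpreted (the function and its arguments are hypothetical):

/*
 * Sketch only: interpreting the MTD read status after these changes.
 */
#include <linux/mtd/mtd.h>

static int example_read(struct mtd_info *mtd, loff_t from, size_t len,
			u_char *buf)
{
	size_t retlen;
	int err = mtd->read(mtd, from, len, &retlen, buf);

	if (err == -EUCLEAN)
		return 0;	/* bit-flips were corrected, data is valid */
	if (err == -EBADMSG)
		return err;	/* uncorrectable ECC error, data is suspect */
	return err;		/* 0 on success, other negative codes on I/O error */
}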
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index a61351f88ec..47474903263 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -59,16 +59,31 @@ static int parse_redboot_partitions(struct mtd_info *master,
static char nullstring[] = "unallocated";
#endif
+ if ( directory < 0 ) {
+ offset = master->size + directory * master->erasesize;
+ while (master->block_isbad &&
+ master->block_isbad(master, offset)) {
+ if (!offset) {
+ nogood:
+ printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
+ return -EIO;
+ }
+ offset -= master->erasesize;
+ }
+ } else {
+ offset = directory * master->erasesize;
+ while (master->block_isbad &&
+ master->block_isbad(master, offset)) {
+ offset += master->erasesize;
+ if (offset == master->size)
+ goto nogood;
+ }
+ }
buf = vmalloc(master->erasesize);
if (!buf)
return -ENOMEM;
- if ( directory < 0 )
- offset = master->size + directory*master->erasesize;
- else
- offset = directory*master->erasesize;
-
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 023653977a1..8f1f9feb2d6 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -21,11 +21,16 @@
*/
/*
- * This file includes UBI initialization and building of UBI devices. At the
- * moment UBI devices may only be added while UBI is initialized, but dynamic
- * device add/remove functionality is planned. Also, at the moment we only
- * attach UBI devices by scanning, which will become a bottleneck when flashes
- * reach certain large size. Then one may improve UBI and add other methods.
+ * This file includes UBI initialization and building of UBI devices.
+ *
+ * When UBI is initialized, it attaches all the MTD devices specified as the
+ * module load parameters or the kernel boot parameters. If MTD devices were
+ * not specified, UBI does not attach any MTD device, but it is possible to do
+ * it later using the "UBI control device".
+ *
+ * At the moment we only attach UBI devices by scanning, which will become a
+ * bottleneck when flashes reach a certain large size. Then one may improve UBI
+ * and add other methods, although it does not seem to be easy to do.
*/
#include <linux/err.h>
@@ -33,7 +38,9 @@
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/stat.h>
+#include <linux/miscdevice.h>
#include <linux/log2.h>
+#include <linux/kthread.h>
#include "ubi.h"
/* Maximum length of the 'mtd=' parameter */
@@ -43,13 +50,11 @@
* struct mtd_dev_param - MTD device parameter description data structure.
* @name: MTD device name or number string
* @vid_hdr_offs: VID header offset
- * @data_offs: data offset
*/
struct mtd_dev_param
{
char name[MTD_PARAM_LEN_MAX];
int vid_hdr_offs;
- int data_offs;
};
/* Numbers of elements set in the @mtd_dev_param array */
@@ -58,14 +63,30 @@ static int mtd_devs = 0;
/* MTD devices specification parameters */
static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
-/* Number of UBI devices in system */
-int ubi_devices_cnt;
+/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
+struct class *ubi_class;
+
+/* Slab cache for lock-tree entries */
+struct kmem_cache *ubi_ltree_slab;
+
+/* Slab cache for wear-leveling entries */
+struct kmem_cache *ubi_wl_entry_slab;
+
+/* UBI control character device */
+static struct miscdevice ubi_ctrl_cdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ubi_ctrl",
+ .fops = &ubi_ctrl_cdev_operations,
+};
/* All UBI devices in system */
-struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
+static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
-/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
-struct class *ubi_class;
+/* Serializes UBI device creations and removals */
+DEFINE_MUTEX(ubi_devices_mutex);
+
+/* Protects @ubi_devices and @ubi->ref_count */
+static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -101,38 +122,150 @@ static struct device_attribute dev_min_io_size =
__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_mtd_num =
+ __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+
+/**
+ * ubi_get_device - get UBI device.
+ * @ubi_num: UBI device number
+ *
+ * This function returns UBI device description object for UBI device number
+ * @ubi_num, or %NULL if the device does not exist. This function increases the
+ * device reference count to prevent removal of the device. In other words, the
+ * device cannot be removed if its reference count is not zero.
+ */
+struct ubi_device *ubi_get_device(int ubi_num)
+{
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (ubi) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi;
+}
+
+/**
+ * ubi_put_device - drop an UBI device reference.
+ * @ubi: UBI device description object
+ */
+void ubi_put_device(struct ubi_device *ubi)
+{
+ spin_lock(&ubi_devices_lock);
+ ubi->ref_count -= 1;
+ put_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+}
+
+/**
+ * ubi_get_by_major - get UBI device description object by character device
+ * major number.
+ * @major: major number
+ *
+ * This function is similar to 'ubi_get_device()', but it searches the device
+ * by its major number.
+ */
+struct ubi_device *ubi_get_by_major(int major)
+{
+ int i;
+ struct ubi_device *ubi;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_assert(ubi->ref_count >= 0);
+ ubi->ref_count += 1;
+ get_device(&ubi->dev);
+ spin_unlock(&ubi_devices_lock);
+ return ubi;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return NULL;
+}
+
+/**
+ * ubi_major2num - get UBI device number by character device major number.
+ * @major: major number
+ *
+ * This function searches for the UBI device with the given major number. If the
+ * UBI device was not found, this function returns %-ENODEV, otherwise the UBI device
+ * number is returned.
+ */
+int ubi_major2num(int major)
+{
+ int i, ubi_num = -ENODEV;
+
+ spin_lock(&ubi_devices_lock);
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ struct ubi_device *ubi = ubi_devices[i];
+
+ if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ ubi_num = ubi->ubi_num;
+ break;
+ }
+ }
+ spin_unlock(&ubi_devices_lock);
+
+ return ubi_num;
+}
/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const struct ubi_device *ubi;
+ ssize_t ret;
+ struct ubi_device *ubi;
+ /*
+ * The below code looks weird, but it actually makes sense. We get the
+ * UBI device reference from the containing 'struct ubi_device'. But it
+ * is unclear whether the device has been removed by this time. Indeed, if
+ * the device was removed before we increased its reference count,
+ * 'ubi_get_device()' will return %NULL and we fail with %-ENODEV.
+ *
+ * Remember, 'struct ubi_device' is freed in the release function, so
+ * we still can use 'ubi->ubi_num'.
+ */
ubi = container_of(dev, struct ubi_device, dev);
+ ubi = ubi_get_device(ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
if (attr == &dev_eraseblock_size)
- return sprintf(buf, "%d\n", ubi->leb_size);
+ ret = sprintf(buf, "%d\n", ubi->leb_size);
else if (attr == &dev_avail_eraseblocks)
- return sprintf(buf, "%d\n", ubi->avail_pebs);
+ ret = sprintf(buf, "%d\n", ubi->avail_pebs);
else if (attr == &dev_total_eraseblocks)
- return sprintf(buf, "%d\n", ubi->good_peb_count);
+ ret = sprintf(buf, "%d\n", ubi->good_peb_count);
else if (attr == &dev_volumes_count)
- return sprintf(buf, "%d\n", ubi->vol_count);
+ ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
else if (attr == &dev_max_ec)
- return sprintf(buf, "%d\n", ubi->max_ec);
+ ret = sprintf(buf, "%d\n", ubi->max_ec);
else if (attr == &dev_reserved_for_bad)
- return sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
+ ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
else if (attr == &dev_bad_peb_count)
- return sprintf(buf, "%d\n", ubi->bad_peb_count);
+ ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
else if (attr == &dev_max_vol_count)
- return sprintf(buf, "%d\n", ubi->vtbl_slots);
+ ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
else if (attr == &dev_min_io_size)
- return sprintf(buf, "%d\n", ubi->min_io_size);
+ ret = sprintf(buf, "%d\n", ubi->min_io_size);
else if (attr == &dev_bgt_enabled)
- return sprintf(buf, "%d\n", ubi->thread_enabled);
+ ret = sprintf(buf, "%d\n", ubi->thread_enabled);
+ else if (attr == &dev_mtd_num)
+ ret = sprintf(buf, "%d\n", ubi->mtd->index);
else
- BUG();
+ ret = -EINVAL;
- return 0;
+ ubi_put_device(ubi);
+ return ret;
}
/* Fake "release" method for UBI devices */
@@ -150,68 +283,44 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
int err;
ubi->dev.release = dev_release;
- ubi->dev.devt = MKDEV(ubi->major, 0);
+ ubi->dev.devt = ubi->cdev.dev;
ubi->dev.class = ubi_class;
sprintf(&ubi->dev.bus_id[0], UBI_NAME_STR"%d", ubi->ubi_num);
err = device_register(&ubi->dev);
if (err)
- goto out;
+ return err;
err = device_create_file(&ubi->dev, &dev_eraseblock_size);
if (err)
- goto out_unregister;
+ return err;
err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
if (err)
- goto out_eraseblock_size;
+ return err;
err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
if (err)
- goto out_avail_eraseblocks;
+ return err;
err = device_create_file(&ubi->dev, &dev_volumes_count);
if (err)
- goto out_total_eraseblocks;
+ return err;
err = device_create_file(&ubi->dev, &dev_max_ec);
if (err)
- goto out_volumes_count;
+ return err;
err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
if (err)
- goto out_volumes_max_ec;
+ return err;
err = device_create_file(&ubi->dev, &dev_bad_peb_count);
if (err)
- goto out_reserved_for_bad;
+ return err;
err = device_create_file(&ubi->dev, &dev_max_vol_count);
if (err)
- goto out_bad_peb_count;
+ return err;
err = device_create_file(&ubi->dev, &dev_min_io_size);
if (err)
- goto out_max_vol_count;
+ return err;
err = device_create_file(&ubi->dev, &dev_bgt_enabled);
if (err)
- goto out_min_io_size;
-
- return 0;
-
-out_min_io_size:
- device_remove_file(&ubi->dev, &dev_min_io_size);
-out_max_vol_count:
- device_remove_file(&ubi->dev, &dev_max_vol_count);
-out_bad_peb_count:
- device_remove_file(&ubi->dev, &dev_bad_peb_count);
-out_reserved_for_bad:
- device_remove_file(&ubi->dev, &dev_reserved_for_bad);
-out_volumes_max_ec:
- device_remove_file(&ubi->dev, &dev_max_ec);
-out_volumes_count:
- device_remove_file(&ubi->dev, &dev_volumes_count);
-out_total_eraseblocks:
- device_remove_file(&ubi->dev, &dev_total_eraseblocks);
-out_avail_eraseblocks:
- device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
-out_eraseblock_size:
- device_remove_file(&ubi->dev, &dev_eraseblock_size);
-out_unregister:
- device_unregister(&ubi->dev);
-out:
- ubi_err("failed to initialize sysfs for %s", ubi->ubi_name);
+ return err;
+ err = device_create_file(&ubi->dev, &dev_mtd_num);
return err;
}
@@ -221,6 +330,7 @@ out:
*/
static void ubi_sysfs_close(struct ubi_device *ubi)
{
+ device_remove_file(&ubi->dev, &dev_mtd_num);
device_remove_file(&ubi->dev, &dev_bgt_enabled);
device_remove_file(&ubi->dev, &dev_min_io_size);
device_remove_file(&ubi->dev, &dev_max_vol_count);
@@ -244,7 +354,7 @@ static void kill_volumes(struct ubi_device *ubi)
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i])
- ubi_free_volume(ubi, i);
+ ubi_free_volume(ubi, ubi->volumes[i]);
}
/**
@@ -259,7 +369,7 @@ static int uif_init(struct ubi_device *ubi)
int i, err;
dev_t dev;
- mutex_init(&ubi->vtbl_mutex);
+ mutex_init(&ubi->volumes_mutex);
spin_lock_init(&ubi->volumes_lock);
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
@@ -278,39 +388,40 @@ static int uif_init(struct ubi_device *ubi)
return err;
}
+ ubi_assert(MINOR(dev) == 0);
cdev_init(&ubi->cdev, &ubi_cdev_operations);
- ubi->major = MAJOR(dev);
- dbg_msg("%s major is %u", ubi->ubi_name, ubi->major);
+ dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev));
ubi->cdev.owner = THIS_MODULE;
- dev = MKDEV(ubi->major, 0);
err = cdev_add(&ubi->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device %s", ubi->ubi_name);
+ ubi_err("cannot add character device");
goto out_unreg;
}
err = ubi_sysfs_init(ubi);
if (err)
- goto out_cdev;
+ goto out_sysfs;
for (i = 0; i < ubi->vtbl_slots; i++)
if (ubi->volumes[i]) {
- err = ubi_add_volume(ubi, i);
- if (err)
+ err = ubi_add_volume(ubi, ubi->volumes[i]);
+ if (err) {
+ ubi_err("cannot add volume %d", i);
goto out_volumes;
+ }
}
return 0;
out_volumes:
kill_volumes(ubi);
+out_sysfs:
ubi_sysfs_close(ubi);
-out_cdev:
cdev_del(&ubi->cdev);
out_unreg:
- unregister_chrdev_region(MKDEV(ubi->major, 0),
- ubi->vtbl_slots + 1);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
+ ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
return err;
}
@@ -323,7 +434,7 @@ static void uif_close(struct ubi_device *ubi)
kill_volumes(ubi);
ubi_sysfs_close(ubi);
cdev_del(&ubi->cdev);
- unregister_chrdev_region(MKDEV(ubi->major, 0), ubi->vtbl_slots + 1);
+ unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}
/**
@@ -384,9 +495,9 @@ out_si:
* assumed:
* o EC header is always at offset zero - this cannot be changed;
* o VID header starts just after the EC header at the closest address
- * aligned to @io->@hdrs_min_io_size;
+ * aligned to @io->hdrs_min_io_size;
* o data starts just after the VID header at the closest address aligned to
- * @io->@min_io_size
+ * @io->min_io_size
*
* This function returns zero in case of success and a negative error code in
* case of failure.
@@ -407,6 +518,9 @@ static int io_init(struct ubi_device *ubi)
return -EINVAL;
}
+ if (ubi->vid_hdr_offset < 0)
+ return -EINVAL;
+
/*
* Note, in this implementation we support MTD devices with 0x7FFFFFFF
* physical eraseblocks maximum.
@@ -424,7 +538,8 @@ static int io_init(struct ubi_device *ubi)
/* Make sure minimal I/O unit is power of 2 */
if (!is_power_of_2(ubi->min_io_size)) {
- ubi_err("bad min. I/O unit");
+ ubi_err("min. I/O unit (%d) is not power of 2",
+ ubi->min_io_size);
return -EINVAL;
}
@@ -453,10 +568,8 @@ static int io_init(struct ubi_device *ubi)
}
/* Similar for the data offset */
- if (ubi->leb_start == 0) {
- ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize;
- ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
- }
+ ubi->leb_start = ubi->vid_hdr_offset + ubi->vid_hdr_alsize;
+ ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);
dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
@@ -514,76 +627,91 @@ static int io_init(struct ubi_device *ubi)
}
/**
- * attach_mtd_dev - attach an MTD device.
- * @mtd_dev: MTD device name or number string
+ * ubi_attach_mtd_dev - attach an MTD device.
+ * @mtd: MTD device description object
+ * @ubi_num: number to assign to the new UBI device
* @vid_hdr_offset: VID header offset
- * @data_offset: data offset
*
- * This function attaches an MTD device to UBI. It first treats @mtd_dev as the
- * MTD device name, and tries to open it by this name. If it is unable to open,
- * it tries to convert @mtd_dev to an integer and open the MTD device by its
- * number. Returns zero in case of success and a negative error code in case of
- * failure.
+ * This function attaches MTD device @mtd to UBI and assigns @ubi_num to the
+ * newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in which
+ * case this function finds a vacant device number and assigns it
+ * automatically. Returns the new UBI device number in case of success and a
+ * negative error code in case of failure.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
*/
-static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
- int data_offset)
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
struct ubi_device *ubi;
- struct mtd_info *mtd;
int i, err;
- mtd = get_mtd_device_nm(mtd_dev);
- if (IS_ERR(mtd)) {
- int mtd_num;
- char *endp;
-
- if (PTR_ERR(mtd) != -ENODEV)
- return PTR_ERR(mtd);
-
- /*
- * Probably this is not MTD device name but MTD device number -
- * check this out.
- */
- mtd_num = simple_strtoul(mtd_dev, &endp, 0);
- if (*endp != '\0' || mtd_dev == endp) {
- ubi_err("incorrect MTD device: \"%s\"", mtd_dev);
- return -ENODEV;
+ /*
+ * Check if we already have the same MTD device attached.
+ *
+ * Note, this function assumes that UBI device creations and deletions
+ * are serialized, so it does not take the &ubi_devices_lock.
+ */
+ for (i = 0; i < UBI_MAX_DEVICES; i++) {
+ ubi = ubi_devices[i];
+ if (ubi && mtd->index == ubi->mtd->index) {
+ dbg_err("mtd%d is already attached to ubi%d",
+ mtd->index, i);
+ return -EEXIST;
}
+ }
- mtd = get_mtd_device(NULL, mtd_num);
- if (IS_ERR(mtd))
- return PTR_ERR(mtd);
+ /*
+ * Make sure this MTD device is not emulated on top of an UBI volume
+ * already. Well, generally this recursion works fine, but there are
+ * different problems like the UBI module takes a reference to itself
+ * by attaching (and thus, opening) the emulated MTD device. This
+ * results in inability to unload the module. And in general it makes
+ * no sense to attach emulated MTD devices, so we prohibit this.
+ */
+ if (mtd->type == MTD_UBIVOLUME) {
+ ubi_err("refuse attaching mtd%d - it is already emulated on "
+ "top of UBI", mtd->index);
+ return -EINVAL;
}
- /* Check if we already have the same MTD device attached */
- for (i = 0; i < ubi_devices_cnt; i++)
- if (ubi_devices[i]->mtd->index == mtd->index) {
- ubi_err("mtd%d is already attached to ubi%d",
- mtd->index, i);
- err = -EINVAL;
- goto out_mtd;
+ if (ubi_num == UBI_DEV_NUM_AUTO) {
+ /* Search for an empty slot in the @ubi_devices array */
+ for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
+ if (!ubi_devices[ubi_num])
+ break;
+ if (ubi_num == UBI_MAX_DEVICES) {
+ dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES);
+ return -ENFILE;
+ }
+ } else {
+ if (ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ /* Make sure ubi_num is not busy */
+ if (ubi_devices[ubi_num]) {
+ dbg_err("ubi%d already exists", ubi_num);
+ return -EEXIST;
}
-
- ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device),
- GFP_KERNEL);
- if (!ubi) {
- err = -ENOMEM;
- goto out_mtd;
}
- ubi->ubi_num = ubi_devices_cnt;
+ ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
+ if (!ubi)
+ return -ENOMEM;
+
ubi->mtd = mtd;
+ ubi->ubi_num = ubi_num;
+ ubi->vid_hdr_offset = vid_hdr_offset;
- dbg_msg("attaching mtd%d to ubi%d: VID header offset %d data offset %d",
- ubi->mtd->index, ubi_devices_cnt, vid_hdr_offset, data_offset);
+ dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
+ mtd->index, ubi_num, vid_hdr_offset);
- ubi->vid_hdr_offset = vid_hdr_offset;
- ubi->leb_start = data_offset;
err = io_init(ubi);
if (err)
goto out_free;
mutex_init(&ubi->buf_mutex);
+ mutex_init(&ubi->ckvol_mutex);
ubi->peb_buf1 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf1)
goto out_free;
@@ -609,8 +737,16 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
if (err)
goto out_detach;
- ubi_msg("attached mtd%d to ubi%d", ubi->mtd->index, ubi_devices_cnt);
- ubi_msg("MTD device name: \"%s\"", ubi->mtd->name);
+ ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
+ if (IS_ERR(ubi->bgt_thread)) {
+ err = PTR_ERR(ubi->bgt_thread);
+ ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
+ err);
+ goto out_uif;
+ }
+
+ ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
+ ubi_msg("MTD device name: \"%s\"", mtd->name);
ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
ubi->peb_size, ubi->peb_size >> 10);
@@ -638,9 +774,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
wake_up_process(ubi->bgt_thread);
}
- ubi_devices_cnt += 1;
- return 0;
+ ubi_devices[ubi_num] = ubi;
+ return ubi_num;
+out_uif:
+ uif_close(ubi);
out_detach:
ubi_eba_close(ubi);
ubi_wl_close(ubi);
@@ -652,21 +790,58 @@ out_free:
vfree(ubi->dbg_peb_buf);
#endif
kfree(ubi);
-out_mtd:
- put_mtd_device(mtd);
- ubi_devices[ubi_devices_cnt] = NULL;
return err;
}
/**
- * detach_mtd_dev - detach an MTD device.
- * @ubi: UBI device description object
+ * ubi_detach_mtd_dev - detach an MTD device.
+ * @ubi_num: UBI device number to detach from
+ * @anyway: detach MTD even if device reference count is not zero
+ *
+ * This function destroys an UBI device number @ubi_num and detaches the
+ * underlying MTD device. Returns zero in case of success, %-EBUSY if the
+ * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
+ * exist.
+ *
+ * Note, the invocations of this function have to be serialized by the
+ * @ubi_devices_mutex.
*/
-static void detach_mtd_dev(struct ubi_device *ubi)
+int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
- int ubi_num = ubi->ubi_num, mtd_num = ubi->mtd->index;
+ struct ubi_device *ubi;
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
+
+ spin_lock(&ubi_devices_lock);
+ ubi = ubi_devices[ubi_num];
+ if (!ubi) {
+ spin_unlock(&ubi_devices_lock);
+ return -EINVAL;
+ }
+
+ if (ubi->ref_count) {
+ if (!anyway) {
+ spin_unlock(&ubi_devices_lock);
+ return -EBUSY;
+ }
+ /* This may only happen if there is a bug */
+ ubi_err("%s reference count %d, destroy anyway",
+ ubi->ubi_name, ubi->ref_count);
+ }
+ ubi_devices[ubi_num] = NULL;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_assert(ubi_num == ubi->ubi_num);
dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
+
+ /*
+ * Before freeing anything, we have to stop the background thread to
+ * prevent it from doing anything on this device while we are freeing.
+ */
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
uif_close(ubi);
ubi_eba_close(ubi);
ubi_wl_close(ubi);
@@ -677,11 +852,51 @@ static void detach_mtd_dev(struct ubi_device *ubi)
#ifdef CONFIG_MTD_UBI_DEBUG
vfree(ubi->dbg_peb_buf);
#endif
- kfree(ubi_devices[ubi_num]);
- ubi_devices[ubi_num] = NULL;
- ubi_devices_cnt -= 1;
- ubi_assert(ubi_devices_cnt >= 0);
- ubi_msg("mtd%d is detached from ubi%d", mtd_num, ubi_num);
+ ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
+ kfree(ubi);
+ return 0;
+}
+
+/**
+ * ltree_entry_ctor - lock tree entries slab cache constructor.
+ * @obj: the lock-tree entry to construct
+ * @cache: the lock tree entry slab cache
+ * @flags: constructor flags
+ */
+static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
+{
+ struct ubi_ltree_entry *le = obj;
+
+ le->users = 0;
+ init_rwsem(&le->mutex);
+}
+
+/**
+ * open_mtd_device - open an MTD device by its name or number.
+ * @mtd_dev: name or number of the device
+ *
+ * This function tries to open an MTD device described by the @mtd_dev string,
+ * which is first treated as an ASCII number, and if that fails, it is treated
+ * as the MTD device name. Returns the MTD device description object in case of
+ * success and a negative error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
+{
+ struct mtd_info *mtd;
+ int mtd_num;
+ char *endp;
+
+ mtd_num = simple_strtoul(mtd_dev, &endp, 0);
+ if (*endp != '\0' || mtd_dev == endp) {
+ /*
+ * This does not look like an ASCII integer, probably this is
+ * MTD device name.
+ */
+ mtd = get_mtd_device_nm(mtd_dev);
+ } else
+ mtd = get_mtd_device(NULL, mtd_num);
+
+ return mtd;
}
static int __init ubi_init(void)
@@ -693,47 +908,105 @@ static int __init ubi_init(void)
BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
if (mtd_devs > UBI_MAX_DEVICES) {
- printk("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
+ printk(KERN_ERR "UBI error: too many MTD devices, "
+ "maximum is %d\n", UBI_MAX_DEVICES);
return -EINVAL;
}
+ /* Create base sysfs directory and sysfs files */
ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
- if (IS_ERR(ubi_class))
- return PTR_ERR(ubi_class);
+ if (IS_ERR(ubi_class)) {
+ err = PTR_ERR(ubi_class);
+ printk(KERN_ERR "UBI error: cannot create UBI class\n");
+ goto out;
+ }
err = class_create_file(ubi_class, &ubi_version);
- if (err)
+ if (err) {
+ printk(KERN_ERR "UBI error: cannot create sysfs file\n");
goto out_class;
+ }
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ printk(KERN_ERR "UBI error: cannot register device\n");
+ goto out_version;
+ }
+
+ ubi_ltree_slab = kmem_cache_create("ubi_ltree_slab",
+ sizeof(struct ubi_ltree_entry), 0,
+ 0, &ltree_entry_ctor);
+ if (!ubi_ltree_slab)
+ goto out_dev_unreg;
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab)
+ goto out_ltree;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
struct mtd_dev_param *p = &mtd_dev_param[i];
+ struct mtd_info *mtd;
cond_resched();
- err = attach_mtd_dev(p->name, p->vid_hdr_offs, p->data_offs);
- if (err)
+
+ mtd = open_mtd_device(p->name);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
goto out_detach;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
+ p->vid_hdr_offs);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0) {
+ put_mtd_device(mtd);
+ printk(KERN_ERR "UBI error: cannot attach %s\n",
+ p->name);
+ goto out_detach;
+ }
}
return 0;
out_detach:
for (k = 0; k < i; k++)
- detach_mtd_dev(ubi_devices[k]);
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ kmem_cache_destroy(ubi_wl_entry_slab);
+out_ltree:
+ kmem_cache_destroy(ubi_ltree_slab);
+out_dev_unreg:
+ misc_deregister(&ubi_ctrl_cdev);
+out_version:
class_remove_file(ubi_class, &ubi_version);
out_class:
class_destroy(ubi_class);
+out:
+ printk(KERN_ERR "UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
module_init(ubi_init);
static void __exit ubi_exit(void)
{
- int i, n = ubi_devices_cnt;
+ int i;
- for (i = 0; i < n; i++)
- detach_mtd_dev(ubi_devices[i]);
+ for (i = 0; i < UBI_MAX_DEVICES; i++)
+ if (ubi_devices[i]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ kmem_cache_destroy(ubi_wl_entry_slab);
+ kmem_cache_destroy(ubi_ltree_slab);
+ misc_deregister(&ubi_ctrl_cdev);
class_remove_file(ubi_class, &ubi_version);
class_destroy(ubi_class);
}
@@ -754,7 +1027,8 @@ static int __init bytes_str_to_int(const char *str)
result = simple_strtoul(str, &endp, 0);
if (str == endp || result < 0) {
- printk("UBI error: incorrect bytes count: \"%s\"\n", str);
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
return -EINVAL;
}
@@ -764,15 +1038,14 @@ static int __init bytes_str_to_int(const char *str)
case 'M':
result *= 1024;
case 'K':
- case 'k':
result *= 1024;
- if (endp[1] == 'i' && (endp[2] == '\0' ||
- endp[2] == 'B' || endp[2] == 'b'))
+ if (endp[1] == 'i' && endp[2] == 'B')
endp += 2;
case '\0':
break;
default:
- printk("UBI error: incorrect bytes count: \"%s\"\n", str);
+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
+ str);
return -EINVAL;
}
@@ -795,21 +1068,25 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
char *pbuf = &buf[0];
char *tokens[3] = {NULL, NULL, NULL};
+ if (!val)
+ return -EINVAL;
+
if (mtd_devs == UBI_MAX_DEVICES) {
- printk("UBI error: too many parameters, max. is %d\n",
+ printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
UBI_MAX_DEVICES);
return -EINVAL;
}
len = strnlen(val, MTD_PARAM_LEN_MAX);
if (len == MTD_PARAM_LEN_MAX) {
- printk("UBI error: parameter \"%s\" is too long, max. is %d\n",
- val, MTD_PARAM_LEN_MAX);
+ printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
+ "max. is %d\n", val, MTD_PARAM_LEN_MAX);
return -EINVAL;
}
if (len == 0) {
- printk("UBI warning: empty 'mtd=' parameter - ignored\n");
+ printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
+ "ignored\n");
return 0;
}
@@ -823,7 +1100,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
tokens[i] = strsep(&pbuf, ",");
if (pbuf) {
- printk("UBI error: too many arguments at \"%s\"\n", val);
+ printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
+ val);
return -EINVAL;
}
@@ -832,13 +1110,9 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
if (tokens[1])
p->vid_hdr_offs = bytes_str_to_int(tokens[1]);
- if (tokens[2])
- p->data_offs = bytes_str_to_int(tokens[2]);
if (p->vid_hdr_offs < 0)
return p->vid_hdr_offs;
- if (p->data_offs < 0)
- return p->data_offs;
mtd_devs += 1;
return 0;
@@ -846,16 +1120,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num>[,<vid_hdr_offs>,<data_offs>]. "
+ "mtd=<name|num>[,<vid_hdr_offs>].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number or name. "
- "Optional \"vid_hdr_offs\" and \"data_offs\" parameters "
- "specify UBI VID header position and data starting "
- "position to be used by UBI.\n"
- "Example: mtd=content,1984,2048 mtd=4 - attach MTD device"
- "with name content using VID header offset 1984 and data "
- "start 2048, and MTD device number 4 using default "
- "offsets");
+ "MTD devices may be specified by their number or name.\n"
+ "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
+ "header position and data starting position to be used "
+ "by UBI.\n"
+ "Example: mtd=content,1984 mtd=4 - attach MTD device"
+ "with name \"content\" using VID header offset 1984, and "
+ "MTD device number 4 with default VID header offset.");
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index fe4da1e96c5..5ec13dc4705 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -28,6 +28,11 @@
*
* Major and minor numbers are assigned dynamically to both UBI and volume
* character devices.
+ *
+ * Well, there is a third kind of character device - the UBI control
+ * character device, which allows one to manipulate UBI devices - create and
+ * delete them. In other words, it is used for attaching and detaching MTD
+ * devices.
*/
#include <linux/module.h>
@@ -39,34 +44,6 @@
#include <asm/div64.h>
#include "ubi.h"
-/*
- * Maximum sequence numbers of UBI and volume character device IOCTLs (direct
- * logical eraseblock erase is a debug-only feature).
- */
-#define UBI_CDEV_IOC_MAX_SEQ 2
-#ifndef CONFIG_MTD_UBI_DEBUG_USERSPACE_IO
-#define VOL_CDEV_IOC_MAX_SEQ 1
-#else
-#define VOL_CDEV_IOC_MAX_SEQ 2
-#endif
-
-/**
- * major_to_device - get UBI device object by character device major number.
- * @major: major number
- *
- * This function returns a pointer to the UBI device object.
- */
-static struct ubi_device *major_to_device(int major)
-{
- int i;
-
- for (i = 0; i < ubi_devices_cnt; i++)
- if (ubi_devices[i] && ubi_devices[i]->major == major)
- return ubi_devices[i];
- BUG();
- return NULL;
-}
-
/**
* get_exclusive - get exclusive access to an UBI volume.
* @desc: volume descriptor
@@ -124,9 +101,11 @@ static void revoke_exclusive(struct ubi_volume_desc *desc, int mode)
static int vol_cdev_open(struct inode *inode, struct file *file)
{
struct ubi_volume_desc *desc;
- const struct ubi_device *ubi = major_to_device(imajor(inode));
- int vol_id = iminor(inode) - 1;
- int mode;
+ int vol_id = iminor(inode) - 1, mode, ubi_num;
+
+ ubi_num = ubi_major2num(imajor(inode));
+ if (ubi_num < 0)
+ return ubi_num;
if (file->f_mode & FMODE_WRITE)
mode = UBI_READWRITE;
@@ -135,7 +114,7 @@ static int vol_cdev_open(struct inode *inode, struct file *file)
dbg_msg("open volume %d, mode %d", vol_id, mode);
- desc = ubi_open_volume(ubi->ubi_num, vol_id, mode);
+ desc = ubi_open_volume(ubi_num, vol_id, mode);
if (IS_ERR(desc))
return PTR_ERR(desc);
@@ -249,7 +228,7 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
if (off + len >= vol->usable_leb_size)
len = vol->usable_leb_size - off;
- err = ubi_eba_read_leb(ubi, vol_id, lnum, tbuf, off, len, 0);
+ err = ubi_eba_read_leb(ubi, vol, lnum, tbuf, off, len, 0);
if (err)
break;
@@ -289,7 +268,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
struct ubi_volume_desc *desc = file->private_data;
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- int lnum, off, len, tbuf_size, vol_id = vol->vol_id, err = 0;
+ int lnum, off, len, tbuf_size, err = 0;
size_t count_save = count;
char *tbuf;
uint64_t tmp;
@@ -339,7 +318,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
break;
}
- err = ubi_eba_write_leb(ubi, vol_id, lnum, tbuf, off, len,
+ err = ubi_eba_write_leb(ubi, vol, lnum, tbuf, off, len,
UBI_UNKNOWN);
if (err)
break;
@@ -377,7 +356,8 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
err = ubi_more_update_data(ubi, vol->vol_id, buf, count);
if (err < 0) {
- ubi_err("cannot write %zd bytes of update data", count);
+ ubi_err("cannot write %zd bytes of update data, error %d",
+ count, err);
return err;
}
@@ -483,7 +463,7 @@ static int vol_cdev_ioctl(struct inode *inode, struct file *file,
}
dbg_msg("erase LEB %d:%d", vol->vol_id, lnum);
- err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
break;
@@ -580,9 +560,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- ubi = major_to_device(imajor(inode));
- if (IS_ERR(ubi))
- return PTR_ERR(ubi);
+ ubi = ubi_get_by_major(imajor(inode));
+ if (!ubi)
+ return -ENODEV;
switch (cmd) {
/* Create volume command */
@@ -591,8 +571,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_mkvol_req req;
dbg_msg("create volume");
- err = copy_from_user(&req, argp,
- sizeof(struct ubi_mkvol_req));
+ err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req));
if (err) {
err = -EFAULT;
break;
@@ -604,7 +583,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
req.name[req.name_len] = '\0';
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_create_volume(ubi, &req);
+ mutex_unlock(&ubi->volumes_mutex);
if (err)
break;
@@ -633,10 +614,16 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_remove_volume(desc);
- if (err)
- ubi_close_volume(desc);
+ mutex_unlock(&ubi->volumes_mutex);
+ /*
+ * The volume is deleted (unless an error occurred), and the
+ * 'struct ubi_volume' object will be freed when
+ * 'ubi_close_volume()' will call 'put_device()'.
+ */
+ ubi_close_volume(desc);
break;
}
@@ -648,8 +635,7 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
struct ubi_rsvol_req req;
dbg_msg("re-size volume");
- err = copy_from_user(&req, argp,
- sizeof(struct ubi_rsvol_req));
+ err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req));
if (err) {
err = -EFAULT;
break;
@@ -669,7 +655,9 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
pebs = !!do_div(tmp, desc->vol->usable_leb_size);
pebs += tmp;
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_resize_volume(desc, pebs);
+ mutex_unlock(&ubi->volumes_mutex);
ubi_close_volume(desc);
break;
}
@@ -679,9 +667,93 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file,
break;
}
+ ubi_put_device(ubi);
+ return err;
+}
+
+static int ctrl_cdev_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ void __user *argp = (void __user *)arg;
+
+ if (!capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ switch (cmd) {
+ /* Attach an MTD device command */
+ case UBI_IOCATT:
+ {
+ struct ubi_attach_req req;
+ struct mtd_info *mtd;
+
+ dbg_msg("attach MTD device");
+ err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req));
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ if (req.mtd_num < 0 ||
+ (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) {
+ err = -EINVAL;
+ break;
+ }
+
+ mtd = get_mtd_device(NULL, req.mtd_num);
+ if (IS_ERR(mtd)) {
+ err = PTR_ERR(mtd);
+ break;
+ }
+
+ /*
+ * Note, further request verification is done by
+ * 'ubi_attach_mtd_dev()'.
+ */
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset);
+ mutex_unlock(&ubi_devices_mutex);
+ if (err < 0)
+ put_mtd_device(mtd);
+ else
+ /* @err contains UBI device number */
+ err = put_user(err, (__user int32_t *)argp);
+
+ break;
+ }
+
+ /* Detach an MTD device command */
+ case UBI_IOCDET:
+ {
+ int ubi_num;
+
+ dbg_msg("dettach MTD device");
+ err = get_user(ubi_num, (__user int32_t *)argp);
+ if (err) {
+ err = -EFAULT;
+ break;
+ }
+
+ mutex_lock(&ubi_devices_mutex);
+ err = ubi_detach_mtd_dev(ubi_num, 0);
+ mutex_unlock(&ubi_devices_mutex);
+ break;
+ }
+
+ default:
+ err = -ENOTTY;
+ break;
+ }
+
return err;
}
+/* UBI control character device operations */
+struct file_operations ubi_ctrl_cdev_operations = {
+ .ioctl = ctrl_cdev_ioctl,
+ .owner = THIS_MODULE,
+};
+
/* UBI character device operations */
struct file_operations ubi_cdev_operations = {
.owner = THIS_MODULE,
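With ctrl_cdev_ioctl() wired up, user space can attach and detach MTD devices through the new control node rather than only via module parameters. A hedged user-space sketch (it assumes udev exposes the "ubi_ctrl" misc device as /dev/ubi_ctrl, and that UBI_DEV_NUM_AUTO and struct ubi_attach_req come from the exported mtd/ubi-user.h header):

/*
 * Sketch only: attach mtd_num to UBI from user space via UBI_IOCATT.
 * Error handling is abbreviated.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

int example_attach(int mtd_num)
{
	struct ubi_attach_req req;
	int fd, err;

	fd = open("/dev/ubi_ctrl", O_RDONLY);
	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.mtd_num = mtd_num;
	req.ubi_num = UBI_DEV_NUM_AUTO;	/* let UBI pick a free number */
	req.vid_hdr_offset = 0;		/* use the default offset */

	/*
	 * On success the ioctl returns 0 and, per the put_user() call in
	 * ctrl_cdev_ioctl() above, the kernel writes the assigned UBI
	 * device number back through the request pointer.
	 */
	err = ioctl(fd, UBI_IOCATT, &req);
	close(fd);
	return err;
}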
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 467722eb618..51c40b17f1e 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -39,8 +39,9 @@
#ifdef CONFIG_MTD_UBI_DEBUG_MSG
/* Generic debugging message */
-#define dbg_msg(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG: %s: " fmt "\n", __FUNCTION__, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...) \
+ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
+ current->pid, __FUNCTION__, ##__VA_ARGS__)
#define ubi_dbg_dump_stack() dump_stack()
@@ -76,36 +77,28 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA
/* Messages from the eraseblock association unit */
-#define dbg_eba(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG eba: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_eba(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL
/* Messages from the wear-leveling unit */
-#define dbg_wl(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG wl: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_wl(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO
/* Messages from the input/output unit */
-#define dbg_io(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG io: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_io(fmt, ...) ({})
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_MSG_BLD
/* Initialization and build messages */
-#define dbg_bld(fmt, ...) \
- printk(KERN_DEBUG "UBI DBG bld: %s: " fmt "\n", __FUNCTION__, \
- ##__VA_ARGS__)
+#define dbg_bld(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__)
#else
#define dbg_bld(fmt, ...) ({})
#endif
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 880fa369035..85297cde4ac 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -31,7 +31,7 @@
* logical eraseblock it is locked for reading or writing. The per-logical
* eraseblock locking is implemented by means of the lock tree. The lock tree
* is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ltree_entry objects. They are indexed by
+ * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
* (@vol_id, @lnum) pairs.
*
* EBA also maintains the global sequence counter which is incremented each
@@ -50,29 +50,6 @@
#define EBA_RESERVED_PEBS 1
/**
- * struct ltree_entry - an entry in the lock tree.
- * @rb: links RB-tree nodes
- * @vol_id: volume ID of the locked logical eraseblock
- * @lnum: locked logical eraseblock number
- * @users: how many tasks are using this logical eraseblock or wait for it
- * @mutex: read/write mutex to implement read/write access serialization to
- * the (@vol_id, @lnum) logical eraseblock
- *
- * When a logical eraseblock is being locked - corresponding &struct ltree_entry
- * object is inserted to the lock tree (@ubi->ltree).
- */
-struct ltree_entry {
- struct rb_node rb;
- int vol_id;
- int lnum;
- int users;
- struct rw_semaphore mutex;
-};
-
-/* Slab cache for lock-tree entries */
-static struct kmem_cache *ltree_slab;
-
-/**
* next_sqnum - get next sequence number.
* @ubi: UBI device description object
*
@@ -112,20 +89,20 @@ static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
* @vol_id: volume ID
* @lnum: logical eraseblock number
*
- * This function returns a pointer to the corresponding &struct ltree_entry
+ * This function returns a pointer to the corresponding &struct ubi_ltree_entry
* object if the logical eraseblock is locked and %NULL if it is not.
* @ubi->ltree_lock has to be locked.
*/
-static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
- int lnum)
+static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
+ int lnum)
{
struct rb_node *p;
p = ubi->ltree.rb_node;
while (p) {
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
- le = rb_entry(p, struct ltree_entry, rb);
+ le = rb_entry(p, struct ubi_ltree_entry, rb);
if (vol_id < le->vol_id)
p = p->rb_left;
@@ -155,12 +132,12 @@ static struct ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
* Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
* failed.
*/
-static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
- int lnum)
+static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
+ int vol_id, int lnum)
{
- struct ltree_entry *le, *le1, *le_free;
+ struct ubi_ltree_entry *le, *le1, *le_free;
- le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
+ le = kmem_cache_alloc(ubi_ltree_slab, GFP_NOFS);
if (!le)
return ERR_PTR(-ENOMEM);
@@ -189,7 +166,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
p = &ubi->ltree.rb_node;
while (*p) {
parent = *p;
- le1 = rb_entry(parent, struct ltree_entry, rb);
+ le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
if (vol_id < le1->vol_id)
p = &(*p)->rb_left;
@@ -211,7 +188,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
spin_unlock(&ubi->ltree_lock);
if (le_free)
- kmem_cache_free(ltree_slab, le_free);
+ kmem_cache_free(ubi_ltree_slab, le_free);
return le;
}
@@ -227,7 +204,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
*/
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
@@ -245,7 +222,7 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free = 0;
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
@@ -259,7 +236,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
up_read(&le->mutex);
if (free)
- kmem_cache_free(ltree_slab, le);
+ kmem_cache_free(ubi_ltree_slab, le);
}
/**
@@ -273,7 +250,7 @@ static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
*/
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
le = ltree_add_entry(ubi, vol_id, lnum);
if (IS_ERR(le))
@@ -283,6 +260,44 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
}
/**
+ * leb_write_trylock - try to lock logical eraseblock for writing.
+ * @ubi: UBI device description object
+ * @vol_id: volume ID
+ * @lnum: logical eraseblock number
+ *
+ * This function locks a logical eraseblock for writing if there is no
+ * contention and does nothing if there is contention. Returns %0 in case of
+ * success, %1 in case of contention, and a negative error code in case of
+ * failure.
+ */
+static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
+{
+ int free;
+ struct ubi_ltree_entry *le;
+
+ le = ltree_add_entry(ubi, vol_id, lnum);
+ if (IS_ERR(le))
+ return PTR_ERR(le);
+ if (down_write_trylock(&le->mutex))
+ return 0;
+
+ /* Contention, cancel */
+ spin_lock(&ubi->ltree_lock);
+ le->users -= 1;
+ ubi_assert(le->users >= 0);
+ if (le->users == 0) {
+ rb_erase(&le->rb, &ubi->ltree);
+ free = 1;
+ } else
+ free = 0;
+ spin_unlock(&ubi->ltree_lock);
+ if (free)
+ kmem_cache_free(ubi_ltree_slab, le);
+
+ return 1;
+}
+
+/**
* leb_write_unlock - unlock logical eraseblock.
* @ubi: UBI device description object
* @vol_id: volume ID
@@ -291,7 +306,7 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
int free;
- struct ltree_entry *le;
+ struct ubi_ltree_entry *le;
spin_lock(&ubi->ltree_lock);
le = ltree_lookup(ubi, vol_id, lnum);
@@ -306,23 +321,26 @@ static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
up_write(&le->mutex);
if (free)
- kmem_cache_free(ltree_slab, le);
+ kmem_cache_free(ubi_ltree_slab, le);
}
/**
* ubi_eba_unmap_leb - un-map logical eraseblock.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
*
* This function un-maps logical eraseblock @lnum and schedules corresponding
* physical eraseblock for erasure. Returns zero in case of success and a
* negative error code in case of failure.
*/
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum)
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum)
{
- int idx = vol_id2idx(ubi, vol_id), err, pnum;
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, vol_id = vol->vol_id;
+
+ ubi_assert(ubi->ref_count > 0);
+ ubi_assert(vol->ref_count > 0);
if (ubi->ro_mode)
return -EROFS;
@@ -349,7 +367,7 @@ out_unlock:
/**
* ubi_eba_read_leb - read data.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: buffer to store the read data
* @offset: offset from where to read
@@ -365,14 +383,16 @@ out_unlock:
* returned for any volume type if an ECC error was detected by the MTD device
* driver. Other negative error codes may be returned in case of other errors.
*/
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
- int offset, int len, int check)
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check)
{
- int err, pnum, scrub = 0, idx = vol_id2idx(ubi, vol_id);
+ int err, pnum, scrub = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
- struct ubi_volume *vol = ubi->volumes[idx];
uint32_t uninitialized_var(crc);
+ ubi_assert(ubi->ref_count > 0);
+ ubi_assert(vol->ref_count > 0);
+
err = leb_read_lock(ubi, vol_id, lnum);
if (err)
return err;
@@ -578,7 +598,7 @@ write_error:
/**
* ubi_eba_write_leb - write data to dynamic volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: the data to write
* @offset: offset within the logical eraseblock where to write
@@ -586,17 +606,19 @@ write_error:
* @dtype: data type
*
* This function writes data to logical eraseblock @lnum of a dynamic volume
- * @vol_id. Returns zero in case of success and a negative error code in case
+ * @vol. Returns zero in case of success and a negative error code in case
* of failure. In case of error, it is possible that something was still
* written to the flash media, but may be some garbage.
*/
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len, int dtype)
{
- int idx = vol_id2idx(ubi, vol_id), err, pnum, tries = 0;
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
+ ubi_assert(ubi->ref_count > 0);
+ ubi_assert(vol->ref_count > 0);
+
if (ubi->ro_mode)
return -EROFS;
@@ -613,7 +635,8 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
if (err) {
ubi_warn("failed to write data to PEB %d", pnum);
if (err == -EIO && ubi->bad_allowed)
- err = recover_peb(ubi, pnum, vol_id, lnum, buf, offset, len);
+ err = recover_peb(ubi, pnum, vol_id, lnum, buf,
+ offset, len);
if (err)
ubi_ro_mode(ubi);
}
@@ -656,11 +679,14 @@ retry:
goto write_error;
}
- err = ubi_io_write_data(ubi, buf, pnum, offset, len);
- if (err) {
- ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, "
- "PEB %d", len, offset, vol_id, lnum, pnum);
- goto write_error;
+ if (len) {
+ err = ubi_io_write_data(ubi, buf, pnum, offset, len);
+ if (err) {
+ ubi_warn("failed to write %d bytes at offset %d of "
+ "LEB %d:%d, PEB %d", len, offset, vol_id,
+ lnum, pnum);
+ goto write_error;
+ }
}
vol->eba_tbl[lnum] = pnum;
@@ -698,7 +724,7 @@ write_error:
/**
* ubi_eba_write_leb_st - write data to static volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
@@ -706,7 +732,7 @@ write_error:
* @used_ebs: how many logical eraseblocks will this volume contain
*
* This function writes data to logical eraseblock @lnum of static volume
- * @vol_id. The @used_ebs argument should contain total number of logical
+ * @vol. The @used_ebs argument should contain total number of logical
* eraseblock in this static volume.
*
* When writing to the last logical eraseblock, the @len argument doesn't have
@@ -718,15 +744,17 @@ write_error:
* volumes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype, int used_ebs)
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype,
+ int used_ebs)
{
- int err, pnum, tries = 0, data_size = len;
- int idx = vol_id2idx(ubi, vol_id);
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
+ ubi_assert(ubi->ref_count > 0);
+ ubi_assert(vol->ref_count > 0);
+
if (ubi->ro_mode)
return -EROFS;
@@ -819,7 +847,7 @@ write_error:
/*
* ubi_eba_atomic_leb_change - change logical eraseblock atomically.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
* @lnum: logical eraseblock number
* @buf: data to write
* @len: how many bytes to write
@@ -834,14 +862,16 @@ write_error:
* UBI reserves one LEB for the "atomic LEB change" operation, so only one
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype)
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype)
{
- int err, pnum, tries = 0, idx = vol_id2idx(ubi, vol_id);
- struct ubi_volume *vol = ubi->volumes[idx];
+ int err, pnum, tries = 0, vol_id = vol->vol_id;
struct ubi_vid_hdr *vid_hdr;
uint32_t crc;
+ ubi_assert(ubi->ref_count > 0);
+ ubi_assert(vol->ref_count > 0);
+
if (ubi->ro_mode)
return -EROFS;
@@ -928,20 +958,6 @@ write_error:
}
/**
- * ltree_entry_ctor - lock tree entries slab cache constructor.
- * @obj: the lock-tree entry to construct
- * @cache: the lock tree entry slab cache
- * @flags: constructor flags
- */
-static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
-{
- struct ltree_entry *le = obj;
-
- le->users = 0;
- init_rwsem(&le->mutex);
-}
-
-/**
* ubi_eba_copy_leb - copy logical eraseblock.
* @ubi: UBI device description object
* @from: physical eraseblock number from where to copy
@@ -950,14 +966,16 @@ static void ltree_entry_ctor(struct kmem_cache *cache, void *obj)
*
* This function copies logical eraseblock from physical eraseblock @from to
* physical eraseblock @to. The @vid_hdr buffer may be changed by this
- * function. Returns zero in case of success, %UBI_IO_BITFLIPS if the operation
- * was canceled because bit-flips were detected at the target PEB, and a
- * negative error code in case of failure.
+ * function. Returns:
+ * o %0 in case of success;
+ * o %1 if the operation was canceled and should be tried later (e.g.,
+ * because a bit-flip was detected at the target PEB);
+ * o %2 if the volume is being deleted and this LEB should not be moved.
*/
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr)
{
- int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
+ int err, vol_id, lnum, data_size, aldata_size, idx;
struct ubi_volume *vol;
uint32_t crc;
@@ -973,51 +991,67 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
data_size = aldata_size =
ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
- /*
- * We do not want anybody to write to this logical eraseblock while we
- * are moving it, so we lock it.
- */
- err = leb_write_lock(ubi, vol_id, lnum);
- if (err)
- return err;
-
- mutex_lock(&ubi->buf_mutex);
-
- /*
- * But the logical eraseblock might have been put by this time.
- * Cancel if it is true.
- */
idx = vol_id2idx(ubi, vol_id);
-
+ spin_lock(&ubi->volumes_lock);
/*
- * We may race with volume deletion/re-size, so we have to hold
- * @ubi->volumes_lock.
+ * Note, we may race with volume deletion, which means that the volume
+ * this logical eraseblock belongs to might be being deleted. Since the
+ * volume deletion unmaps all the volume's logical eraseblocks, it will
+ * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
*/
- spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[idx];
if (!vol) {
- dbg_eba("volume %d was removed meanwhile", vol_id);
+ /* No need to do further work, cancel */
+ dbg_eba("volume %d is being removed, cancel", vol_id);
spin_unlock(&ubi->volumes_lock);
- goto out_unlock;
+ return 2;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+ /*
+ * We do not want anybody to write to this logical eraseblock while we
+ * are moving it, so lock it.
+ *
+ * Note, we are using non-waiting locking here, because we cannot sleep
+ * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
+ * unmapping the LEB which is mapped to the PEB we are going to move
+ * (@from). This task locks the LEB and goes sleep in the
+ * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
+ * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
+ * LEB is already locked, we just do not move it and return %1.
+ */
+ err = leb_write_trylock(ubi, vol_id, lnum);
+ if (err) {
+ dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
+ return err;
}
- pnum = vol->eba_tbl[lnum];
- if (pnum != from) {
+ /*
+ * The LEB might have been put meanwhile, and the task which put it is
+ * probably waiting on @ubi->move_mutex. No need to continue the work,
+ * cancel it.
+ */
+ if (vol->eba_tbl[lnum] != from) {
dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to "
- "PEB %d, cancel", vol_id, lnum, from, pnum);
- spin_unlock(&ubi->volumes_lock);
- goto out_unlock;
+ "PEB %d, cancel", vol_id, lnum, from,
+ vol->eba_tbl[lnum]);
+ err = 1;
+ goto out_unlock_leb;
}
- spin_unlock(&ubi->volumes_lock);
-
- /* OK, now the LEB is locked and we can safely start moving it */
+ /*
+ * OK, now the LEB is locked and we can safely start moving it. Since
+ * this function utilizes the @ubi->peb_buf1 buffer, which is shared
+ * with some other functions, lock the buffer by taking the
+ * @ubi->buf_mutex.
+ */
+ mutex_lock(&ubi->buf_mutex);
dbg_eba("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
- goto out_unlock;
+ goto out_unlock_buf;
}
/*
@@ -1053,7 +1087,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err)
- goto out_unlock;
+ goto out_unlock_buf;
cond_resched();
@@ -1062,13 +1096,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err) {
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read VID header back from PEB %d", to);
- goto out_unlock;
+ else
+ err = 1;
+ goto out_unlock_buf;
}
if (data_size > 0) {
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
if (err)
- goto out_unlock;
+ goto out_unlock_buf;
cond_resched();
@@ -1082,7 +1118,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read data back from PEB %d",
to);
- goto out_unlock;
+ else
+ err = 1;
+ goto out_unlock_buf;
}
cond_resched();
@@ -1090,15 +1128,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
ubi_warn("read data back from PEB %d - it is different",
to);
- goto out_unlock;
+ goto out_unlock_buf;
}
}
ubi_assert(vol->eba_tbl[lnum] == from);
vol->eba_tbl[lnum] = to;
-out_unlock:
+out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
+out_unlock_leb:
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
@@ -1125,14 +1164,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
- if (ubi_devices_cnt == 0) {
- ltree_slab = kmem_cache_create("ubi_ltree_slab",
- sizeof(struct ltree_entry), 0,
- 0, &ltree_entry_ctor);
- if (!ltree_slab)
- return -ENOMEM;
- }
-
ubi->global_sqnum = si->max_sqnum + 1;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
@@ -1168,6 +1199,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
}
}
+ if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+ ubi_err("no enough physical eraseblocks (%d, need %d)",
+ ubi->avail_pebs, EBA_RESERVED_PEBS);
+ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= EBA_RESERVED_PEBS;
+ ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
if (ubi->bad_allowed) {
ubi_calculate_reserved(ubi);
@@ -1184,15 +1224,6 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
- if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
- ubi_err("no enough physical eraseblocks (%d, need %d)",
- ubi->avail_pebs, EBA_RESERVED_PEBS);
- err = -ENOSPC;
- goto out_free;
- }
- ubi->avail_pebs -= EBA_RESERVED_PEBS;
- ubi->rsvd_pebs += EBA_RESERVED_PEBS;
-
dbg_eba("EBA unit is initialized");
return 0;
@@ -1202,8 +1233,6 @@ out_free:
continue;
kfree(ubi->volumes[i]->eba_tbl);
}
- if (ubi_devices_cnt == 0)
- kmem_cache_destroy(ltree_slab);
return err;
}
@@ -1222,6 +1251,4 @@ void ubi_eba_close(const struct ubi_device *ubi)
continue;
kfree(ubi->volumes[i]->eba_tbl);
}
- if (ubi_devices_cnt == 1)
- kmem_cache_destroy(ltree_slab);
}
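The switch to leb_write_trylock() above sidesteps an ABBA deadlock: the WL worker holds @ubi->move_mutex and wants the per-LEB lock, while a task unmapping that LEB holds the LEB lock and sleeps on @ubi->move_mutex inside 'ubi_wl_put_peb()'. Below is a minimal user-space model of that situation built on pthreads rather than the kernel primitives; all names in it are illustrative, not part of UBI.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t move_mutex = PTHREAD_MUTEX_INITIALIZER; /* models @ubi->move_mutex */
static pthread_mutex_t leb_lock   = PTHREAD_MUTEX_INITIALIZER; /* models the per-LEB lock */

/* Models the WL worker: holds move_mutex, then needs the LEB lock. */
static void *wl_worker(void *arg)
{
	pthread_mutex_lock(&move_mutex);
	/*
	 * Blocking here could deadlock against unmap_task(), so only try
	 * the lock and give up ("not moved") on contention.
	 */
	if (pthread_mutex_trylock(&leb_lock) != 0) {
		printf("contention on the LEB, cancel the move\n");
		pthread_mutex_unlock(&move_mutex);
		return NULL;
	}
	/* ... copy the eraseblock here ... */
	pthread_mutex_unlock(&leb_lock);
	pthread_mutex_unlock(&move_mutex);
	return NULL;
}

/* Models a task unmapping the LEB: holds the LEB lock, then waits on move_mutex. */
static void *unmap_task(void *arg)
{
	pthread_mutex_lock(&leb_lock);
	pthread_mutex_lock(&move_mutex);   /* like ubi_wl_put_peb() waiting for the WL worker */
	pthread_mutex_unlock(&move_mutex);
	pthread_mutex_unlock(&leb_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, wl_worker, NULL);
	pthread_create(&b, NULL, unmap_task, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

With a blocking lock in wl_worker() the two threads could wait on each other forever; the trylock turns the conflict into a harmless "cancel and retry later", which is what the %1 return code of ubi_eba_copy_leb() signals.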
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 41ff74c60e1..d397219238d 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
@@ -129,8 +129,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len,
if (to_read > total_read)
to_read = total_read;
- err = ubi_eba_read_leb(ubi, vol->vol_id, lnum, buf, offs,
- to_read, 0);
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0);
if (err)
break;
@@ -187,8 +186,8 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len,
if (to_write > total_written)
to_write = total_written;
- err = ubi_eba_write_leb(ubi, vol->vol_id, lnum, buf, offs,
- to_write, UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write,
+ UBI_UNKNOWN);
if (err)
break;
@@ -237,7 +236,7 @@ static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
return -EROFS;
for (i = 0; i < count; i++) {
- err = ubi_eba_unmap_leb(ubi, vol->vol_id, lnum + i);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum + i);
if (err)
goto out_err;
}
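For context, the offs/to_read arithmetic in gluebi_read() and gluebi_write() above simply walks an MTD byte range one logical eraseblock at a time. A rough stand-alone sketch of that split is below; the function names, the callback and the 128 KiB LEB size are made up for illustration.

#include <stdio.h>

/* Split a (from, len) byte range into per-LEB chunks of at most leb_size. */
static void for_each_leb_chunk(long long from, size_t len, int leb_size,
			       void (*cb)(int lnum, int offs, size_t count))
{
	int lnum = from / leb_size;          /* first logical eraseblock */
	int offs = from % leb_size;          /* offset inside that LEB */

	while (len > 0) {
		size_t count = leb_size - offs;  /* room left in this LEB */

		if (count > len)
			count = len;
		cb(lnum, offs, count);

		len -= count;
		lnum += 1;       /* continue at the start of the next LEB */
		offs = 0;
	}
}

static void print_chunk(int lnum, int offs, size_t count)
{
	printf("LEB %d, offset %d, %zu bytes\n", lnum, offs, count);
}

int main(void)
{
	/* e.g. a 300 KiB read starting 100 KiB into the device, 128 KiB LEBs */
	for_each_leb_chunk(100 * 1024, 300 * 1024, 128 * 1024, print_chunk);
	return 0;
}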
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 7c304eec78b..db3efdef243 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -173,6 +173,16 @@ retry:
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
ubi_dbg_dump_stack();
+
+ /*
+ * The driver should never return -EBADMSG if it failed to read
+ * all the requested data. But some buggy drivers might do
+ * this, so we change it to -EIO.
+ */
+ if (read != len && err == -EBADMSG) {
+ ubi_assert(0);
+ err = -EIO;
+ }
} else {
ubi_assert(len == read);
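The -EBADMSG/-EIO fix-up above encodes a simple invariant: an ECC error (-EBADMSG) only makes sense for a read that returned all requested bytes, so a short read claiming it is downgraded to plain -EIO. A stand-alone sketch of that check, with ubi_assert() replaced by a warning for illustration:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Normalize the result of a driver read: a short read must never be
 * reported as an ECC error (-EBADMSG), so degrade it to plain -EIO.
 */
static int normalize_read_err(int err, size_t read, size_t len)
{
	if (read != len && err == -EBADMSG) {
		fprintf(stderr, "buggy driver: short read reported -EBADMSG\n");
		err = -EIO;
	}
	return err;
}

int main(void)
{
	/* the driver read only 100 of 512 bytes yet claimed an ECC error */
	printf("%d\n", normalize_read_err(-EBADMSG, 100, 512)); /* prints -EIO (-5 on Linux) */
	return 0;
}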
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 03c774f4154..146957c3380 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -30,23 +30,27 @@
* @ubi_num: UBI device number
* @di: the information is stored here
*
- * This function returns %0 in case of success and a %-ENODEV if there is no
- * such UBI device.
+ * This function returns %0 in case of success, %-EINVAL if the UBI device
+ * number is invalid, and %-ENODEV if there is no such UBI device.
*/
int ubi_get_device_info(int ubi_num, struct ubi_device_info *di)
{
- const struct ubi_device *ubi;
+ struct ubi_device *ubi;
+
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return -EINVAL;
- if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES ||
- !ubi_devices[ubi_num])
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
return -ENODEV;
- ubi = ubi_devices[ubi_num];
di->ubi_num = ubi->ubi_num;
di->leb_size = ubi->leb_size;
di->min_io_size = ubi->min_io_size;
di->ro_mode = ubi->ro_mode;
- di->cdev = MKDEV(ubi->major, 0);
+ di->cdev = ubi->cdev.dev;
+
+ ubi_put_device(ubi);
return 0;
}
EXPORT_SYMBOL_GPL(ubi_get_device_info);
@@ -73,7 +77,7 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc,
vi->usable_leb_size = vol->usable_leb_size;
vi->name_len = vol->name_len;
vi->name = vol->name;
- vi->cdev = MKDEV(ubi->major, vi->vol_id + 1);
+ vi->cdev = vol->cdev.dev;
}
EXPORT_SYMBOL_GPL(ubi_get_volume_info);
@@ -104,37 +108,39 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
- err = -ENODEV;
- if (ubi_num < 0)
- return ERR_PTR(err);
-
- ubi = ubi_devices[ubi_num];
-
- if (!try_module_get(THIS_MODULE))
- return ERR_PTR(err);
-
- if (ubi_num >= UBI_MAX_DEVICES || !ubi)
- goto out_put;
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
- err = -EINVAL;
- if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
- goto out_put;
if (mode != UBI_READONLY && mode != UBI_READWRITE &&
mode != UBI_EXCLUSIVE)
- goto out_put;
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * First of all, we have to get the UBI device to prevent its removal.
+ */
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
+
+ if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+ err = -EINVAL;
+ goto out_put_ubi;
+ }
desc = kmalloc(sizeof(struct ubi_volume_desc), GFP_KERNEL);
if (!desc) {
err = -ENOMEM;
- goto out_put;
+ goto out_put_ubi;
}
+ err = -ENODEV;
+ if (!try_module_get(THIS_MODULE))
+ goto out_free;
+
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol) {
- err = -ENODEV;
+ if (!vol)
goto out_unlock;
- }
err = -EBUSY;
switch (mode) {
@@ -156,21 +162,19 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
vol->exclusive = 1;
break;
}
+ get_device(&vol->dev);
+ vol->ref_count += 1;
spin_unlock(&ubi->volumes_lock);
desc->vol = vol;
desc->mode = mode;
- /*
- * To prevent simultaneous checks of the same volume we use @vtbl_mutex,
- * although it is not the purpose it was introduced for.
- */
- mutex_lock(&ubi->vtbl_mutex);
+ mutex_lock(&ubi->ckvol_mutex);
if (!vol->checked) {
/* This is the first open - check the volume */
err = ubi_check_volume(ubi, vol_id);
if (err < 0) {
- mutex_unlock(&ubi->vtbl_mutex);
+ mutex_unlock(&ubi->ckvol_mutex);
ubi_close_volume(desc);
return ERR_PTR(err);
}
@@ -181,14 +185,17 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
}
vol->checked = 1;
}
- mutex_unlock(&ubi->vtbl_mutex);
+ mutex_unlock(&ubi->ckvol_mutex);
+
return desc;
out_unlock:
spin_unlock(&ubi->volumes_lock);
- kfree(desc);
-out_put:
module_put(THIS_MODULE);
+out_free:
+ kfree(desc);
+out_put_ubi:
+ ubi_put_device(ubi);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ubi_open_volume);
@@ -205,8 +212,8 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
int mode)
{
int i, vol_id = -1, len;
- struct ubi_volume_desc *ret;
struct ubi_device *ubi;
+ struct ubi_volume_desc *ret;
dbg_msg("open volume %s, mode %d", name, mode);
@@ -217,14 +224,12 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
if (len > UBI_VOL_NAME_MAX)
return ERR_PTR(-EINVAL);
- ret = ERR_PTR(-ENODEV);
- if (!try_module_get(THIS_MODULE))
- return ret;
-
- if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi_devices[ubi_num])
- goto out_put;
+ if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+ return ERR_PTR(-EINVAL);
- ubi = ubi_devices[ubi_num];
+ ubi = ubi_get_device(ubi_num);
+ if (!ubi)
+ return ERR_PTR(-ENODEV);
spin_lock(&ubi->volumes_lock);
/* Walk all volumes of this UBI device */
@@ -238,13 +243,16 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
}
spin_unlock(&ubi->volumes_lock);
- if (vol_id < 0)
- goto out_put;
-
- ret = ubi_open_volume(ubi_num, vol_id, mode);
+ if (vol_id >= 0)
+ ret = ubi_open_volume(ubi_num, vol_id, mode);
+ else
+ ret = ERR_PTR(-ENODEV);
-out_put:
- module_put(THIS_MODULE);
+ /*
+ * We should put the UBI device even in case of success, because
+ * 'ubi_open_volume()' took a reference as well.
+ */
+ ubi_put_device(ubi);
return ret;
}
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
@@ -256,10 +264,11 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
void ubi_close_volume(struct ubi_volume_desc *desc)
{
struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
- spin_lock(&vol->ubi->volumes_lock);
+ spin_lock(&ubi->volumes_lock);
switch (desc->mode) {
case UBI_READONLY:
vol->readers -= 1;
@@ -270,9 +279,12 @@ void ubi_close_volume(struct ubi_volume_desc *desc)
case UBI_EXCLUSIVE:
vol->exclusive = 0;
}
- spin_unlock(&vol->ubi->volumes_lock);
+ vol->ref_count -= 1;
+ spin_unlock(&ubi->volumes_lock);
kfree(desc);
+ put_device(&vol->dev);
+ ubi_put_device(ubi);
module_put(THIS_MODULE);
}
EXPORT_SYMBOL_GPL(ubi_close_volume);
@@ -332,7 +344,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
if (len == 0)
return 0;
- err = ubi_eba_read_leb(ubi, vol_id, lnum, buf, offset, len, check);
+ err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
if (err && err == -EBADMSG && vol->vol_type == UBI_STATIC_VOLUME) {
ubi_warn("mark volume %d as corrupted", vol_id);
vol->corrupted = 1;
@@ -399,7 +411,7 @@ int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (len == 0)
return 0;
- return ubi_eba_write_leb(ubi, vol_id, lnum, buf, offset, len, dtype);
+ return ubi_eba_write_leb(ubi, vol, lnum, buf, offset, len, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_write);
@@ -448,7 +460,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
if (len == 0)
return 0;
- return ubi_eba_atomic_leb_change(ubi, vol_id, lnum, buf, len, dtype);
+ return ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype);
}
EXPORT_SYMBOL_GPL(ubi_leb_change);
@@ -481,7 +493,7 @@ int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum)
if (vol->upd_marker)
return -EBADF;
- err = ubi_eba_unmap_leb(ubi, vol_id, lnum);
+ err = ubi_eba_unmap_leb(ubi, vol, lnum);
if (err)
return err;
@@ -542,11 +554,56 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum)
if (vol->upd_marker)
return -EBADF;
- return ubi_eba_unmap_leb(ubi, vol_id, lnum);
+ return ubi_eba_unmap_leb(ubi, vol, lnum);
}
EXPORT_SYMBOL_GPL(ubi_leb_unmap);
/**
+ * ubi_leb_map - map logical eraseblock to a physical eraseblock.
+ * @desc: volume descriptor
+ * @lnum: logical eraseblock number
+ * @dtype: expected data type
+ *
+ * This function maps an un-mapped logical eraseblock @lnum to a physical
+ * eraseblock. This means that after a successful invocation of this
+ * function the logical eraseblock @lnum will be empty (contain only %0xFF
+ * bytes) and be mapped to a physical eraseblock, even if an unclean reboot
+ * happens.
+ *
+ * This function returns zero in case of success, %-EBADF if the volume is
+ * damaged because of an interrupted update, %-EBADMSG if the logical
+ * eraseblock is already mapped, and other negative error codes in case of
+ * other failures.
+ */
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype)
+{
+ struct ubi_volume *vol = desc->vol;
+ struct ubi_device *ubi = vol->ubi;
+ int vol_id = vol->vol_id;
+
+ dbg_msg("unmap LEB %d:%d", vol_id, lnum);
+
+ if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+ return -EROFS;
+
+ if (lnum < 0 || lnum >= vol->reserved_pebs)
+ return -EINVAL;
+
+ if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM &&
+ dtype != UBI_UNKNOWN)
+ return -EINVAL;
+
+ if (vol->upd_marker)
+ return -EBADF;
+
+ if (vol->eba_tbl[lnum] >= 0)
+ return -EBADMSG;
+
+ return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0, dtype);
+}
+EXPORT_SYMBOL_GPL(ubi_leb_map);
+
+/**
* ubi_is_mapped - check if logical eraseblock is mapped.
* @desc: volume descriptor
* @lnum: logical eraseblock number
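The new ubi_leb_map() above completes the in-kernel API exported by kapi.c. The sketch below shows how a client module might combine it with the open/write/close calls from this file; the device and volume numbers, the -EBADMSG handling and the function name are illustrative only, not taken from the patch.

#include <linux/err.h>
#include <linux/mtd/ubi.h>

/* Pre-map LEB 0 of volume 0 on UBI device 0 and write a blob to LEB 1. */
static int example_use_volume(const void *blob, int len)
{
	struct ubi_volume_desc *desc;
	int err;

	desc = ubi_open_volume(0, 0, UBI_READWRITE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* Make sure LEB 0 is mapped (and thus reads back as 0xFF) */
	err = ubi_leb_map(desc, 0, UBI_LONGTERM);
	if (err && err != -EBADMSG)     /* -EBADMSG: already mapped, fine here */
		goto out;

	err = ubi_leb_write(desc, 1, blob, 0, len, UBI_UNKNOWN);
out:
	ubi_close_volume(desc);
	return err;
}

Per the comment above, pre-mapping a LEB guarantees it stays mapped and empty even across an unclean reboot.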
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index 9e2338c8e2c..93e05281201 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -79,7 +79,7 @@ int ubi_check_volume(struct ubi_device *ubi, int vol_id)
else
size = vol->usable_leb_size;
- err = ubi_eba_read_leb(ubi, vol_id, i, buf, 0, size, 1);
+ err = ubi_eba_read_leb(ubi, vol, i, buf, 0, size, 1);
if (err) {
if (err == -EBADMSG)
err = 1;
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index c7b0afc9d28..c57e8eff986 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -769,7 +769,7 @@ struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
*/
static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum)
{
- long long ec;
+ long long uninitialized_var(ec);
int err, bitflips = 0, vol_id, ec_corr = 0;
dbg_bld("scan PEB %d", pnum);
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 5e941a63303..ef22f922f58 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -94,8 +94,43 @@ enum {
UBI_IO_BITFLIPS
};
-extern int ubi_devices_cnt;
-extern struct ubi_device *ubi_devices[];
+/**
+ * struct ubi_wl_entry - wear-leveling entry.
+ * @rb: link in the corresponding RB-tree
+ * @ec: erase counter
+ * @pnum: physical eraseblock number
+ *
+ * This data structure is used in the WL unit. Each physical eraseblock has a
+ * corresponding &struct wl_entry object which may be kept in different
+ * RB-trees. See WL unit for details.
+ */
+struct ubi_wl_entry {
+ struct rb_node rb;
+ int ec;
+ int pnum;
+};
+
+/**
+ * struct ubi_ltree_entry - an entry in the lock tree.
+ * @rb: links RB-tree nodes
+ * @vol_id: volume ID of the locked logical eraseblock
+ * @lnum: locked logical eraseblock number
+ * @users: how many tasks are using this logical eraseblock or wait for it
+ * @mutex: read/write mutex to implement read/write access serialization to
+ * the (@vol_id, @lnum) logical eraseblock
+ *
+ * This data structure is used in the EBA unit to implement per-LEB locking.
+ * When a logical eraseblock is being locked - corresponding
+ * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree).
+ * See EBA unit for details.
+ */
+struct ubi_ltree_entry {
+ struct rb_node rb;
+ int vol_id;
+ int lnum;
+ int users;
+ struct rw_semaphore mutex;
+};
struct ubi_volume_desc;
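The lock tree documented above is keyed by the (@vol_id, @lnum) pair. That ordering can be written as a plain comparison, which is all an RB-tree lookup needs; a minimal sketch (the function name is made up for illustration):

/* Order lock-tree entries first by volume ID, then by LEB number. */
static int ltree_key_cmp(int vol_id1, int lnum1, int vol_id2, int lnum2)
{
	if (vol_id1 != vol_id2)
		return vol_id1 < vol_id2 ? -1 : 1;
	if (lnum1 != lnum2)
		return lnum1 < lnum2 ? -1 : 1;
	return 0;       /* same logical eraseblock */
}

A lookup walks left when its key compares smaller and right when it compares larger, and @users counts how many tasks hold or wait for the entry so it can be freed once the count drops to zero.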
@@ -105,10 +140,10 @@ struct ubi_volume_desc;
* @cdev: character device object to create character device
* @ubi: reference to the UBI device description object
* @vol_id: volume ID
+ * @ref_count: volume reference count
* @readers: number of users holding this volume in read-only mode
* @writers: number of users holding this volume in read-write mode
* @exclusive: whether somebody holds this volume in exclusive mode
- * @removed: if the volume was removed
* @checked: if this static volume was checked
*
* @reserved_pebs: how many physical eraseblocks are reserved for this volume
@@ -121,7 +156,7 @@ struct ubi_volume_desc;
* @corrupted: non-zero if the volume is corrupted (static volumes only)
* @alignment: volume alignment
* @data_pad: how many bytes are not used at the end of physical eraseblocks to
- * satisfy the requested alignment
+ * satisfy the requested alignment
* @name_len: volume name length
* @name: volume name
*
@@ -150,10 +185,10 @@ struct ubi_volume {
struct cdev cdev;
struct ubi_device *ubi;
int vol_id;
+ int ref_count;
int readers;
int writers;
int exclusive;
- int removed;
int checked;
int reserved_pebs;
@@ -200,17 +235,17 @@ struct ubi_wl_entry;
/**
* struct ubi_device - UBI device description structure
- * @dev: class device object to use the the Linux device model
+ * @dev: UBI device object to use the Linux device model
* @cdev: character device object to create character device
* @ubi_num: UBI device number
* @ubi_name: UBI device name
- * @major: character device major number
* @vol_count: number of volumes in this UBI device
* @volumes: volumes of this UBI device
* @volumes_lock: protects @volumes, @rsvd_pebs, @avail_pebs, beb_rsvd_pebs,
- * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count, @vol->readers,
- * @vol->writers, @vol->exclusive, @vol->removed, @vol->mapping and
- * @vol->eba_tbl.
+ * @beb_rsvd_level, @bad_peb_count, @good_peb_count, @vol_count,
+ * @vol->readers, @vol->writers, @vol->exclusive,
+ * @vol->ref_count, @vol->mapping and @vol->eba_tbl.
+ * @ref_count: count of references on the UBI device
*
* @rsvd_pebs: count of reserved physical eraseblocks
* @avail_pebs: count of available physical eraseblocks
@@ -221,7 +256,8 @@ struct ubi_wl_entry;
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
- * @vtbl_mutex: protects on-flash volume table
+ * @volumes_mutex: protects on-flash volume table and serializes volume
+ * changes, like creation, deletion, update, resize
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
@@ -238,15 +274,15 @@ struct ubi_wl_entry;
* @prot.pnum: protection tree indexed by physical eraseblock numbers
* @prot.aec: protection tree indexed by absolute erase counter value
* @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
- * fields
+ * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
+ * fields
+ * @move_mutex: serializes eraseblock moves
* @wl_scheduled: non-zero if the wear-leveling was scheduled
* @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
- * physical eraseblock
+ * physical eraseblock
* @abs_ec: absolute erase counter
* @move_from: physical eraseblock from where the data is being moved
* @move_to: physical eraseblock where the data is being moved to
- * @move_from_put: if the "from" PEB was put
* @move_to_put: if the "to" PEB was put
* @works: list of pending works
* @works_count: count of pending works
@@ -273,13 +309,13 @@ struct ubi_wl_entry;
* @hdrs_min_io_size
* @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
- * not
+ * not
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: proptects @peb_buf1 and @peb_buf2
- * @dbg_peb_buf: buffer of PEB size used for debugging
+ * @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_buf_mutex: proptects @dbg_peb_buf
*/
struct ubi_device {
@@ -287,10 +323,10 @@ struct ubi_device {
struct device dev;
int ubi_num;
char ubi_name[sizeof(UBI_NAME_STR)+5];
- int major;
int vol_count;
struct ubi_volume *volumes[UBI_MAX_VOLUMES+UBI_INT_VOL_COUNT];
spinlock_t volumes_lock;
+ int ref_count;
int rsvd_pebs;
int avail_pebs;
@@ -300,7 +336,7 @@ struct ubi_device {
int vtbl_slots;
int vtbl_size;
struct ubi_vtbl_record *vtbl;
- struct mutex vtbl_mutex;
+ struct mutex volumes_mutex;
int max_ec;
int mean_ec;
@@ -320,12 +356,13 @@ struct ubi_device {
struct rb_root aec;
} prot;
spinlock_t wl_lock;
+ struct mutex move_mutex;
+ struct rw_semaphore work_sem;
int wl_scheduled;
struct ubi_wl_entry **lookuptbl;
unsigned long long abs_ec;
struct ubi_wl_entry *move_from;
struct ubi_wl_entry *move_to;
- int move_from_put;
int move_to_put;
struct list_head works;
int works_count;
@@ -355,15 +392,20 @@ struct ubi_device {
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
+ struct mutex ckvol_mutex;
#ifdef CONFIG_MTD_UBI_DEBUG
void *dbg_peb_buf;
struct mutex dbg_buf_mutex;
#endif
};
+extern struct kmem_cache *ubi_ltree_slab;
+extern struct kmem_cache *ubi_wl_entry_slab;
+extern struct file_operations ubi_ctrl_cdev_operations;
extern struct file_operations ubi_cdev_operations;
extern struct file_operations ubi_vol_cdev_operations;
extern struct class *ubi_class;
+extern struct mutex ubi_devices_mutex;
/* vtbl.c */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
@@ -374,8 +416,8 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si);
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req);
int ubi_remove_volume(struct ubi_volume_desc *desc);
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs);
-int ubi_add_volume(struct ubi_device *ubi, int vol_id);
-void ubi_free_volume(struct ubi_device *ubi, int vol_id);
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol);
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol);
/* upd.c */
int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes);
@@ -399,16 +441,17 @@ void ubi_gluebi_updated(struct ubi_volume *vol);
#endif
/* eba.c */
-int ubi_eba_unmap_leb(struct ubi_device *ubi, int vol_id, int lnum);
-int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
- int offset, int len, int check);
-int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
+int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum);
+int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ void *buf, int offset, int len, int check);
+int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
const void *buf, int offset, int len, int dtype);
-int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype,
+int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype,
int used_ebs);
-int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
- const void *buf, int len, int dtype);
+int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+ int lnum, const void *buf, int len, int dtype);
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
@@ -421,6 +464,7 @@ int ubi_wl_flush(struct ubi_device *ubi);
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si);
void ubi_wl_close(struct ubi_device *ubi);
+int ubi_thread(void *u);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
@@ -439,6 +483,14 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
+/* build.c */
+int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
+int ubi_detach_mtd_dev(int ubi_num, int anyway);
+struct ubi_device *ubi_get_device(int ubi_num);
+void ubi_put_device(struct ubi_device *ubi);
+struct ubi_device *ubi_get_by_major(int major);
+int ubi_major2num(int major);
+
/*
* ubi_rb_for_each_entry - walk an RB-tree.
* @rb: a pointer to type 'struct rb_node' to to use as a loop counter
@@ -523,8 +575,10 @@ static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
*/
static inline void ubi_ro_mode(struct ubi_device *ubi)
{
- ubi->ro_mode = 1;
- ubi_warn("switch to read-only mode");
+ if (!ubi->ro_mode) {
+ ubi->ro_mode = 1;
+ ubi_warn("switch to read-only mode");
+ }
}
/**
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0efc586a832..e32b04d2e04 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -67,7 +67,9 @@ static int set_update_marker(struct ubi_device *ubi, int vol_id)
memcpy(&vtbl_rec, &ubi->vtbl[vol_id], sizeof(struct ubi_vtbl_record));
vtbl_rec.upd_marker = 1;
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 1;
return err;
}
@@ -106,7 +108,9 @@ static int clear_update_marker(struct ubi_device *ubi, int vol_id, long long byt
vol->last_eb_bytes = vol->usable_leb_size;
}
+ mutex_lock(&ubi->volumes_mutex);
err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
+ mutex_unlock(&ubi->volumes_mutex);
vol->upd_marker = 0;
return err;
}
@@ -136,7 +140,7 @@ int ubi_start_update(struct ubi_device *ubi, int vol_id, long long bytes)
/* Before updating - wipe out the volume */
for (i = 0; i < vol->reserved_pebs; i++) {
- err = ubi_eba_unmap_leb(ubi, vol_id, i);
+ err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
return err;
}
@@ -209,8 +213,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
if (len != l)
dbg_msg("skip last %d bytes (0xFF)", len - l);
- err = ubi_eba_write_leb(ubi, vol_id, lnum, buf, 0, l,
- UBI_UNKNOWN);
+ err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, l, UBI_UNKNOWN);
} else {
/*
* When writing static volume, and this is the last logical
@@ -222,7 +225,7 @@ static int write_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
* contain zeros, not random trash.
*/
memset(buf + len, 0, vol->usable_leb_size - len);
- err = ubi_eba_write_leb_st(ubi, vol_id, lnum, buf, len,
+ err = ubi_eba_write_leb_st(ubi, vol, lnum, buf, len,
UBI_UNKNOWN, used_ebs);
}
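The "skip last %d bytes (0xFF)" path above exploits the fact that erased flash reads back as 0xFF, so trailing 0xFF bytes of a dynamic-volume LEB need not be written at all. A small stand-alone sketch of that trim follows; the helper name is made up, and the in-kernel code additionally rounds the resulting length up to the minimal I/O unit, which this sketch ignores.

#include <stdio.h>
#include <string.h>

/* Return the length of @buf once trailing 0xFF (erased-flash) bytes are dropped. */
static size_t trim_trailing_ff(const unsigned char *buf, size_t len)
{
	while (len > 0 && buf[len - 1] == 0xFF)
		len -= 1;
	return len;
}

int main(void)
{
	unsigned char leb[16];

	memset(leb, 0xFF, sizeof(leb));      /* "erased" LEB image */
	memcpy(leb, "data", 4);              /* only the first 4 bytes carry data */

	printf("write %zu of %zu bytes\n",
	       trim_trailing_ff(leb, sizeof(leb)), sizeof(leb)); /* write 4 of 16 bytes */
	return 0;
}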
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 88629a320c2..177227e1f80 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -63,21 +63,30 @@ static struct device_attribute attr_vol_upd_marker =
* B. process 2 removes volume Y;
* C. process 1 starts reading the /<sysfs>/class/ubi/ubiX_Y/reserved_ebs file;
*
- * What we want to do in a situation like that is to return error when the file
- * is read. This is done by means of the 'removed' flag and the 'vol_lock' of
- * the UBI volume description object.
+ * In this situation, this function will return %-ENODEV because it will find
+ * out that the volume was removed from the @ubi->volumes array.
*/
static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int ret;
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
+ struct ubi_device *ubi;
- spin_lock(&vol->ubi->volumes_lock);
- if (vol->removed) {
- spin_unlock(&vol->ubi->volumes_lock);
+ ubi = ubi_get_device(vol->ubi->ubi_num);
+ if (!ubi)
+ return -ENODEV;
+
+ spin_lock(&ubi->volumes_lock);
+ if (!ubi->volumes[vol->vol_id]) {
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
return -ENODEV;
}
+ /* Take a reference to prevent volume removal */
+ vol->ref_count += 1;
+ spin_unlock(&ubi->volumes_lock);
+
if (attr == &attr_vol_reserved_ebs)
ret = sprintf(buf, "%d\n", vol->reserved_pebs);
else if (attr == &attr_vol_type) {
@@ -94,15 +103,22 @@ static ssize_t vol_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", vol->corrupted);
else if (attr == &attr_vol_alignment)
ret = sprintf(buf, "%d\n", vol->alignment);
- else if (attr == &attr_vol_usable_eb_size) {
+ else if (attr == &attr_vol_usable_eb_size)
ret = sprintf(buf, "%d\n", vol->usable_leb_size);
- } else if (attr == &attr_vol_data_bytes)
+ else if (attr == &attr_vol_data_bytes)
ret = sprintf(buf, "%lld\n", vol->used_bytes);
else if (attr == &attr_vol_upd_marker)
ret = sprintf(buf, "%d\n", vol->upd_marker);
else
- BUG();
- spin_unlock(&vol->ubi->volumes_lock);
+ /* This must be a bug */
+ ret = -EINVAL;
+
+ /* We've done the operation, drop volume and UBI device references */
+ spin_lock(&ubi->volumes_lock);
+ vol->ref_count -= 1;
+ ubi_assert(vol->ref_count >= 0);
+ spin_unlock(&ubi->volumes_lock);
+ ubi_put_device(ubi);
return ret;
}
@@ -110,7 +126,7 @@ static ssize_t vol_attribute_show(struct device *dev,
static void vol_release(struct device *dev)
{
struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev);
- ubi_assert(vol->removed);
+
kfree(vol);
}
@@ -152,9 +168,7 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
if (err)
return err;
err = device_create_file(&vol->dev, &attr_vol_upd_marker);
- if (err)
- return err;
- return 0;
+ return err;
}
/**
@@ -180,16 +194,18 @@ static void volume_sysfs_close(struct ubi_volume *vol)
* @req: volume creation request
*
* This function creates volume described by @req. If @req->vol_id id
- * %UBI_VOL_NUM_AUTO, this function automatically assigne ID to the new volume
+ * %UBI_VOL_NUM_AUTO, this function automatically assigns an ID to the new volume
* and saves it in @req->vol_id. Returns zero in case of success and a negative
- * error code in case of failure.
+ * error code in case of failure. Note, the caller has to have the
+ * @ubi->volumes_mutex locked.
*/
int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
{
- int i, err, vol_id = req->vol_id;
+ int i, err, vol_id = req->vol_id, dont_free = 0;
struct ubi_volume *vol;
struct ubi_vtbl_record vtbl_rec;
uint64_t bytes;
+ dev_t dev;
if (ubi->ro_mode)
return -EROFS;
@@ -199,7 +215,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
return -ENOMEM;
spin_lock(&ubi->volumes_lock);
-
if (vol_id == UBI_VOL_NUM_AUTO) {
/* Find unused volume ID */
dbg_msg("search for vacant volume ID");
@@ -252,6 +267,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
}
ubi->avail_pebs -= vol->reserved_pebs;
ubi->rsvd_pebs += vol->reserved_pebs;
+ spin_unlock(&ubi->volumes_lock);
vol->vol_id = vol_id;
vol->alignment = req->alignment;
@@ -259,10 +275,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->vol_type = req->vol_type;
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len + 1);
- vol->exclusive = 1;
vol->ubi = ubi;
- ubi->volumes[vol_id] = vol;
- spin_unlock(&ubi->volumes_lock);
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -299,9 +312,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
- err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol_id + 1), 1);
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device for volume %d", vol_id);
+ ubi_err("cannot add character device");
goto out_mapping;
}
@@ -311,12 +325,15 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
- vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
+ vol->dev.devt = dev;
vol->dev.class = ubi_class;
+
sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
- if (err)
+ if (err) {
+ ubi_err("cannot register device");
goto out_gluebi;
+ }
err = volume_sysfs_init(ubi, vol);
if (err)
@@ -339,15 +356,27 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
goto out_sysfs;
spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
ubi->vol_count += 1;
- vol->exclusive = 0;
spin_unlock(&ubi->volumes_lock);
paranoid_check_volumes(ubi);
return 0;
+out_sysfs:
+ /*
+ * We have registered our device, so we should not free the volume
+ * description object in this function in case of an error - it is
+ * freed by the release function.
+ *
+ * Get device reference to prevent the release function from being
+ * called just after sysfs has been closed.
+ */
+ dont_free = 1;
+ get_device(&vol->dev);
+ volume_sysfs_close(vol);
out_gluebi:
- err = ubi_destroy_gluebi(vol);
+ ubi_destroy_gluebi(vol);
out_cdev:
cdev_del(&vol->cdev);
out_mapping:
@@ -356,26 +385,13 @@ out_acc:
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= vol->reserved_pebs;
ubi->avail_pebs += vol->reserved_pebs;
- ubi->volumes[vol_id] = NULL;
out_unlock:
spin_unlock(&ubi->volumes_lock);
- kfree(vol);
- return err;
-
- /*
- * We are registered, so @vol is destroyed in the release function and
- * we have to de-initialize differently.
- */
-out_sysfs:
- err = ubi_destroy_gluebi(vol);
- cdev_del(&vol->cdev);
- kfree(vol->eba_tbl);
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= vol->reserved_pebs;
- ubi->avail_pebs += vol->reserved_pebs;
- ubi->volumes[vol_id] = NULL;
- spin_unlock(&ubi->volumes_lock);
- volume_sysfs_close(vol);
+ if (dont_free)
+ put_device(&vol->dev);
+ else
+ kfree(vol);
+ ubi_err("cannot create volume %d, error %d", vol_id, err);
return err;
}
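The dont_free/get_device() handling in the error path above follows the usual driver-model rule: once device_register() has been called, the object embedding the struct device must be freed by its ->release() callback via put_device(), never by a direct kfree(). A stripped-down sketch of that rule; the wrapper structure and function names are illustrative.

#include <linux/device.h>
#include <linux/slab.h>

struct my_obj {
	struct device dev;
	/* ... driver-specific fields ... */
};

static void my_obj_release(struct device *dev)
{
	kfree(container_of(dev, struct my_obj, dev));
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj;
	int err;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	obj->dev.release = my_obj_release;
	sprintf(obj->dev.bus_id, "my_obj0");   /* device name, as vmt.c does */

	err = device_register(&obj->dev);
	if (err) {
		/* registration was attempted: put_device(), never kfree() */
		put_device(&obj->dev);
		return NULL;
	}
	return obj;
}

static void my_obj_destroy(struct my_obj *obj)
{
	/* drops the initial reference; my_obj_release() does the freeing */
	device_unregister(&obj->dev);
}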
@@ -385,7 +401,8 @@ out_sysfs:
*
* This function removes volume described by @desc. The volume has to be opened
* in "exclusive" mode. Returns zero in case of success and a negative error
- * code in case of failure.
+ * code in case of failure. The caller has to have the @ubi->volumes_mutex
+ * locked.
*/
int ubi_remove_volume(struct ubi_volume_desc *desc)
{
@@ -400,30 +417,36 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
if (ubi->ro_mode)
return -EROFS;
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ /*
+ * The volume is busy, probably someone is reading one of its
+ * sysfs files.
+ */
+ err = -EBUSY;
+ goto out_unlock;
+ }
+ ubi->volumes[vol_id] = NULL;
+ spin_unlock(&ubi->volumes_lock);
+
err = ubi_destroy_gluebi(vol);
if (err)
- return err;
+ goto out_err;
err = ubi_change_vtbl_record(ubi, vol_id, NULL);
if (err)
- return err;
+ goto out_err;
for (i = 0; i < vol->reserved_pebs; i++) {
- err = ubi_eba_unmap_leb(ubi, vol_id, i);
+ err = ubi_eba_unmap_leb(ubi, vol, i);
if (err)
- return err;
+ goto out_err;
}
- spin_lock(&ubi->volumes_lock);
- vol->removed = 1;
- ubi->volumes[vol_id] = NULL;
- spin_unlock(&ubi->volumes_lock);
-
kfree(vol->eba_tbl);
vol->eba_tbl = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
- kfree(desc);
spin_lock(&ubi->volumes_lock);
ubi->rsvd_pebs -= reserved_pebs;
@@ -441,8 +464,15 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
spin_unlock(&ubi->volumes_lock);
paranoid_check_volumes(ubi);
- module_put(THIS_MODULE);
return 0;
+
+out_err:
+ ubi_err("cannot remove volume %d, error %d", vol_id, err);
+ spin_lock(&ubi->volumes_lock);
+ ubi->volumes[vol_id] = vol;
+out_unlock:
+ spin_unlock(&ubi->volumes_lock);
+ return err;
}
/**
@@ -450,8 +480,9 @@ int ubi_remove_volume(struct ubi_volume_desc *desc)
* @desc: volume descriptor
* @reserved_pebs: new size in physical eraseblocks
*
- * This function returns zero in case of success, and a negative error code in
- * case of failure.
+ * This function re-sizes the volume and returns zero in case of success, and a
+ * negative error code in case of failure. The caller has to have the
+ * @ubi->volumes_mutex locked.
*/
int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
{
@@ -487,6 +518,15 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
for (i = 0; i < reserved_pebs; i++)
new_mapping[i] = UBI_LEB_UNMAPPED;
+ spin_lock(&ubi->volumes_lock);
+ if (vol->ref_count > 1) {
+ spin_unlock(&ubi->volumes_lock);
+ err = -EBUSY;
+ goto out_free;
+ }
+ spin_unlock(&ubi->volumes_lock);
+
+
/* Reserve physical eraseblocks */
pebs = reserved_pebs - vol->reserved_pebs;
if (pebs > 0) {
@@ -516,7 +556,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (pebs < 0) {
for (i = 0; i < -pebs; i++) {
- err = ubi_eba_unmap_leb(ubi, vol_id, reserved_pebs + i);
+ err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i);
if (err)
goto out_acc;
}
@@ -565,27 +605,28 @@ out_free:
/**
* ubi_add_volume - add volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
*
- * This function adds an existin volume and initializes all its data
- * structures. Returnes zero in case of success and a negative error code in
+ * This function adds an existing volume and initializes all its data
+ * structures. Returns zero in case of success and a negative error code in
* case of failure.
*/
-int ubi_add_volume(struct ubi_device *ubi, int vol_id)
+int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
- int err;
- struct ubi_volume *vol = ubi->volumes[vol_id];
+ int err, vol_id = vol->vol_id;
+ dev_t dev;
dbg_msg("add volume %d", vol_id);
ubi_dbg_dump_vol_info(vol);
- ubi_assert(vol);
/* Register character device for the volume */
cdev_init(&vol->cdev, &ubi_vol_cdev_operations);
vol->cdev.owner = THIS_MODULE;
- err = cdev_add(&vol->cdev, MKDEV(ubi->major, vol->vol_id + 1), 1);
+ dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
+ err = cdev_add(&vol->cdev, dev, 1);
if (err) {
- ubi_err("cannot add character device for volume %d", vol_id);
+ ubi_err("cannot add character device for volume %d, error %d",
+ vol_id, err);
return err;
}
@@ -595,7 +636,7 @@ int ubi_add_volume(struct ubi_device *ubi, int vol_id)
vol->dev.release = vol_release;
vol->dev.parent = &ubi->dev;
- vol->dev.devt = MKDEV(ubi->major, vol->vol_id + 1);
+ vol->dev.devt = dev;
vol->dev.class = ubi_class;
sprintf(&vol->dev.bus_id[0], "%s_%d", ubi->ubi_name, vol->vol_id);
err = device_register(&vol->dev);
@@ -623,22 +664,19 @@ out_cdev:
/**
* ubi_free_volume - free volume.
* @ubi: UBI device description object
- * @vol_id: volume ID
+ * @vol: volume description object
*
- * This function frees all resources for volume @vol_id but does not remove it.
+ * This function frees all resources for volume @vol but does not remove it.
* Used only when the UBI device is detached.
*/
-void ubi_free_volume(struct ubi_device *ubi, int vol_id)
+void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
- struct ubi_volume *vol = ubi->volumes[vol_id];
- dbg_msg("free volume %d", vol_id);
- ubi_assert(vol);
+ dbg_msg("free volume %d", vol->vol_id);
- vol->removed = 1;
+ ubi->volumes[vol->vol_id] = NULL;
err = ubi_destroy_gluebi(vol);
- ubi->volumes[vol_id] = NULL;
cdev_del(&vol->cdev);
volume_sysfs_close(vol);
}
@@ -820,9 +858,7 @@ static void paranoid_check_volumes(struct ubi_device *ubi)
{
int i;
- mutex_lock(&ubi->vtbl_mutex);
for (i = 0; i < ubi->vtbl_slots; i++)
paranoid_check_volume(ubi, i);
- mutex_unlock(&ubi->vtbl_mutex);
}
#endif
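The @vol->ref_count handling added throughout vmt.c boils down to one pattern: bump the count under @ubi->volumes_lock before touching the volume, drop it afterwards, and let removal fail with -EBUSY while the count is elevated. A small user-space model of that pattern; the structure and function names are illustrative, only the -EBUSY/-ENODEV choices mirror the patch.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct volume {
	pthread_mutex_t lock;   /* models @ubi->volumes_lock */
	int ref_count;          /* models vol->ref_count */
	int present;            /* models ubi->volumes[vol_id] != NULL */
};

/* Grab a reference; fails if the volume has already been removed. */
static int volume_get(struct volume *vol)
{
	int err = 0;

	pthread_mutex_lock(&vol->lock);
	if (!vol->present)
		err = -ENODEV;
	else
		vol->ref_count += 1;
	pthread_mutex_unlock(&vol->lock);
	return err;
}

static void volume_put(struct volume *vol)
{
	pthread_mutex_lock(&vol->lock);
	vol->ref_count -= 1;
	pthread_mutex_unlock(&vol->lock);
}

/* Removal is refused while anybody else still holds a reference. */
static int volume_remove(struct volume *vol)
{
	int err = 0;

	pthread_mutex_lock(&vol->lock);
	if (vol->ref_count > 1)        /* "1" is the opener's own reference */
		err = -EBUSY;
	else
		vol->present = 0;
	pthread_mutex_unlock(&vol->lock);
	return err;
}

int main(void)
{
	struct volume vol = { PTHREAD_MUTEX_INITIALIZER, 1, 1 };

	volume_get(&vol);                            /* e.g. a sysfs reader */
	printf("remove: %d\n", volume_remove(&vol)); /* -EBUSY (-16 on Linux) */
	volume_put(&vol);
	printf("remove: %d\n", volume_remove(&vol)); /* 0 */
	return 0;
}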
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 25b3bd61c7e..7a1a8a1da61 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -86,8 +86,10 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
{
int i, err;
uint32_t crc;
+ struct ubi_volume *layout_vol;
ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);
+ layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOL_ID)];
if (!vtbl_rec)
vtbl_rec = &empty_vtbl_record;
@@ -96,31 +98,25 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
vtbl_rec->crc = cpu_to_be32(crc);
}
- mutex_lock(&ubi->vtbl_mutex);
memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
- err = ubi_eba_unmap_leb(ubi, UBI_LAYOUT_VOL_ID, i);
- if (err) {
- mutex_unlock(&ubi->vtbl_mutex);
+ err = ubi_eba_unmap_leb(ubi, layout_vol, i);
+ if (err)
return err;
- }
- err = ubi_eba_write_leb(ubi, UBI_LAYOUT_VOL_ID, i, ubi->vtbl, 0,
+
+ err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
ubi->vtbl_size, UBI_LONGTERM);
- if (err) {
- mutex_unlock(&ubi->vtbl_mutex);
+ if (err)
return err;
- }
}
paranoid_vtbl_check(ubi);
- mutex_unlock(&ubi->vtbl_mutex);
return ubi_wl_flush(ubi);
}
/**
- * vol_til_check - check if volume table is not corrupted and contains sensible
- * data.
- *
+ * vtbl_check - check if volume table is not corrupted and contains sensible
+ * data.
* @ubi: UBI device description object
* @vtbl: volume table
*
@@ -568,6 +564,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->last_eb_bytes = sv->last_data_size;
}
+ /* And add the layout volume */
vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
if (!vol)
return -ENOMEM;
@@ -583,6 +580,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
vol->used_bytes =
(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
vol->vol_id = UBI_LAYOUT_VOL_ID;
+ vol->ref_count = 1;
ubi_assert(!ubi->volumes[i]);
ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
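ubi_change_vtbl_record() above patches the in-RAM table and then refreshes every redundant copy kept in the layout volume (unmap, then rewrite the whole table). A toy user-space model of that redundancy scheme; the sizes and the 'flash' arrays are made up for illustration.

#include <stdio.h>
#include <string.h>

#define COPIES   2      /* stands in for UBI_LAYOUT_VOLUME_EBS */
#define TBL_SIZE 64     /* stands in for ubi->vtbl_size */

static unsigned char ram_tbl[TBL_SIZE];          /* in-RAM master copy */
static unsigned char flash[COPIES][TBL_SIZE];    /* redundant on-"flash" copies */

/* Patch one record in RAM, then refresh every on-flash copy from it. */
static void change_record(int off, const void *rec, size_t len)
{
	int i;

	memcpy(ram_tbl + off, rec, len);
	for (i = 0; i < COPIES; i++) {
		memset(flash[i], 0xFF, TBL_SIZE);        /* "unmap"/erase the copy */
		memcpy(flash[i], ram_tbl, TBL_SIZE);     /* write the whole table back */
	}
}

int main(void)
{
	change_record(0, "vol0", 4);
	printf("copies identical: %s\n",
	       memcmp(flash[0], flash[1], TBL_SIZE) == 0 ? "yes" : "no");
	return 0;
}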
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6330c8cc72b..0d44ad95ab8 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -117,21 +117,6 @@
#define WL_MAX_FAILURES 32
/**
- * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
- * @ec: erase counter
- * @pnum: physical eraseblock number
- *
- * Each physical eraseblock has a corresponding &struct wl_entry object which
- * may be kept in different RB-trees.
- */
-struct ubi_wl_entry {
- struct rb_node rb;
- int ec;
- int pnum;
-};
-
-/**
* struct ubi_wl_prot_entry - PEB protection entry.
* @rb_pnum: link in the @wl->prot.pnum RB-tree
* @rb_aec: link in the @wl->prot.aec RB-tree
@@ -216,9 +201,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
#define paranoid_check_in_wl_tree(e, root)
#endif
-/* Slab cache for wear-leveling entries */
-static struct kmem_cache *wl_entries_slab;
-
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
@@ -267,15 +249,26 @@ static int do_work(struct ubi_device *ubi)
int err;
struct ubi_work *wrk;
- spin_lock(&ubi->wl_lock);
+ cond_resched();
+ /*
+ * @ubi->work_sem is used to synchronize with the workers. Workers take
+ * it in read mode, so many of them may be doing work at a time. But
+ * the queue flush code has to be sure the whole queue of works is
+ * done, so it takes @ubi->work_sem in write mode.
+ */
+ down_read(&ubi->work_sem);
+ spin_lock(&ubi->wl_lock);
if (list_empty(&ubi->works)) {
spin_unlock(&ubi->wl_lock);
+ up_read(&ubi->work_sem);
return 0;
}
wrk = list_entry(ubi->works.next, struct ubi_work, list);
list_del(&wrk->list);
+ ubi->works_count -= 1;
+ ubi_assert(ubi->works_count >= 0);
spin_unlock(&ubi->wl_lock);
/*
@@ -286,11 +279,8 @@ static int do_work(struct ubi_device *ubi)
err = wrk->func(ubi, wrk, 0);
if (err)
ubi_err("work failed with error code %d", err);
+ up_read(&ubi->work_sem);
- spin_lock(&ubi->wl_lock);
- ubi->works_count -= 1;
- ubi_assert(ubi->works_count >= 0);
- spin_unlock(&ubi->wl_lock);
return err;
}
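The @ubi->work_sem scheme described in do_work() above is the familiar "workers as readers, flush as a write-mode barrier" pattern: each worker runs its job under the read lock, and the flush path takes the lock in write mode purely to wait until every in-flight worker has finished. A user-space model with a pthread rwlock; the worker body is a stub.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER; /* models @ubi->work_sem */

/* Models do_work(): the whole job runs under the read lock. */
static void *worker(void *arg)
{
	pthread_rwlock_rdlock(&work_sem);
	usleep(100 * 1000);              /* pretend to erase a PEB */
	printf("worker done\n");
	pthread_rwlock_unlock(&work_sem);
	return NULL;
}

/* Models the barrier in ubi_wl_flush(): take the lock in write mode and drop it. */
static void flush_barrier(void)
{
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);
	printf("all in-flight work finished\n");
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	usleep(10 * 1000);   /* give the workers a moment to take the read lock */
	flush_barrier();     /* waits for every worker that already holds the read lock */
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}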
@@ -549,8 +539,12 @@ retry:
* prot_tree_del - remove a physical eraseblock from the protection trees
* @ubi: UBI device description object
* @pnum: the physical eraseblock to remove
+ *
+ * This function removes PEB @pnum from the protection trees and returns zero
+ * in case of success and %-ENODEV if the PEB was not found in the protection
+ * trees.
*/
-static void prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_tree_del(struct ubi_device *ubi, int pnum)
{
struct rb_node *p;
struct ubi_wl_prot_entry *pe = NULL;
@@ -561,7 +555,7 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
if (pnum == pe->e->pnum)
- break;
+ goto found;
if (pnum < pe->e->pnum)
p = p->rb_left;
@@ -569,10 +563,14 @@ static void prot_tree_del(struct ubi_device *ubi, int pnum)
p = p->rb_right;
}
+ return -ENODEV;
+
+found:
ubi_assert(pe->e->pnum == pnum);
rb_erase(&pe->rb_aec, &ubi->prot.aec);
rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
kfree(pe);
+ return 0;
}
/**
@@ -744,7 +742,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int cancel)
{
- int err, put = 0;
+ int err, put = 0, scrubbing = 0, protect = 0;
+ struct ubi_wl_prot_entry *uninitialized_var(pe);
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
@@ -757,21 +756,17 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!vid_hdr)
return -ENOMEM;
+ mutex_lock(&ubi->move_mutex);
spin_lock(&ubi->wl_lock);
+ ubi_assert(!ubi->move_from && !ubi->move_to);
+ ubi_assert(!ubi->move_to_put);
- /*
- * Only one WL worker at a time is supported at this implementation, so
- * make sure a PEB is not being moved already.
- */
- if (ubi->move_to || !ubi->free.rb_node ||
+ if (!ubi->free.rb_node ||
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
- * Only one WL worker at a time is supported at this
- * implementation, so if a LEB is already being moved, cancel.
- *
- * No free physical eraseblocks? Well, we cancel wear-leveling
- * then. It will be triggered again when a free physical
- * eraseblock appears.
+ * No free physical eraseblocks? Well, they must be waiting in
+ * the queue to be erased. Cancel movement - it will be
+ * triggered again when a free physical eraseblock appears.
*
* No used physical eraseblocks? They must be temporarily
* protected from being moved. They will be moved to the
@@ -780,10 +775,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("cancel WL, a list is empty: free %d, used %d",
!ubi->free.rb_node, !ubi->used.rb_node);
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ goto out_cancel;
}
if (!ubi->scrub.rb_node) {
@@ -798,27 +790,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
e1->ec, e2->ec);
- ubi->wl_scheduled = 0;
- spin_unlock(&ubi->wl_lock);
- ubi_free_vid_hdr(ubi, vid_hdr);
- return 0;
+ goto out_cancel;
}
paranoid_check_in_wl_tree(e1, &ubi->used);
rb_erase(&e1->rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
+ /* Perform scrubbing */
+ scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
paranoid_check_in_wl_tree(e1, &ubi->scrub);
- rb_erase(&e1->rb, &ubi->scrub);
+ rb_erase(&e1->rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
paranoid_check_in_wl_tree(e2, &ubi->free);
rb_erase(&e2->rb, &ubi->free);
- ubi_assert(!ubi->move_from && !ubi->move_to);
- ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
@@ -828,6 +817,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* We so far do not know which logical eraseblock our physical
* eraseblock (@e1) belongs to. We have to read the volume identifier
* header first.
+ *
+ * Note, we are protected from this PEB being unmapped and erased:
+ * 'ubi_wl_put_peb()' would wait for the move to finish if the PEB
+ * being moved were unmapped.
*/
err = ubi_io_read_vid_hdr(ubi, e1->pnum, vid_hdr, 0);
@@ -842,32 +835,51 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* likely have the VID header in place.
*/
dbg_wl("PEB %d has no VID header", e1->pnum);
- err = 0;
- } else {
- ubi_err("error %d while reading VID header from PEB %d",
- err, e1->pnum);
- if (err > 0)
- err = -EIO;
+ goto out_not_moved;
}
- goto error;
+
+ ubi_err("error %d while reading VID header from PEB %d",
+ err, e1->pnum);
+ if (err > 0)
+ err = -EIO;
+ goto out_error;
}
err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
if (err) {
- if (err == UBI_IO_BITFLIPS)
- err = 0;
- goto error;
+
+ if (err < 0)
+ goto out_error;
+ if (err == 1)
+ goto out_not_moved;
+
+ /*
+ * For some reason the LEB was not moved - it might be because
+ * the volume is being deleted. We should prevent this PEB from
+ * being selected for wear-leveling movement for some "time",
+ * so put it into the protection tree.
+ */
+
+ dbg_wl("cancelled moving PEB %d", e1->pnum);
+ pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
+ if (!pe) {
+ err = -ENOMEM;
+ goto out_error;
+ }
+
+ protect = 1;
}
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
+ if (protect)
+ prot_tree_add(ubi, e1, pe, protect);
if (!ubi->move_to_put)
wl_tree_add(e2, &ubi->used);
else
put = 1;
ubi->move_from = ubi->move_to = NULL;
- ubi->move_from_put = ubi->move_to_put = 0;
- ubi->wl_scheduled = 0;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
if (put) {
@@ -877,62 +889,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d was put meanwhile, erase", e2->pnum);
err = schedule_erase(ubi, e2, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e2);
- ubi_ro_mode(ubi);
- }
+ if (err)
+ goto out_error;
}
- err = schedule_erase(ubi, e1, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e1);
- ubi_ro_mode(ubi);
+ if (!protect) {
+ err = schedule_erase(ubi, e1, 0);
+ if (err)
+ goto out_error;
}
+
dbg_wl("done");
- return err;
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
/*
- * Some error occurred. @e1 was not changed, so return it back. @e2
- * might be changed, schedule it for erasure.
+ * For some reason the LEB was not moved; it might be an error, might be
+ * something else. @e1 was not changed, so return it back. @e2 might
+ * be changed, schedule it for erasure.
*/
-error:
- if (err)
- dbg_wl("error %d occurred, cancel operation", err);
- ubi_assert(err <= 0);
-
+out_not_moved:
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
- ubi->wl_scheduled = 0;
- if (ubi->move_from_put)
- put = 1;
+ if (scrubbing)
+ wl_tree_add(e1, &ubi->scrub);
else
wl_tree_add(e1, &ubi->used);
ubi->move_from = ubi->move_to = NULL;
- ubi->move_from_put = ubi->move_to_put = 0;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
- if (put) {
- /*
- * Well, the target PEB was put meanwhile, schedule it for
- * erasure.
- */
- dbg_wl("PEB %d was put meanwhile, erase", e1->pnum);
- err = schedule_erase(ubi, e1, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e1);
- ubi_ro_mode(ubi);
- }
- }
-
err = schedule_erase(ubi, e2, 0);
- if (err) {
- kmem_cache_free(wl_entries_slab, e2);
- ubi_ro_mode(ubi);
- }
+ if (err)
+ goto out_error;
+
+ mutex_unlock(&ubi->move_mutex);
+ return 0;
+
+out_error:
+ ubi_err("error %d while moving PEB %d to PEB %d",
+ err, e1->pnum, e2->pnum);
- yield();
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ spin_lock(&ubi->wl_lock);
+ ubi->move_from = ubi->move_to = NULL;
+ ubi->move_to_put = ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+
+ kmem_cache_free(ubi_wl_entry_slab, e1);
+ kmem_cache_free(ubi_wl_entry_slab, e2);
+ ubi_ro_mode(ubi);
+
+ mutex_unlock(&ubi->move_mutex);
return err;
+
+out_cancel:
+ ubi->wl_scheduled = 0;
+ spin_unlock(&ubi->wl_lock);
+ mutex_unlock(&ubi->move_mutex);
+ ubi_free_vid_hdr(ubi, vid_hdr);
+ return 0;
}
/**
@@ -1020,7 +1037,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
if (cancel) {
dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
kfree(wl_wrk);
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
return 0;
}
@@ -1049,7 +1066,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1119,8 +1136,7 @@ out_ro:
}
/**
- * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling
- * unit.
+ * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit.
* @ubi: UBI device description object
* @pnum: physical eraseblock to return
* @torture: if this physical eraseblock has to be tortured
@@ -1128,7 +1144,7 @@ out_ro:
* This function is called to return physical eraseblock @pnum to the pool of
* free physical eraseblocks. The @torture flag has to be set if an I/O error
* occurred to this @pnum and it has to be tested. This function returns zero
- * in case of success and a negative error code in case of failure.
+ * in case of success, and a negative error code in case of failure.
*/
int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
{
@@ -1139,8 +1155,8 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
+retry:
spin_lock(&ubi->wl_lock);
-
e = ubi->lookuptbl[pnum];
if (e == ubi->move_from) {
/*
@@ -1148,17 +1164,22 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
* be moved. It will be scheduled for erasure in the
* wear-leveling worker.
*/
- dbg_wl("PEB %d is being moved", pnum);
- ubi_assert(!ubi->move_from_put);
- ubi->move_from_put = 1;
+ dbg_wl("PEB %d is being moved, wait", pnum);
spin_unlock(&ubi->wl_lock);
- return 0;
+
+ /* Wait for the WL worker by taking the @ubi->move_mutex */
+ mutex_lock(&ubi->move_mutex);
+ mutex_unlock(&ubi->move_mutex);
+ goto retry;
} else if (e == ubi->move_to) {
/*
* User is putting the physical eraseblock which was selected
* as the target the data is moved to. It may happen if the EBA
- * unit already re-mapped the LEB but the WL unit did has not
- * put the PEB to the "used" tree.
+	 * unit already re-mapped the LEB in 'ubi_eba_copy_leb()', but the
+	 * WL unit has not yet put the PEB into the "used" tree, though it
+	 * is about to do so. So we just set a flag which tells the WL
+	 * worker that the PEB is not needed anymore and should be
+	 * scheduled for erasure.
*/
dbg_wl("PEB %d is the target of data moving", pnum);
ubi_assert(!ubi->move_to_put);
@@ -1172,8 +1193,15 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->rb, &ubi->scrub);
- } else
- prot_tree_del(ubi, e->pnum);
+ } else {
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
}
spin_unlock(&ubi->wl_lock);
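A note on the hunk above: it replaces the old @move_from_put flag with a simpler scheme in which a caller that puts the source PEB of an in-flight move just waits for the wear-leveling worker by taking and releasing @ubi->move_mutex, then retries. A minimal sketch of that "wait by locking" idiom, assuming nothing beyond what the hunk shows; all identifiers below are illustrative, not UBI code:

	#include <linux/mutex.h>

	/* Stands in for ubi->move_mutex; names are illustrative only. */
	static DEFINE_MUTEX(example_move_mutex);

	static void example_wait_for_move(void)
	{
		/*
		 * Taking the mutex blocks until its current holder (the
		 * wear-leveling worker) finishes or cancels the LEB move;
		 * releasing it immediately afterwards lets the caller go
		 * back and re-check the PEB state under its own lock.
		 */
		mutex_lock(&example_move_mutex);
		mutex_unlock(&example_move_mutex);
	}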
@@ -1227,8 +1255,17 @@ retry:
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
rb_erase(&e->rb, &ubi->used);
- } else
- prot_tree_del(ubi, pnum);
+ } else {
+ int err;
+
+ err = prot_tree_del(ubi, e->pnum);
+ if (err) {
+ ubi_err("PEB %d not found", pnum);
+ ubi_ro_mode(ubi);
+ spin_unlock(&ubi->wl_lock);
+ return err;
+ }
+ }
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
@@ -1249,17 +1286,33 @@ retry:
*/
int ubi_wl_flush(struct ubi_device *ubi)
{
- int err, pending_count;
-
- pending_count = ubi->works_count;
-
- dbg_wl("flush (%d pending works)", pending_count);
+ int err;
/*
* Erase while the pending works queue is not empty, but not more then
* the number of currently pending works.
*/
- while (pending_count-- > 0) {
+ dbg_wl("flush (%d pending works)", ubi->works_count);
+ while (ubi->works_count) {
+ err = do_work(ubi);
+ if (err)
+ return err;
+ }
+
+ /*
+ * Make sure all the works which have been done in parallel are
+ * finished.
+ */
+ ubi_assert(ubi->ref_count > 0);
+ down_write(&ubi->work_sem);
+ up_write(&ubi->work_sem);
+
+ /*
+	 * And in case the last one was the WL worker and it cancelled the LEB
+ * movement, flush again.
+ */
+ while (ubi->works_count) {
+ dbg_wl("flush more (%d pending works)", ubi->works_count);
err = do_work(ubi);
if (err)
return err;
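The new flush logic relies on @ubi->work_sem being held for reading by every worker while it runs, so a write-lock/unlock pair acts as a barrier for all in-flight work. A small sketch of that idiom, with illustrative names only:

	#include <linux/rwsem.h>

	/* Stands in for ubi->work_sem; names are illustrative only. */
	static DECLARE_RWSEM(example_work_sem);

	static void example_run_one_work(void)
	{
		down_read(&example_work_sem);
		/* ... execute one queued work item here ... */
		up_read(&example_work_sem);
	}

	static void example_wait_for_inflight_works(void)
	{
		/*
		 * A writer is admitted only when no reader holds the
		 * semaphore, so this pair blocks until every work item
		 * that was already running has completed.
		 */
		down_write(&example_work_sem);
		up_write(&example_work_sem);
	}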
@@ -1294,7 +1347,7 @@ static void tree_destroy(struct rb_root *root)
rb->rb_right = NULL;
}
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
}
}
}
@@ -1303,7 +1356,7 @@ static void tree_destroy(struct rb_root *root)
* ubi_thread - UBI background thread.
* @u: the UBI device description object pointer
*/
-static int ubi_thread(void *u)
+int ubi_thread(void *u)
{
int failures = 0;
struct ubi_device *ubi = u;
@@ -1394,36 +1447,22 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->used = ubi->free = ubi->scrub = RB_ROOT;
ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
spin_lock_init(&ubi->wl_lock);
+ mutex_init(&ubi->move_mutex);
+ init_rwsem(&ubi->work_sem);
ubi->max_ec = si->max_ec;
INIT_LIST_HEAD(&ubi->works);
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
- ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
- if (IS_ERR(ubi->bgt_thread)) {
- err = PTR_ERR(ubi->bgt_thread);
- ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
- err);
- return err;
- }
-
- if (ubi_devices_cnt == 0) {
- wl_entries_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!wl_entries_slab)
- return -ENOMEM;
- }
-
err = -ENOMEM;
ubi->lookuptbl = kzalloc(ubi->peb_count * sizeof(void *), GFP_KERNEL);
if (!ubi->lookuptbl)
- goto out_free;
+ return err;
list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1431,7 +1470,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
@@ -1439,7 +1478,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
list_for_each_entry(seb, &si->free, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1453,7 +1492,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
list_for_each_entry(seb, &si->corr, u.list) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1461,7 +1500,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->ec = seb->ec;
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, 0)) {
- kmem_cache_free(wl_entries_slab, e);
+ kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
}
@@ -1470,7 +1509,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi_rb_for_each_entry(rb2, seb, &sv->root, u.rb) {
cond_resched();
- e = kmem_cache_alloc(wl_entries_slab, GFP_KERNEL);
+ e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e)
goto out_free;
@@ -1510,8 +1549,6 @@ out_free:
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
- if (ubi_devices_cnt == 0)
- kmem_cache_destroy(wl_entries_slab);
return err;
}
@@ -1541,7 +1578,7 @@ static void protection_trees_destroy(struct ubi_device *ubi)
rb->rb_right = NULL;
}
- kmem_cache_free(wl_entries_slab, pe->e);
+ kmem_cache_free(ubi_wl_entry_slab, pe->e);
kfree(pe);
}
}
@@ -1553,10 +1590,6 @@ static void protection_trees_destroy(struct ubi_device *ubi)
*/
void ubi_wl_close(struct ubi_device *ubi)
{
- dbg_wl("disable \"%s\"", ubi->bgt_name);
- if (ubi->bgt_thread)
- kthread_stop(ubi->bgt_thread);
-
dbg_wl("close the UBI wear-leveling unit");
cancel_pending(ubi);
@@ -1565,8 +1598,6 @@ void ubi_wl_close(struct ubi_device *ubi)
tree_destroy(&ubi->free);
tree_destroy(&ubi->scrub);
kfree(ubi->lookuptbl);
- if (ubi_devices_cnt == 1)
- kmem_cache_destroy(wl_entries_slab);
}
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 77fc5838609..4c80404a9ab 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -176,7 +176,7 @@ static void jffs2_iset_acl(struct inode *inode, struct posix_acl **i_acl, struct
spin_unlock(&inode->i_lock);
}
-struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
+static struct posix_acl *jffs2_get_acl(struct inode *inode, int type)
{
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct posix_acl *acl;
@@ -345,8 +345,10 @@ int jffs2_init_acl_pre(struct inode *dir_i, struct inode *inode, int *i_mode)
if (!clone)
return -ENOMEM;
rc = posix_acl_create_masq(clone, (mode_t *)i_mode);
- if (rc < 0)
+ if (rc < 0) {
+ posix_acl_release(clone);
return rc;
+ }
if (rc > 0)
jffs2_iset_acl(inode, &f->i_acl_access, clone);
diff --git a/fs/jffs2/acl.h b/fs/jffs2/acl.h
index 76c6ebd1acd..0bb7f003fd8 100644
--- a/fs/jffs2/acl.h
+++ b/fs/jffs2/acl.h
@@ -28,7 +28,6 @@ struct jffs2_acl_header {
#define JFFS2_ACL_NOT_CACHED ((void *)-1)
-extern struct posix_acl *jffs2_get_acl(struct inode *inode, int type);
extern int jffs2_permission(struct inode *, int, struct nameidata *);
extern int jffs2_acl_chmod(struct inode *);
extern int jffs2_init_acl_pre(struct inode *, struct inode *, int *);
@@ -40,7 +39,6 @@ extern struct xattr_handler jffs2_acl_default_xattr_handler;
#else
-#define jffs2_get_acl(inode, type) (NULL)
#define jffs2_permission (NULL)
#define jffs2_acl_chmod(inode) (0)
#define jffs2_init_acl_pre(dir_i,inode,mode) (0)
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index d2e06f7ea96..ee192af0b8b 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -97,11 +97,7 @@ int jffs2_do_setattr (struct inode *inode, struct iattr *iattr)
ri->gid = cpu_to_je16((ivalid & ATTR_GID)?iattr->ia_gid:inode->i_gid);
if (ivalid & ATTR_MODE)
- if (iattr->ia_mode & S_ISGID &&
- !in_group_p(je16_to_cpu(ri->gid)) && !capable(CAP_FSETID))
- ri->mode = cpu_to_jemode(iattr->ia_mode & ~S_ISGID);
- else
- ri->mode = cpu_to_jemode(iattr->ia_mode);
+ ri->mode = cpu_to_jemode(iattr->ia_mode);
else
ri->mode = cpu_to_jemode(inode->i_mode);
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 4bf86088b3a..87c6f555e1a 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -32,15 +32,18 @@ void jffs2_add_fd_to_list(struct jffs2_sb_info *c, struct jffs2_full_dirent *new
if ((*prev)->nhash == new->nhash && !strcmp((*prev)->name, new->name)) {
/* Duplicate. Free one */
if (new->version < (*prev)->version) {
- dbg_dentlist("Eep! Marking new dirent node is obsolete, old is \"%s\", ino #%u\n",
+ dbg_dentlist("Eep! Marking new dirent node obsolete, old is \"%s\", ino #%u\n",
(*prev)->name, (*prev)->ino);
jffs2_mark_node_obsolete(c, new->raw);
jffs2_free_full_dirent(new);
} else {
- dbg_dentlist("marking old dirent \"%s\", ino #%u bsolete\n",
+ dbg_dentlist("marking old dirent \"%s\", ino #%u obsolete\n",
(*prev)->name, (*prev)->ino);
new->next = (*prev)->next;
- jffs2_mark_node_obsolete(c, ((*prev)->raw));
+ /* It may have been a 'placeholder' deletion dirent,
+ if jffs2_can_mark_obsolete() (see jffs2_do_unlink()) */
+ if ((*prev)->raw)
+ jffs2_mark_node_obsolete(c, ((*prev)->raw));
jffs2_free_full_dirent(*prev);
*prev = new;
}
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 2eae5d2dbeb..fb89ab5e1d5 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -37,23 +37,24 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
BUG_ON(tn->csize == 0);
- if (!jffs2_is_writebuffered(c))
- goto adj_acc;
-
/* Calculate how many bytes were already checked */
ofs = ref_offset(ref) + sizeof(struct jffs2_raw_inode);
- len = ofs % c->wbuf_pagesize;
- if (likely(len))
- len = c->wbuf_pagesize - len;
-
- if (len >= tn->csize) {
- dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
- ref_offset(ref), tn->csize, ofs);
- goto adj_acc;
- }
+ len = tn->csize;
+
+ if (jffs2_is_writebuffered(c)) {
+ int adj = ofs % c->wbuf_pagesize;
+ if (likely(adj))
+ adj = c->wbuf_pagesize - adj;
+
+ if (adj >= tn->csize) {
+ dbg_readinode("no need to check node at %#08x, data length %u, data starts at %#08x - it has already been checked.\n",
+ ref_offset(ref), tn->csize, ofs);
+ goto adj_acc;
+ }
- ofs += len;
- len = tn->csize - len;
+ ofs += adj;
+ len -= adj;
+ }
dbg_readinode("check node at %#08x, data length %u, partial CRC %#08x, correct CRC %#08x, data starts at %#08x, start checking from %#08x - %u bytes.\n",
ref_offset(ref), tn->csize, tn->partial_crc, tn->data_crc, ofs - len, ofs, len);
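The rewritten logic above skips the CRC check only for the part of the node data that lies before the next write-buffer page boundary, since that part was already verified when the buffer was flushed. A standalone restatement of the same arithmetic, using hypothetical names rather than JFFS2 code:

	#include <stdint.h>

	/*
	 * Returns how many of the @csize data bytes starting at flash
	 * offset @data_ofs still need a CRC check, given that everything
	 * before the next @wbuf_pagesize boundary was already verified.
	 */
	static uint32_t bytes_left_to_check(uint32_t data_ofs, uint32_t csize,
					    uint32_t wbuf_pagesize)
	{
		uint32_t adj = data_ofs % wbuf_pagesize;

		if (adj)
			adj = wbuf_pagesize - adj;  /* bytes up to the boundary */

		return adj >= csize ? 0 : csize - adj;
	}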
@@ -63,7 +64,7 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info
* adding and jffs2_flash_read_end() interface. */
if (c->mtd->point) {
err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer);
- if (!err && retlen < tn->csize) {
+ if (!err && retlen < len) {
JFFS2_WARNING("MTD point returned len too short: %zu instead of %u.\n", retlen, tn->csize);
c->mtd->unpoint(c->mtd, buffer, ofs, retlen);
} else if (err)
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 147e2cbee9e..ecdf18d0486 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -582,7 +582,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
up(&dir_f->sem);
} else {
- struct jffs2_full_dirent **prev = &dir_f->dents;
+ struct jffs2_full_dirent *fd = dir_f->dents;
uint32_t nhash = full_name_hash(name, namelen);
/* We don't actually want to reserve any space, but we do
@@ -590,21 +590,22 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
down(&c->alloc_sem);
down(&dir_f->sem);
- while ((*prev) && (*prev)->nhash <= nhash) {
- if ((*prev)->nhash == nhash &&
- !memcmp((*prev)->name, name, namelen) &&
- !(*prev)->name[namelen]) {
- struct jffs2_full_dirent *this = *prev;
+ for (fd = dir_f->dents; fd; fd = fd->next) {
+ if (fd->nhash == nhash &&
+ !memcmp(fd->name, name, namelen) &&
+ !fd->name[namelen]) {
D1(printk(KERN_DEBUG "Marking old dirent node (ino #%u) @%08x obsolete\n",
- this->ino, ref_offset(this->raw)));
-
- *prev = this->next;
- jffs2_mark_node_obsolete(c, (this->raw));
- jffs2_free_full_dirent(this);
+ fd->ino, ref_offset(fd->raw)));
+ jffs2_mark_node_obsolete(c, fd->raw);
+ /* We don't want to remove it from the list immediately,
+ because that screws up getdents()/seek() semantics even
+ more than they're screwed already. Turn it into a
+ node-less deletion dirent instead -- a placeholder */
+ fd->raw = NULL;
+ fd->ino = 0;
break;
}
- prev = &((*prev)->next);
}
up(&dir_f->sem);
}
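The change above turns an obsoleted dirent into a node-less "placeholder" (raw = NULL, ino = 0) instead of unlinking it from the list, so code that later walks the list has to tolerate such entries, as the hunk below for the deletion path already does. A purely illustrative sketch of that treatment (not actual JFFS2 code; the real structure is struct jffs2_full_dirent):

	struct example_dirent {
		struct example_dirent *next;
		void *raw;		/* NULL for a node-less placeholder */
		unsigned int ino;	/* 0 for a deletion dirent */
	};

	/* Obsolete every on-flash node in the list, skipping placeholders. */
	static void example_obsolete_all(struct example_dirent *fd,
					 void (*mark_obsolete)(void *raw))
	{
		for (; fd; fd = fd->next)
			if (fd->raw)
				mark_obsolete(fd->raw);
	}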
@@ -630,7 +631,8 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
D1(printk(KERN_DEBUG "Removing deletion dirent for \"%s\" from dir ino #%u\n",
fd->name, dead_f->inocache->ino));
}
- jffs2_mark_node_obsolete(c, fd->raw);
+ if (fd->raw)
+ jffs2_mark_node_obsolete(c, fd->raw);
jffs2_free_full_dirent(fd);
}
}
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index e17c5343cf5..b0ddf4b2586 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -98,6 +98,18 @@ static inline int cfi_interleave_supported(int i)
#define CFI_DEVICETYPE_X32 (32 / 8)
#define CFI_DEVICETYPE_X64 (64 / 8)
+
+/* Device Interface Code Assignments from the "Common Flash Memory Interface
+ * Publication 100" dated December 1, 2001.
+ */
+#define CFI_INTERFACE_X8_ASYNC 0x0000
+#define CFI_INTERFACE_X16_ASYNC 0x0001
+#define CFI_INTERFACE_X8_BY_X16_ASYNC 0x0002
+#define CFI_INTERFACE_X32_ASYNC 0x0003
+#define CFI_INTERFACE_X16_BY_X32_ASYNC 0x0005
+#define CFI_INTERFACE_NOT_ALLOWED 0xffff
+
+
/* NB: We keep these structures in memory in HOST byteorder, except
* where individually noted.
*/
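A small illustrative use of the new interface-code constants, decoding the InterfaceDesc field of the CFI query structure; the helper itself is hypothetical, only the constants and field name come from this header:

	#include <linux/mtd/cfi.h>

	static const char *example_interface_name(struct cfi_private *cfi)
	{
		switch (cfi->cfiq->InterfaceDesc) {
		case CFI_INTERFACE_X8_ASYNC:		return "x8 async";
		case CFI_INTERFACE_X16_ASYNC:		return "x16 async";
		case CFI_INTERFACE_X8_BY_X16_ASYNC:	return "x8/x16 async";
		case CFI_INTERFACE_X32_ASYNC:		return "x32 async";
		case CFI_INTERFACE_X16_BY_X32_ASYNC:	return "x16/x32 async";
		default:				return "unknown/not allowed";
		}
	}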
diff --git a/include/linux/mtd/mtdram.h b/include/linux/mtd/mtdram.h
new file mode 100644
index 00000000000..04fdc07b735
--- /dev/null
+++ b/include/linux/mtd/mtdram.h
@@ -0,0 +1,8 @@
+#ifndef __MTD_MTDRAM_H__
+#define __MTD_MTDRAM_H__
+
+#include <linux/mtd/mtd.h>
+int mtdram_init_device(struct mtd_info *mtd, void *mapped_address,
+ unsigned long size, char *name);
+
+#endif /* __MTD_MTDRAM_H__ */
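The new header only declares mtdram_init_device(); a hypothetical caller wrapping an already-allocated RAM buffer might look roughly like the sketch below. The registration call and error handling are assumptions, not part of this patch:

	#include <linux/errno.h>
	#include <linux/vmalloc.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/mtdram.h>

	static struct mtd_info example_mtd;
	static char example_name[] = "example-ram";

	static int example_register_ram_mtd(unsigned long size)
	{
		void *mem = vmalloc(size);

		if (!mem)
			return -ENOMEM;

		if (mtdram_init_device(&example_mtd, mem, size, example_name)) {
			vfree(mem);
			return -ENODEV;
		}

		/* Register the whole device; partitioning is up to the caller. */
		if (add_mtd_device(&example_mtd)) {
			vfree(mem);
			return -ENODEV;
		}
		return 0;
	}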
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
index da6b3d6f12a..7c37d7e55ab 100644
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -71,5 +71,12 @@ extern int parse_mtd_partitions(struct mtd_info *master, const char **types,
#define put_partition_parser(p) do { module_put((p)->owner); } while(0)
-#endif
+struct device;
+struct device_node;
+
+int __devinit of_mtd_parse_partitions(struct device *dev,
+ struct mtd_info *mtd,
+ struct device_node *node,
+ struct mtd_partition **pparts);
+#endif
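A flash map driver could use the parser declared above roughly as sketched below from its probe path; the wrapper and its callers are hypothetical, only the of_mtd_parse_partitions() prototype comes from this header:

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	static int example_add_of_partitions(struct device *dev,
					     struct mtd_info *mtd,
					     struct device_node *np)
	{
		struct mtd_partition *parts;
		int nr;

		nr = of_mtd_parse_partitions(dev, mtd, np, &parts);
		if (nr <= 0)
			return nr;	/* nothing in the device tree, or error */

		return add_mtd_partitions(mtd, parts, nr);
	}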
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 3d967b6b120..c4abe035122 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -167,6 +167,7 @@ int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf,
int len, int dtype);
int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
+int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
/*
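A rough sketch of how an in-kernel client might use the new ubi_leb_map() call, assuming the existing ubi_open_volume()/ubi_close_volume() kernel API; the volume identifiers and the data-type hint are illustrative:

	#include <linux/err.h>
	#include <linux/mtd/ubi.h>

	static int example_premap_leb(int ubi_num, int vol_id, int lnum)
	{
		struct ubi_volume_desc *desc;
		int err;

		desc = ubi_open_volume(ubi_num, vol_id, UBI_READWRITE);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* Map the LEB to a physical eraseblock without writing data. */
		err = ubi_leb_map(desc, lnum, UBI_UNKNOWN);
		ubi_close_volume(desc);
		return err;
	}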
diff --git a/include/mtd/mtd-abi.h b/include/mtd/mtd-abi.h
index f71dac42039..615072c4da0 100644
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -29,7 +29,7 @@ struct mtd_oob_buf {
#define MTD_WRITEABLE 0x400 /* Device is writeable */
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
#define MTD_NO_ERASE 0x1000 /* No erase necessary */
-#define MTD_STUPID_LOCK 0x2000 /* Always locked after reset */
+#define MTD_POWERUP_LOCK 0x2000 /* Always locked after reset */
// Some common devices / combinations of capabilities
#define MTD_CAP_ROM 0
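Renaming the flag does not change how it is consumed; a caller that wants the chip usable after power-up still unlocks it when the flag is set, roughly as in this illustrative sketch (the helper is hypothetical, the flags field and unlock() method belong to struct mtd_info):

	#include <linux/mtd/mtd.h>

	static void example_unlock_if_powerup_locked(struct mtd_info *mtd)
	{
		/* Chips that power up locked must be unlocked before writing. */
		if ((mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock)
			mtd->unlock(mtd, 0, mtd->size);
	}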
diff --git a/include/mtd/ubi-user.h b/include/mtd/ubi-user.h
index fe06ded0e6b..4d184a7f80a 100644
--- a/include/mtd/ubi-user.h
+++ b/include/mtd/ubi-user.h
@@ -22,6 +22,21 @@
#define __UBI_USER_H__
/*
+ * UBI device creation (the same as MTD device attachment)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * MTD devices may be attached using the %UBI_IOCATT ioctl command of the UBI
+ * control device. The caller has to properly fill and pass a
+ * &struct ubi_attach_req object - UBI will attach the MTD device specified in
+ * the request and return the newly created UBI device number as the ioctl
+ * return value.
+ *
+ * UBI device deletion (the same as MTD device detachment)
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * An UBI device may be deleted with the %UBI_IOCDET ioctl command of the UBI
+ * control device.
+ *
* UBI volume creation
* ~~~~~~~~~~~~~~~~~~~
*
@@ -60,11 +75,12 @@
*/
/*
- * When a new volume is created, users may either specify the volume number they
- * want to create or to let UBI automatically assign a volume number using this
- * constant.
+ * When a new UBI volume or UBI device is created, users may either specify the
+ * volume/device number they want to create or let UBI automatically assign
+ * the number using these constants.
*/
#define UBI_VOL_NUM_AUTO (-1)
+#define UBI_DEV_NUM_AUTO (-1)
/* Maximum volume name length */
#define UBI_MAX_VOLUME_NAME 127
@@ -80,6 +96,15 @@
/* Re-size an UBI volume */
#define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req)
+/* IOCTL commands of the UBI control character device */
+
+#define UBI_CTRL_IOC_MAGIC 'o'
+
+/* Attach an MTD device */
+#define UBI_IOCATT _IOW(UBI_CTRL_IOC_MAGIC, 64, struct ubi_attach_req)
+/* Detach an MTD device */
+#define UBI_IOCDET _IOW(UBI_CTRL_IOC_MAGIC, 65, int32_t)
+
/* IOCTL commands of UBI volume character devices */
#define UBI_VOL_IOC_MAGIC 'O'
@@ -89,6 +114,9 @@
/* An eraseblock erasure command, used for debugging, disabled by default */
#define UBI_IOCEBER _IOW(UBI_VOL_IOC_MAGIC, 1, int32_t)
+/* Maximum MTD device name length supported by UBI */
+#define MAX_UBI_MTD_NAME_LEN 127
+
/*
* UBI volume type constants.
*
@@ -97,19 +125,55 @@
*/
enum {
UBI_DYNAMIC_VOLUME = 3,
- UBI_STATIC_VOLUME = 4
+ UBI_STATIC_VOLUME = 4,
+};
+
+/**
+ * struct ubi_attach_req - attach MTD device request.
+ * @ubi_num: UBI device number to create
+ * @mtd_num: MTD device number to attach
+ * @vid_hdr_offset: VID header offset (use defaults if %0)
+ * @padding: reserved for future, not used, has to be zeroed
+ *
+ * This data structure is used to specify the MTD device which UBI has to
+ * attach and the parameters it has to use. The number which should be
+ * assigned to the new UBI device is passed in @ubi_num. UBI may automatically
+ * assign the number if %UBI_DEV_NUM_AUTO is passed. In this case, the device
+ * number is returned in @ubi_num.
+ *
+ * Most applications should pass %0 in @vid_hdr_offset to make UBI use the
+ * default offset of the VID header within physical eraseblocks. The default
+ * offset is the next minimal I/O unit after the EC header. For example, it
+ * will be offset 512 in case of a NAND flash with 512-byte pages and no
+ * sub-page support, or 512 in case of a NAND flash with 2KiB pages and four
+ * 512-byte sub-pages.
+ *
+ * But in rare cases, if this optimizes things, the VID header may be placed
+ * at a different offset. For example, the boot-loader might do things faster
+ * if the VID header sits at the end of the first 2KiB NAND page with 4
+ * sub-pages. As the boot-loader would not normally need to read EC headers
+ * (unless it needs UBI in R/W mode), it might be faster to calculate ECC.
+ * This is a weird example, but a real-life one. In this case, @vid_hdr_offset
+ * would be 2KiB - 64 bytes = 1984. Note that this position is not even
+ * 512-byte aligned, which is OK, as UBI is clever enough to realize that this
+ * is the 4th sub-page of the first page and to add the needed padding.
+ */
+struct ubi_attach_req {
+ int32_t ubi_num;
+ int32_t mtd_num;
+ int32_t vid_hdr_offset;
+ uint8_t padding[12];
};
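A hedged userspace sketch of the attach/detach ioctls described above; the control node path /dev/ubi_ctrl and the error handling are assumptions, and the attached device number is taken from the ioctl return value as the comment earlier in this header describes:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <mtd/ubi-user.h>

	int main(void)
	{
		struct ubi_attach_req req;
		int fd, ubi_num;
		int32_t det;

		fd = open("/dev/ubi_ctrl", O_RDONLY);
		if (fd < 0)
			return 1;

		memset(&req, 0, sizeof(req));	/* padding has to be zeroed */
		req.ubi_num = UBI_DEV_NUM_AUTO;	/* let UBI pick the number */
		req.mtd_num = 0;		/* attach mtd0 */
		req.vid_hdr_offset = 0;		/* use the default offset */

		ubi_num = ioctl(fd, UBI_IOCATT, &req);
		if (ubi_num < 0) {
			close(fd);
			return 1;
		}
		printf("attached as ubi%d\n", ubi_num);

		det = ubi_num;
		ioctl(fd, UBI_IOCDET, &det);	/* detach it again */
		close(fd);
		return 0;
	}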
/**
* struct ubi_mkvol_req - volume description data structure used in
- * volume creation requests.
+ * volume creation requests.
* @vol_id: volume number
* @alignment: volume alignment
* @bytes: volume size in bytes
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
- * @padding1: reserved for future, not used
+ * @padding1: reserved for future, not used, has to be zeroed
* @name_len: volume name length
- * @padding2: reserved for future, not used
+ * @padding2: reserved for future, not used, has to be zeroed
* @name: volume name
*
* This structure is used by userspace programs when creating new volumes. The
@@ -139,7 +203,7 @@ struct ubi_mkvol_req {
int8_t padding1;
int16_t name_len;
int8_t padding2[4];
- char name[UBI_MAX_VOLUME_NAME+1];
+ char name[UBI_MAX_VOLUME_NAME + 1];
} __attribute__ ((packed));
/**