Diffstat (limited to 'drivers/mtd/ubi/eba.c')
-rw-r--r--   drivers/mtd/ubi/eba.c   77
1 file changed, 22 insertions, 55 deletions
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 7ce91ca742b..e04bcf1dff8 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -19,20 +19,20 @@
  */
 
 /*
- * The UBI Eraseblock Association (EBA) unit.
+ * The UBI Eraseblock Association (EBA) sub-system.
  *
- * This unit is responsible for I/O to/from logical eraseblock.
+ * This sub-system is responsible for I/O to/from logical eraseblock.
  *
  * Although in this implementation the EBA table is fully kept and managed in
  * RAM, which assumes poor scalability, it might be (partially) maintained on
  * flash in future implementations.
  *
- * The EBA unit implements per-logical eraseblock locking. Before accessing a
- * logical eraseblock it is locked for reading or writing. The per-logical
- * eraseblock locking is implemented by means of the lock tree. The lock tree
- * is an RB-tree which refers all the currently locked logical eraseblocks. The
- * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by
- * (@vol_id, @lnum) pairs.
+ * The EBA sub-system implements per-logical eraseblock locking. Before
+ * accessing a logical eraseblock it is locked for reading or writing. The
+ * per-logical eraseblock locking is implemented by means of the lock tree. The
+ * lock tree is an RB-tree which refers all the currently locked logical
+ * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
+ * They are indexed by (@vol_id, @lnum) pairs.
  *
  * EBA also maintains the global sequence counter which is incremented each
  * time a logical eraseblock is mapped to a physical eraseblock and it is
@@ -189,9 +189,7 @@ static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
 	le->users += 1;
 	spin_unlock(&ubi->ltree_lock);
 
-	if (le_free)
-		kfree(le_free);
-
+	kfree(le_free);
 	return le;
 }
 
@@ -223,22 +221,18 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int free = 0;
 	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
 	le->users -= 1;
 	ubi_assert(le->users >= 0);
+	up_read(&le->mutex);
 	if (le->users == 0) {
 		rb_erase(&le->rb, &ubi->ltree);
-		free = 1;
+		kfree(le);
 	}
 	spin_unlock(&ubi->ltree_lock);
-
-	up_read(&le->mutex);
-	if (free)
-		kfree(le);
 }
 
 /**
@@ -274,7 +268,6 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int free;
 	struct ubi_ltree_entry *le;
 
 	le = ltree_add_entry(ubi, vol_id, lnum);
@@ -289,12 +282,9 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
 	ubi_assert(le->users >= 0);
 	if (le->users == 0) {
 		rb_erase(&le->rb, &ubi->ltree);
-		free = 1;
-	} else
-		free = 0;
-	spin_unlock(&ubi->ltree_lock);
-	if (free)
 		kfree(le);
+	}
+	spin_unlock(&ubi->ltree_lock);
 
 	return 1;
 }
@@ -307,23 +297,18 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-	int free;
 	struct ubi_ltree_entry *le;
 
 	spin_lock(&ubi->ltree_lock);
 	le = ltree_lookup(ubi, vol_id, lnum);
 	le->users -= 1;
 	ubi_assert(le->users >= 0);
+	up_write(&le->mutex);
 	if (le->users == 0) {
 		rb_erase(&le->rb, &ubi->ltree);
-		free = 1;
-	} else
-		free = 0;
-	spin_unlock(&ubi->ltree_lock);
-
-	up_write(&le->mutex);
-	if (free)
 		kfree(le);
+	}
+	spin_unlock(&ubi->ltree_lock);
 }
 
 /**
@@ -516,9 +501,8 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
 	struct ubi_vid_hdr *vid_hdr;
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
-	if (!vid_hdr) {
+	if (!vid_hdr)
 		return -ENOMEM;
-	}
 
 	mutex_lock(&ubi->buf_mutex);
 
@@ -752,7 +736,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
 		/* If this is the last LEB @len may be unaligned */
 		len = ALIGN(data_size, ubi->min_io_size);
 	else
-		ubi_assert(len % ubi->min_io_size == 0);
+		ubi_assert(!(len & (ubi->min_io_size - 1)));
 
 	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
 	if (!vid_hdr)
@@ -919,7 +903,7 @@ retry:
 	}
 
 	if (vol->eba_tbl[lnum] >= 0) {
-		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
+		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
 		if (err)
 			goto out_leb_unlock;
 	}
@@ -1141,7 +1125,7 @@ out_unlock_leb:
 }
 
 /**
- * ubi_eba_init_scan - initialize the EBA unit using scanning information.
+ * ubi_eba_init_scan - initialize the EBA sub-system using scanning information.
  * @ubi: UBI device description object
 * @si: scanning information
  *
@@ -1156,7 +1140,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	struct ubi_scan_leb *seb;
 	struct rb_node *rb;
 
-	dbg_eba("initialize EBA unit");
+	dbg_eba("initialize EBA sub-system");
 
 	spin_lock_init(&ubi->ltree_lock);
 	mutex_init(&ubi->alc_mutex);
@@ -1222,7 +1206,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
 	}
 
-	dbg_eba("EBA unit is initialized");
+	dbg_eba("EBA sub-system is initialized");
 	return 0;
 
 out_free:
@@ -1233,20 +1217,3 @@ out_free:
 	}
 	return err;
 }
-
-/**
- * ubi_eba_close - close EBA unit.
- * @ubi: UBI device description object
- */
-void ubi_eba_close(const struct ubi_device *ubi)
-{
-	int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
-
-	dbg_eba("close EBA unit");
-
-	for (i = 0; i < num_volumes; i++) {
-		if (!ubi->volumes[i])
-			continue;
-		kfree(ubi->volumes[i]->eba_tbl);
-	}
-}
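
The lock-tree scheme described in the header comment, and simplified by the leb_read_unlock()/leb_write_unlock() hunks above, boils down to a refcounted entry per (vol_id, lnum) pair: the tree lock protects the refcount, the per-entry rwsem serializes LEB I/O, and after this patch the entry is freed directly under the tree lock once its last user drops it, so the intermediate 'free' flag disappears. The sketch below is a minimal userspace analogue of that pattern, not UBI code: names such as ltree_read_lock()/ltree_read_unlock() and the use of POSIX tsearch() in place of the kernel RB-tree are illustrative assumptions.

/*
 * Minimal userspace analogue of the UBI lock tree (illustrative only).
 * Entries are keyed by (vol_id, lnum) and refcounted; the unlock path
 * mirrors the post-patch kernel code: drop the per-entry rwlock and
 * free the entry while the tree lock is still held.
 *
 * Compile: cc -pthread ltree_sketch.c
 */
#include <pthread.h>
#include <search.h>
#include <stdio.h>
#include <stdlib.h>

struct ltree_entry {
	int vol_id;
	int lnum;
	int users;			/* how many callers hold this LEB */
	pthread_rwlock_t rwlock;	/* plays the role of le->mutex */
};

static void *ltree_root;		/* tsearch() binary tree */
static pthread_mutex_t ltree_lock = PTHREAD_MUTEX_INITIALIZER;

static int ltree_cmp(const void *a, const void *b)
{
	const struct ltree_entry *x = a, *y = b;

	if (x->vol_id != y->vol_id)
		return x->vol_id < y->vol_id ? -1 : 1;
	if (x->lnum != y->lnum)
		return x->lnum < y->lnum ? -1 : 1;
	return 0;
}

/* Look up or create the (vol_id, lnum) entry and take it for reading. */
static struct ltree_entry *ltree_read_lock(int vol_id, int lnum)
{
	struct ltree_entry *le, *le_new, **slot;

	/* Allocate a candidate entry up front, outside the tree lock. */
	le_new = malloc(sizeof(*le_new));
	if (!le_new)
		abort();
	le_new->vol_id = vol_id;
	le_new->lnum = lnum;
	le_new->users = 0;
	pthread_rwlock_init(&le_new->rwlock, NULL);

	pthread_mutex_lock(&ltree_lock);
	slot = (struct ltree_entry **)tsearch(le_new, &ltree_root, ltree_cmp);
	le = *slot;
	le->users += 1;
	pthread_mutex_unlock(&ltree_lock);

	if (le != le_new) {
		/* Entry already existed - drop our candidate, the analogue
		 * of "kfree(le_free)" in ltree_add_entry(). */
		pthread_rwlock_destroy(&le_new->rwlock);
		free(le_new);
	}

	pthread_rwlock_rdlock(&le->rwlock);
	return le;
}

/* Unlock in the order the patch establishes: rwlock first, then free. */
static void ltree_read_unlock(struct ltree_entry *le)
{
	pthread_mutex_lock(&ltree_lock);
	le->users -= 1;
	pthread_rwlock_unlock(&le->rwlock);
	if (le->users == 0) {
		tdelete(le, &ltree_root, ltree_cmp);
		pthread_rwlock_destroy(&le->rwlock);
		free(le);		/* no 'free' flag needed */
	}
	pthread_mutex_unlock(&ltree_lock);
}

int main(void)
{
	struct ltree_entry *le = ltree_read_lock(0, 5);

	printf("locked LEB (%d, %d), users=%d\n", le->vol_id, le->lnum, le->users);
	ltree_read_unlock(le);
	return 0;
}

Freeing right after dropping the rwlock is safe for the same reason as in the kernel version: any other user must bump 'users' under the tree lock before touching the rwlock, so reaching zero means nobody else can still be holding or waiting on the entry.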
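
The assertion change in ubi_eba_write_leb_st() replaces a modulo with a mask test. The two are interchangeable only because ubi->min_io_size is a power of two, which UBI expects of the flash it attaches to (the attach-time I/O checks reject other values, if memory serves). A small standalone check, using an arbitrary example value of 2048 rather than anything read from a device, illustrates the identity:

/*
 * Shows that for a power-of-two m, (len % m == 0) and !(len & (m - 1))
 * always agree. 2048 stands in for a typical NAND page size; it is only
 * an example value.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	const unsigned int min_io_size = 2048;	/* must be a power of two */
	unsigned int len;

	for (len = 0; len <= 4 * min_io_size; len++) {
		int by_modulo = (len % min_io_size == 0);
		int by_mask   = !(len & (min_io_size - 1));

		assert(by_modulo == by_mask);
	}
	printf("modulo and mask checks agree for min_io_size=%u\n", min_io_size);
	return 0;
}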