Commit f248488b authored by Linus Torvalds

Merge git://git.infradead.org/mtd-2.6

* git://git.infradead.org/mtd-2.6:
  [MTD] fix mtdconcat for subpage-write NAND
  [MTD] [OneNAND] Avoid deadlock in erase callback; release chip lock first.
  [MTD] [OneNAND] Return only negative error codes
  [MTD] [OneNAND] Synchronize block locking operations
  UBI: return correct error code
  UBI: remove useless inlines
  UBI: fix atomic LEB change problems
  UBI: use byte hexdump
  UBI: do not use vmalloc on I/O path
  UBI: allocate memory with GFP_NOFS
  UBI: use linux print_hex_dump(), not home-grown one
  UBI: don't use array index before testing if it is negative
  UBI: add more prints
  UBI: fix sparse warnings
  UBI: fix leak in ubi_scan_erase_peb
......@@ -726,6 +726,7 @@ struct mtd_info *mtd_concat_create(struct mtd_info *subdev[], /* subdevices to c
concat->mtd.size = subdev[0]->size;
concat->mtd.erasesize = subdev[0]->erasesize;
concat->mtd.writesize = subdev[0]->writesize;
concat->mtd.subpage_sft = subdev[0]->subpage_sft;
concat->mtd.oobsize = subdev[0]->oobsize;
concat->mtd.oobavail = subdev[0]->oobavail;
if (subdev[0]->writev)
......
......@@ -327,7 +327,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
printk(KERN_ERR "onenand_wait: controller error = 0x%04x\n", ctrl);
if (ctrl & ONENAND_CTRL_LOCK)
printk(KERN_ERR "onenand_wait: it's locked error.\n");
return ctrl;
return -EIO;
}
if (interrupt & ONENAND_INT_READ) {
......@@ -336,7 +336,7 @@ static int onenand_wait(struct mtd_info *mtd, int state)
if (ecc & ONENAND_ECC_2BIT_ALL) {
printk(KERN_ERR "onenand_wait: ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.failed++;
return ecc;
return -EBADMSG;
} else if (ecc & ONENAND_ECC_1BIT_ALL) {
printk(KERN_INFO "onenand_wait: correctable ECC error = 0x%04x\n", ecc);
mtd->ecc_stats.corrected++;
......@@ -1711,13 +1711,14 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr)
erase_exit:
ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
/* Do call back function */
if (!ret)
mtd_erase_callback(instr);
/* Deselect and wake up anyone waiting on the device */
onenand_release_device(mtd);
/* Do call back function */
if (!ret)
mtd_erase_callback(instr);
return ret;
}
......@@ -1904,7 +1905,12 @@ static int onenand_do_lock_cmd(struct mtd_info *mtd, loff_t ofs, size_t len, int
*/
static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
return onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
int ret;
onenand_get_device(mtd, FL_LOCKING);
ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
onenand_release_device(mtd);
return ret;
}
/**
......@@ -1917,7 +1923,12 @@ static int onenand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
*/
static int onenand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
return onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
int ret;
onenand_get_device(mtd, FL_LOCKING);
ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
onenand_release_device(mtd);
return ret;
}
/**
......@@ -1979,7 +1990,7 @@ static int onenand_unlock_all(struct mtd_info *mtd)
loff_t ofs = this->chipsize >> 1;
size_t len = mtd->erasesize;
onenand_unlock(mtd, ofs, len);
onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_UNLOCK);
}
onenand_check_lock_status(this);
......@@ -1987,7 +1998,7 @@ static int onenand_unlock_all(struct mtd_info *mtd)
return 0;
}
onenand_unlock(mtd, 0x0, this->chipsize);
onenand_do_lock_cmd(mtd, 0x0, this->chipsize, ONENAND_CMD_UNLOCK);
return 0;
}
......
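
For readability, here is a minimal sketch (not the driver source; the erase loop and error handling are elided) of the ordering the OneNAND hunks above establish: the erase path releases the chip before invoking the MTD erase callback, and onenand_lock()/onenand_unlock() now take and release the device state themselves around the raw lock command.

/* Sketch only: condensed from the hunks above, details elided. */

static int onenand_erase_sketch(struct mtd_info *mtd, struct erase_info *instr)
{
        int ret;

        /* ... block erase loop elided ... */
        ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;

        /* Deselect and wake up anyone waiting on the device first ... */
        onenand_release_device(mtd);

        /*
         * ... and only then run the callback, so a callback that re-enters
         * the driver cannot deadlock on the chip lock.
         */
        if (!ret)
                mtd_erase_callback(instr);

        return ret;
}

static int onenand_lock_sketch(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        int ret;

        /* Serialize against other users of the chip before touching locks. */
        onenand_get_device(mtd, FL_LOCKING);
        ret = onenand_do_lock_cmd(mtd, ofs, len, ONENAND_CMD_LOCK);
        onenand_release_device(mtd);

        return ret;
}
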
......@@ -565,7 +565,7 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
}
ubi = ubi_devices[ubi_devices_cnt] = kzalloc(sizeof(struct ubi_device),
GFP_KERNEL);
GFP_KERNEL);
if (!ubi) {
err = -ENOMEM;
goto out_mtd;
......@@ -583,6 +583,22 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
if (err)
goto out_free;
mutex_init(&ubi->buf_mutex);
ubi->peb_buf1 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf1)
goto out_free;
ubi->peb_buf2 = vmalloc(ubi->peb_size);
if (!ubi->peb_buf2)
goto out_free;
#ifdef CONFIG_MTD_UBI_DEBUG
mutex_init(&ubi->dbg_buf_mutex);
ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
if (!ubi->dbg_peb_buf)
goto out_free;
#endif
err = attach_by_scanning(ubi);
if (err) {
dbg_err("failed to attach by scanning, error %d", err);
......@@ -630,6 +646,11 @@ static int attach_mtd_dev(const char *mtd_dev, int vid_hdr_offset,
ubi_wl_close(ubi);
vfree(ubi->vtbl);
out_free:
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG
vfree(ubi->dbg_peb_buf);
#endif
kfree(ubi);
out_mtd:
put_mtd_device(mtd);
......@@ -651,6 +672,11 @@ static void detach_mtd_dev(struct ubi_device *ubi)
ubi_wl_close(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
vfree(ubi->peb_buf1);
vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG
vfree(ubi->dbg_peb_buf);
#endif
kfree(ubi_devices[ubi_num]);
ubi_devices[ubi_num] = NULL;
ubi_devices_cnt -= 1;
......
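
As a hedged illustration of the buffer lifecycle these build.c hunks add: the PEB-sized buffers are allocated once at attach time and freed on the error path and in detach, so the I/O paths touched below (eba.c, io.c, wl.c) can reuse them under buf_mutex instead of calling vmalloc() per operation. The helper names ubi_alloc_peb_bufs/ubi_free_peb_bufs are hypothetical, introduced only to condense the three hunks into one place.

/* Hypothetical helpers sketching the lifecycle of the preallocated buffers. */

static int ubi_alloc_peb_bufs(struct ubi_device *ubi)
{
        mutex_init(&ubi->buf_mutex);
        ubi->peb_buf1 = vmalloc(ubi->peb_size);
        ubi->peb_buf2 = vmalloc(ubi->peb_size);
        if (!ubi->peb_buf1 || !ubi->peb_buf2)
                return -ENOMEM;

#ifdef CONFIG_MTD_UBI_DEBUG
        mutex_init(&ubi->dbg_buf_mutex);
        ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
        if (!ubi->dbg_peb_buf)
                return -ENOMEM;
#endif
        return 0;
}

static void ubi_free_peb_bufs(struct ubi_device *ubi)
{
        /* vfree(NULL) is a no-op, so partial allocations are handled too. */
        vfree(ubi->peb_buf1);
        vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG
        vfree(ubi->dbg_peb_buf);
#endif
}
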
......@@ -42,7 +42,8 @@ void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr)
dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset));
dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc));
dbg_msg("erase counter header hexdump:");
ubi_dbg_hexdump(ec_hdr, UBI_EC_HDR_SIZE);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ec_hdr, UBI_EC_HDR_SIZE, 1);
}
/**
......@@ -187,38 +188,4 @@ void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req)
dbg_msg("the 1st 16 characters of the name: %s", nm);
}
#define BYTES_PER_LINE 32
/**
* ubi_dbg_hexdump - dump a buffer.
* @ptr: the buffer to dump
* @size: buffer size which must be multiple of 4 bytes
*/
void ubi_dbg_hexdump(const void *ptr, int size)
{
int i, k = 0, rows, columns;
const uint8_t *p = ptr;
size = ALIGN(size, 4);
rows = size/BYTES_PER_LINE + size % BYTES_PER_LINE;
for (i = 0; i < rows; i++) {
int j;
cond_resched();
columns = min(size - k, BYTES_PER_LINE) / 4;
if (columns == 0)
break;
printk(KERN_DEBUG "%5d: ", i * BYTES_PER_LINE);
for (j = 0; j < columns; j++) {
int n, N;
N = size - k > 4 ? 4 : size - k;
for (n = 0; n < N; n++)
printk("%02x", p[k++]);
printk(" ");
}
printk("\n");
}
}
#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
......@@ -59,7 +59,6 @@ void ubi_dbg_dump_vtbl_record(const struct ubi_vtbl_record *r, int idx);
void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv);
void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type);
void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req);
void ubi_dbg_hexdump(const void *buf, int size);
#else
......@@ -72,7 +71,6 @@ void ubi_dbg_hexdump(const void *buf, int size);
#define ubi_dbg_dump_sv(sv) ({})
#define ubi_dbg_dump_seb(seb, type) ({})
#define ubi_dbg_dump_mkvol_req(req) ({})
#define ubi_dbg_hexdump(buf, size) ({})
#endif /* CONFIG_MTD_UBI_DEBUG_MSG */
......
......@@ -46,6 +46,9 @@
#include <linux/err.h>
#include "ubi.h"
/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1
/**
* struct ltree_entry - an entry in the lock tree.
* @rb: links RB-tree nodes
......@@ -157,7 +160,7 @@ static struct ltree_entry *ltree_add_entry(struct ubi_device *ubi, int vol_id,
{
struct ltree_entry *le, *le1, *le_free;
le = kmem_cache_alloc(ltree_slab, GFP_KERNEL);
le = kmem_cache_alloc(ltree_slab, GFP_NOFS);
if (!le)
return ERR_PTR(-ENOMEM);
......@@ -397,7 +400,7 @@ int ubi_eba_read_leb(struct ubi_device *ubi, int vol_id, int lnum, void *buf,
retry:
if (check) {
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr) {
err = -ENOMEM;
goto out_unlock;
......@@ -495,16 +498,18 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
struct ubi_volume *vol = ubi->volumes[idx];
struct ubi_vid_hdr *vid_hdr;
unsigned char *new_buf;
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr) {
return -ENOMEM;
}
mutex_lock(&ubi->buf_mutex);
retry:
new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
if (new_pnum < 0) {
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return new_pnum;
}
......@@ -524,31 +529,22 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
goto write_error;
data_size = offset + len;
new_buf = vmalloc(data_size);
if (!new_buf) {
err = -ENOMEM;
goto out_put;
}
memset(new_buf + offset, 0xFF, len);
memset(ubi->peb_buf1 + offset, 0xFF, len);
/* Read everything before the area where the write failure happened */
if (offset > 0) {
err = ubi_io_read_data(ubi, new_buf, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS) {
vfree(new_buf);
err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
if (err && err != UBI_IO_BITFLIPS)
goto out_put;
}
}
memcpy(new_buf + offset, buf, len);
memcpy(ubi->peb_buf1 + offset, buf, len);
err = ubi_io_write_data(ubi, new_buf, new_pnum, 0, data_size);
if (err) {
vfree(new_buf);
err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
if (err)
goto write_error;
}
vfree(new_buf);
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
vol->eba_tbl[lnum] = new_pnum;
......@@ -558,6 +554,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
return 0;
out_put:
mutex_unlock(&ubi->buf_mutex);
ubi_wl_put_peb(ubi, new_pnum, 1);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
......@@ -570,6 +567,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
ubi_warn("failed to write to PEB %d", new_pnum);
ubi_wl_put_peb(ubi, new_pnum, 1);
if (++tries > UBI_IO_RETRIES) {
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
......@@ -627,7 +625,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, int vol_id, int lnum,
* The logical eraseblock is not mapped. We have to get a free physical
* eraseblock and write the volume identifier header there first.
*/
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr) {
leb_write_unlock(ubi, vol_id, lnum);
return -ENOMEM;
......@@ -738,7 +736,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
else
ubi_assert(len % ubi->min_io_size == 0);
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
......@@ -832,6 +830,9 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, int vol_id, int lnum,
* data, which has to be aligned. This function guarantees that in case of an
* unclean reboot the old contents is preserved. Returns zero in case of
* success and a negative error code in case of failure.
*
* UBI reserves one LEB for the "atomic LEB change" operation, so only one
* LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
*/
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
const void *buf, int len, int dtype)
......@@ -844,15 +845,14 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
if (ubi->ro_mode)
return -EROFS;
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
mutex_lock(&ubi->alc_mutex);
err = leb_write_lock(ubi, vol_id, lnum);
if (err) {
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
}
if (err)
goto out_mutex;
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
......@@ -869,9 +869,8 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
retry:
pnum = ubi_wl_get_peb(ubi, dtype);
if (pnum < 0) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
return pnum;
err = pnum;
goto out_leb_unlock;
}
dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
......@@ -893,17 +892,18 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
if (vol->eba_tbl[lnum] >= 0) {
err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
if (err) {
ubi_free_vid_hdr(ubi, vid_hdr);
leb_write_unlock(ubi, vol_id, lnum);
return err;
}
if (err)
goto out_leb_unlock;
}
vol->eba_tbl[lnum] = pnum;
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
mutex_unlock(&ubi->alc_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
return err;
write_error:
if (err != -EIO || !ubi->bad_allowed) {
......@@ -913,17 +913,13 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
* mode just in case.
*/
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
goto out_leb_unlock;
}
err = ubi_wl_put_peb(ubi, pnum, 1);
if (err || ++tries > UBI_IO_RETRIES) {
ubi_ro_mode(ubi);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
goto out_leb_unlock;
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
......@@ -965,7 +961,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
int err, vol_id, lnum, data_size, aldata_size, pnum, idx;
struct ubi_volume *vol;
uint32_t crc;
void *buf, *buf1 = NULL;
vol_id = be32_to_cpu(vid_hdr->vol_id);
lnum = be32_to_cpu(vid_hdr->lnum);
......@@ -979,19 +974,15 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
data_size = aldata_size =
ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
buf = vmalloc(aldata_size);
if (!buf)
return -ENOMEM;
/*
* We do not want anybody to write to this logical eraseblock while we
* are moving it, so we lock it.
*/
err = leb_write_lock(ubi, vol_id, lnum);
if (err) {
vfree(buf);
if (err)
return err;
}
mutex_lock(&ubi->buf_mutex);
/*
* But the logical eraseblock might have been put by this time.
......@@ -1023,7 +1014,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
/* OK, now the LEB is locked and we can safely start moving it */
dbg_eba("read %d bytes of data", aldata_size);
err = ubi_io_read_data(ubi, buf, from, 0, aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size);
if (err && err != UBI_IO_BITFLIPS) {
ubi_warn("error %d while reading data from PEB %d",
err, from);
......@@ -1042,10 +1033,10 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
*/
if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
aldata_size = data_size =
ubi_calc_data_len(ubi, buf, data_size);
ubi_calc_data_len(ubi, ubi->peb_buf1, data_size);
cond_resched();
crc = crc32(UBI_CRC32_INIT, buf, data_size);
crc = crc32(UBI_CRC32_INIT, ubi->peb_buf1, data_size);
cond_resched();
/*
......@@ -1076,23 +1067,18 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
if (data_size > 0) {
err = ubi_io_write_data(ubi, buf, to, 0, aldata_size);
err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
if (err)
goto out_unlock;
cond_resched();
/*
* We've written the data and are going to read it back to make
* sure it was written correctly.
*/
buf1 = vmalloc(aldata_size);
if (!buf1) {
err = -ENOMEM;
goto out_unlock;
}
cond_resched();
err = ubi_io_read_data(ubi, buf1, to, 0, aldata_size);
err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size);
if (err) {
if (err != UBI_IO_BITFLIPS)
ubi_warn("cannot read data back from PEB %d",
......@@ -1102,7 +1088,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
cond_resched();
if (memcmp(buf, buf1, aldata_size)) {
if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
ubi_warn("read data back from PEB %d - it is different",
to);
goto out_unlock;
......@@ -1112,16 +1098,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
ubi_assert(vol->eba_tbl[lnum] == from);
vol->eba_tbl[lnum] = to;
leb_write_unlock(ubi, vol_id, lnum);
vfree(buf);
vfree(buf1);
return 0;
out_unlock:
mutex_unlock(&ubi->buf_mutex);
leb_write_unlock(ubi, vol_id, lnum);
vfree(buf);
vfree(buf1);
return err;
}
......@@ -1144,6 +1123,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
dbg_eba("initialize EBA unit");
spin_lock_init(&ubi->ltree_lock);
mutex_init(&ubi->alc_mutex);
ubi->ltree = RB_ROOT;
if (ubi_devices_cnt == 0) {
......@@ -1205,6 +1185,15 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
}
if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, EBA_RESERVED_PEBS);
err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= EBA_RESERVED_PEBS;
ubi->rsvd_pebs += EBA_RESERVED_PEBS;
dbg_eba("EBA unit is initialized");
return 0;
......
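
A condensed sketch of the error-path structure the atomic-LEB-change hunks above converge on: a single pair of exit labels unlocks the LEB, drops alc_mutex, and frees the VID header exactly once. The body is largely elided; the real function also handles write errors and retries.

/* Sketch: unified unlock/free exit paths, serialized by @alc_mutex. */
int ubi_eba_atomic_leb_change_sketch(struct ubi_device *ubi, int vol_id,
                                     int lnum, const void *buf, int len,
                                     int dtype)
{
        struct ubi_vid_hdr *vid_hdr;
        int err, pnum;

        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
                return -ENOMEM;

        /* Only one atomic LEB change may run at a time. */
        mutex_lock(&ubi->alc_mutex);
        err = leb_write_lock(ubi, vol_id, lnum);
        if (err)
                goto out_mutex;

        pnum = ubi_wl_get_peb(ubi, dtype);
        if (pnum < 0) {
                err = pnum;
                goto out_leb_unlock;
        }

        /* ... write VID header and data, update vol->eba_tbl[lnum] ... */
        err = 0;

out_leb_unlock:
        leb_write_unlock(ubi, vol_id, lnum);
out_mutex:
        mutex_unlock(&ubi->alc_mutex);
        ubi_free_vid_hdr(ubi, vid_hdr);
        return err;
}
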
......@@ -98,8 +98,8 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr);
static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
int offset, int len);
static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
int len);
#else
#define paranoid_check_not_bad(ubi, pnum) 0
#define paranoid_check_peb_ec_hdr(ubi, pnum) 0
......@@ -202,8 +202,8 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
* Note, in case of an error, it is possible that something was still written
* to the flash media, but may be some garbage.
*/
int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len)
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
int len)
{
int err;
size_t written;
......@@ -285,7 +285,7 @@ static void erase_callback(struct erase_info *ei)
* zero in case of success and a negative error code in case of failure. If
* %-EIO is returned, the physical eraseblock most probably went bad.
*/
static int do_sync_erase(const struct ubi_device *ubi, int pnum)
static int do_sync_erase(struct ubi_device *ubi, int pnum)
{
int err, retries = 0;
struct erase_info ei;
......@@ -377,29 +377,25 @@ static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
* test, a positive number of erase operations done if the test was
* successfully passed, and other negative error codes in case of other errors.
*/
static int torture_peb(const struct ubi_device *ubi, int pnum)
static int torture_peb(struct ubi_device *ubi, int pnum)
{
void *buf;
int err, i, patt_count;
buf = vmalloc(ubi->peb_size);
if (!buf)
return -ENOMEM;
patt_count = ARRAY_SIZE(patterns);
ubi_assert(patt_count > 0);
mutex_lock(&ubi->buf_mutex);
for (i = 0; i < patt_count; i++) {
err = do_sync_erase(ubi, pnum);
if (err)
goto out;
/* Make sure the PEB contains only 0xFF bytes */
err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size);
err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
if (err)
goto out;
err = check_pattern(buf, 0xFF, ubi->peb_size);
err = check_pattern(ubi->peb_buf1, 0xFF, ubi->peb_size);
if (err == 0) {
ubi_err("erased PEB %d, but a non-0xFF byte found",
pnum);
......@@ -408,17 +404,17 @@ static int torture_peb(const struct ubi_device *ubi, int pnum)
}
/* Write a pattern and check it */
memset(buf, patterns[i], ubi->peb_size);
err = ubi_io_write(ubi, buf, pnum, 0, ubi->peb_size);
memset(ubi->peb_buf1, patterns[i], ubi->peb_size);
err = ubi_io_write(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
if (err)
goto out;
memset(buf, ~patterns[i], ubi->peb_size);
err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size);
memset(ubi->peb_buf1, ~patterns[i], ubi->peb_size);
err = ubi_io_read(ubi, ubi->peb_buf1, pnum, 0, ubi->peb_size);
if (err)
goto out;
err = check_pattern(buf, patterns[i], ubi->peb_size);
err = check_pattern(ubi->peb_buf1, patterns[i], ubi->peb_size);
if (err == 0) {
ubi_err("pattern %x checking failed for PEB %d",
patterns[i], pnum);
......@@ -430,14 +426,17 @@ static int torture_peb(const struct ubi_device *ubi, int pnum)
err = patt_count;
out:
if (err == UBI_IO_BITFLIPS || err == -EBADMSG)
mutex_unlock(&ubi->buf_mutex);
if (err == UBI_IO_BITFLIPS || err == -EBADMSG) {
/*
* If a bit-flip or data integrity error was detected, the test
* has not passed because it happened on a freshly erased
* physical eraseblock which means something is wrong with it.
*/
ubi_err("read problems on freshly erased PEB %d, must be bad",
pnum);
err = -EIO;
vfree(buf);
}
return err;
}
......@@ -457,7 +456,7 @@ static int torture_peb(const struct ubi_device *ubi, int pnum)
* codes in case of other errors. Note, %-EIO means that the physical
* eraseblock is bad.
*/
int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture)
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
{
int err, ret = 0;
......@@ -614,7 +613,7 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
* o %UBI_IO_PEB_EMPTY if the physical eraseblock is empty;
* o a negative error code in case of failure.
*/
int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr, int verbose)
{
int err, read_err = 0;
......@@ -720,7 +719,7 @@ int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
* case of failure. If %-EIO is returned, the physical eraseblock most probably
* went bad.
*/
int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr)
{
int err;
......@@ -886,7 +885,7 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
* header there);
* o a negative error code in case of failure.
*/
int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr, int verbose)
{
int err, read_err = 0;
......@@ -993,7 +992,7 @@ int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
* case of failure. If %-EIO is returned, the physical eraseblock probably went
* bad.
*/
int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr)
{
int err;
......@@ -1096,7 +1095,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
uint32_t crc, hdr_crc;
struct ubi_ec_hdr *ec_hdr;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
......@@ -1176,7 +1175,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
struct ubi_vid_hdr *vid_hdr;
void *p;
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
......@@ -1216,44 +1215,40 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
* @offset of the physical eraseblock @pnum, %1 if not, and a negative error
* code if an error occurred.
*/
static int paranoid_check_all_ff(const struct ubi_device *ubi, int pnum,
int offset, int len)
static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset,
int len)
{
size_t read;
int err;
void *buf;
loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
buf = vmalloc(len);
if (!buf)
return -ENOMEM;
memset(buf, 0, len);
err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
mutex_lock(&ubi->dbg_buf_mutex);
err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
if (err && err != -EUCLEAN) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
goto error;
}
err = check_pattern(buf, 0xFF, len);
err = check_pattern(ubi->dbg_peb_buf, 0xFF, len);
if (err == 0) {
ubi_err("flash region at PEB %d:%d, length %d does not "
"contain all 0xFF bytes", pnum, offset, len);
goto fail;
}
mutex_unlock(&ubi->dbg_buf_mutex);
vfree(buf);
return 0;
fail:
ubi_err("paranoid check failed for PEB %d", pnum);
dbg_msg("hex dump of the %d-%d region", offset, offset + len);
ubi_dbg_hexdump(buf, len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ubi->dbg_peb_buf, len, 1);
err = 1;
error:
ubi_dbg_dump_stack();
vfree(buf);
mutex_unlock(&ubi->dbg_buf_mutex);
return err;
}
......
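
And a brief sketch of how the paranoid check above now uses the per-device debug buffer under dbg_buf_mutex and the kernel's print_hex_dump(), instead of a vmalloc()ed scratch buffer and the removed ubi_dbg_hexdump(). This is a sketch, assuming check_pattern() returns 1 when every byte matches the pattern and 0 otherwise, as in io.c.

/* Sketch: verify that @len bytes at @offset of PEB @pnum are all 0xFF. */
static int check_all_ff_sketch(struct ubi_device *ubi, int pnum, int offset,
                               int len)
{
        loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
        size_t read;
        int err;

        mutex_lock(&ubi->dbg_buf_mutex);
        err = ubi->mtd->read(ubi->mtd, addr, len, &read, ubi->dbg_peb_buf);
        if (err && err != -EUCLEAN)
                goto out;                       /* real read error */

        if (check_pattern(ubi->dbg_peb_buf, 0xFF, len)) {
                err = 0;                        /* erased, as expected */
        } else {
                /* 32 bytes per row, 1-byte groups, with an ASCII column */
                print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
                               ubi->dbg_peb_buf, len, 1);
                err = 1;                        /* paranoid check failed */
        }
out:
        mutex_unlock(&ubi->dbg_buf_mutex);
        return err;
}
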
......@@ -99,16 +99,21 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
{
int err;
struct ubi_volume_desc *desc;
struct ubi_device *ubi = ubi_devices[ubi_num];
struct ubi_device *ubi;
struct ubi_volume *vol;
dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
err = -ENODEV;
if (ubi_num < 0)
return ERR_PTR(err);
ubi = ubi_devices[ubi_num];
if (!try_module_get(THIS_MODULE))
return ERR_PTR(err);
if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES || !ubi)
if (ubi_num >= UBI_MAX_DEVICES || !ubi)
goto out_put;
err = -EINVAL;
......
......@@ -45,8 +45,7 @@
#include "ubi.h"
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_si(const struct ubi_device *ubi,
struct ubi_scan_info *si);
static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si);
#else
#define paranoid_check_si(ubi, si) 0
#endif
......@@ -259,14 +258,13 @@ static struct ubi_scan_volume *add_volume(struct ubi_scan_info *si, int vol_id,
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
static int compare_lebs(const struct ubi_device *ubi,
const struct ubi_scan_leb *seb, int pnum,
const struct ubi_vid_hdr *vid_hdr)
static int compare_lebs(struct ubi_device *ubi, const struct ubi_scan_leb *seb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
void *buf;
int len, err, second_is_newer, bitflips = 0, corrupted = 0;
uint32_t data_crc, crc;
struct ubi_vid_hdr *vidh = NULL;
struct ubi_vid_hdr *vh = NULL;
unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);
if (seb->sqnum == 0 && sqnum2 == 0) {
......@@ -323,11 +321,11 @@ static int compare_lebs(const struct ubi_device *ubi,
} else {
pnum = seb->pnum;
vidh = ubi_zalloc_vid_hdr(ubi);
if (!vidh)
vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vh)
return -ENOMEM;
err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (err) {
if (err == UBI_IO_BITFLIPS)
bitflips = 1;
......@@ -341,7 +339,7 @@ static int compare_lebs(const struct ubi_device *ubi,
}
}
if (!vidh->copy_flag) {
if (!vh->copy_flag) {
/* It is not a copy, so it is newer */
dbg_bld("first PEB %d is newer, copy_flag is unset",
pnum);
......@@ -349,7 +347,7 @@ static int compare_lebs(const struct ubi_device *ubi,
goto out_free_vidh;
}
vid_hdr = vidh;
vid_hdr = vh;
}
/* Read the data of the copy and check the CRC */
......@@ -379,7 +377,7 @@ static int compare_lebs(const struct ubi_device *ubi,
}
vfree(buf);
ubi_free_vid_hdr(ubi, vidh);
ubi_free_vid_hdr(ubi, vh);
if (second_is_newer)
dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
......@@ -391,7 +389,7 @@ static int compare_lebs(const struct ubi_device *ubi,
out_free_buf:
vfree(buf);
out_free_vidh:
ubi_free_vid_hdr(ubi, vidh);
ubi_free_vid_hdr(ubi, vh);
ubi_assert(err < 0);
return err;
}
......@@ -413,7 +411,7 @@ static int compare_lebs(const struct ubi_device *ubi,
* to be picked, while the older one has to be dropped. This function returns
* zero in case of success and a negative error code in case of failure.
*/
int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
int bitflips)
{
......@@ -667,16 +665,12 @@ void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv)
* function returns zero in case of success and a negative error code in case
* of failure.
*/
int ubi_scan_erase_peb(const struct ubi_device *ubi,
const struct ubi_scan_info *si, int pnum, int ec)
int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
int pnum, int ec)
{
int err;
struct ubi_ec_hdr *ec_hdr;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ec_hdr)
return -ENOMEM;
if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
/*
* Erase counter overflow. Upgrade UBI and use 64-bit
......@@ -686,6 +680,10 @@ int ubi_scan_erase_peb(const struct ubi_device *ubi,
return -EINVAL;
}
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ec_hdr)
return -ENOMEM;
ec_hdr->ec = cpu_to_be64(ec);
err = ubi_io_sync_erase(ubi, pnum, 0);
......@@ -712,7 +710,7 @@ int ubi_scan_erase_peb(const struct ubi_device *ubi,
* This function returns scanning physical eraseblock information in case of
* success and an error code in case of failure.
*/
struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi,
struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
struct ubi_scan_info *si)
{
int err = 0, i;
......@@ -948,7 +946,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
if (!ech)
goto out_si;
vidh = ubi_zalloc_vid_hdr(ubi);
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
......@@ -1110,8 +1108,7 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
* This function returns zero if the scanning information is all right, %1 if
* not and a negative error code if an error occurred.
*/
static int paranoid_check_si(const struct ubi_device *ubi,
struct ubi_scan_info *si)
static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
{
int pnum, err, vols_found = 0;
struct rb_node *rb1, *rb2;
......
......@@ -147,7 +147,7 @@ static inline void ubi_scan_move_to_list(struct ubi_scan_volume *sv,
list_add_tail(&seb->u.list, list);
}
int ubi_scan_add_used(const struct ubi_device *ubi, struct ubi_scan_info *si,
int ubi_scan_add_used(struct ubi_device *ubi, struct ubi_scan_info *si,
int pnum, int ec, const struct ubi_vid_hdr *vid_hdr,
int bitflips);
struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
......@@ -155,10 +155,10 @@ struct ubi_scan_volume *ubi_scan_find_sv(const struct ubi_scan_info *si,
struct ubi_scan_leb *ubi_scan_find_seb(const struct ubi_scan_volume *sv,
int lnum);
void ubi_scan_rm_volume(struct ubi_scan_info *si, struct ubi_scan_volume *sv);
struct ubi_scan_leb *ubi_scan_get_free_peb(const struct ubi_device *ubi,
struct ubi_scan_leb *ubi_scan_get_free_peb(struct ubi_device *ubi,
struct ubi_scan_info *si);
int ubi_scan_erase_peb(const struct ubi_device *ubi,
const struct ubi_scan_info *si, int pnum, int ec);
int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si,
int pnum, int ec);
struct ubi_scan_info *ubi_scan(struct ubi_device *ubi);
void ubi_scan_destroy_si(struct ubi_scan_info *si);
......
......@@ -221,14 +221,15 @@ struct ubi_wl_entry;
* @vtbl_slots: how many slots are available in the volume table
* @vtbl_size: size of the volume table in bytes
* @vtbl: in-RAM volume table copy
* @vtbl_mutex: protects on-flash volume table
*
* @max_ec: current highest erase counter value
* @mean_ec: current mean erase counter value
*
* global_sqnum: global sequence number
* @global_sqnum: global sequence number
* @ltree_lock: protects the lock tree and @global_sqnum
* @ltree: the lock tree
* @vtbl_mutex: protects on-flash volume table
* @alc_mutex: serializes "atomic LEB change" operations
*
* @used: RB-tree of used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
......@@ -274,6 +275,12 @@ struct ubi_wl_entry;
* @bad_allowed: whether the MTD device admits of bad physical eraseblocks or
* not
* @mtd: MTD device descriptor
*
* @peb_buf1: a buffer of PEB size used for different purposes
* @peb_buf2: another buffer of PEB size used for different purposes
* @buf_mutex: proptects @peb_buf1 and @peb_buf2
* @dbg_peb_buf: buffer of PEB size used for debugging
* @dbg_buf_mutex: proptects @dbg_peb_buf
*/
struct ubi_device {
struct cdev cdev;
......@@ -302,6 +309,7 @@ struct ubi_device {
unsigned long long global_sqnum;
spinlock_t ltree_lock;
struct rb_root ltree;
struct mutex alc_mutex;
/* Wear-leveling unit's stuff */
struct rb_root used;
......@@ -343,6 +351,14 @@ struct ubi_device {
int vid_hdr_shift;
int bad_allowed;
struct mtd_info *mtd;
void *peb_buf1;
void *peb_buf2;
struct mutex buf_mutex;
#ifdef CONFIG_MTD_UBI_DEBUG
void *dbg_peb_buf;
struct mutex dbg_buf_mutex;
#endif
};
extern struct file_operations ubi_cdev_operations;
......@@ -409,18 +425,18 @@ void ubi_wl_close(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
int len);
int ubi_io_write(const struct ubi_device *ubi, const void *buf, int pnum,
int offset, int len);
int ubi_io_sync_erase(const struct ubi_device *ubi, int pnum, int torture);
int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
int len);
int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture);
int ubi_io_is_bad(const struct ubi_device *ubi, int pnum);
int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum);
int ubi_io_read_ec_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr, int verbose);
int ubi_io_write_ec_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
struct ubi_ec_hdr *ec_hdr);
int ubi_io_read_vid_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr, int verbose);
int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
struct ubi_vid_hdr *vid_hdr);
/*
......@@ -439,16 +455,18 @@ int ubi_io_write_vid_hdr(const struct ubi_device *ubi, int pnum,
/**
* ubi_zalloc_vid_hdr - allocate a volume identifier header object.
* @ubi: UBI device description object
* @gfp_flags: GFP flags to allocate with
*
* This function returns a pointer to the newly allocated and zero-filled
* volume identifier header object in case of success and %NULL in case of
* failure.
*/
static inline struct ubi_vid_hdr *ubi_zalloc_vid_hdr(const struct ubi_device *ubi)
static inline struct ubi_vid_hdr *
ubi_zalloc_vid_hdr(const struct ubi_device *ubi, gfp_t gfp_flags)
{
void *vid_hdr;
vid_hdr = kzalloc(ubi->vid_hdr_alsize, GFP_KERNEL);
vid_hdr = kzalloc(ubi->vid_hdr_alsize, gfp_flags);
if (!vid_hdr)
return NULL;
......@@ -492,7 +510,7 @@ static inline int ubi_io_read_data(const struct ubi_device *ubi, void *buf,
* the beginning of the logical eraseblock, not to the beginning of the
* physical eraseblock.
*/
static inline int ubi_io_write_data(const struct ubi_device *ubi, const void *buf,
static inline int ubi_io_write_data(struct ubi_device *ubi, const void *buf,
int pnum, int offset, int len)
{
ubi_assert(offset >= 0);
......
......@@ -37,21 +37,21 @@ static ssize_t vol_attribute_show(struct device *dev,
struct device_attribute *attr, char *buf);
/* Device attributes corresponding to files in '/<sysfs>/class/ubi/ubiX_Y' */
static struct device_attribute vol_reserved_ebs =
static struct device_attribute attr_vol_reserved_ebs =
__ATTR(reserved_ebs, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_type =
static struct device_attribute attr_vol_type =
__ATTR(type, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_name =
static struct device_attribute attr_vol_name =
__ATTR(name, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_corrupted =
static struct device_attribute attr_vol_corrupted =
__ATTR(corrupted, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_alignment =
static struct device_attribute attr_vol_alignment =
__ATTR(alignment, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_usable_eb_size =
static struct device_attribute attr_vol_usable_eb_size =
__ATTR(usable_eb_size, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_data_bytes =
static struct device_attribute attr_vol_data_bytes =
__ATTR(data_bytes, S_IRUGO, vol_attribute_show, NULL);
static struct device_attribute vol_upd_marker =
static struct device_attribute attr_vol_upd_marker =
__ATTR(upd_marker, S_IRUGO, vol_attribute_show, NULL);
/*
......@@ -78,23 +78,27 @@ static ssize_t vol_attribute_show(struct device *dev,
spin_unlock(&vol->ubi->volumes_lock);
return -ENODEV;
}
if (attr == &vol_reserved_ebs)
if (attr == &attr_vol_reserved_ebs)
ret = sprintf(buf, "%d\n", vol->reserved_pebs);
else if (attr == &vol_type) {
else if (attr == &attr_vol_type) {
const char *tp;
tp = vol->vol_type == UBI_DYNAMIC_VOLUME ? "dynamic" : "static";
if (vol->vol_type == UBI_DYNAMIC_VOLUME)
tp = "dynamic";
else
tp = "static";
ret = sprintf(buf, "%s\n", tp);
} else if (attr == &vol_name)
} else if (attr == &attr_vol_name)
ret = sprintf(buf, "%s\n", vol->name);
else if (attr == &vol_corrupted)
else if (attr == &attr_vol_corrupted)
ret = sprintf(buf, "%d\n", vol->corrupted);
else if (attr == &vol_alignment)
else if (attr == &attr_vol_alignment)
ret = sprintf(buf, "%d\n", vol->alignment);
else if (attr == &vol_usable_eb_size) {
else if (attr == &attr_vol_usable_eb_size) {
ret = sprintf(buf, "%d\n", vol->usable_leb_size);
} else if (attr == &vol_data_bytes)
} else if (attr == &attr_vol_data_bytes)
ret = sprintf(buf, "%lld\n", vol->used_bytes);
else if (attr == &vol_upd_marker)
else if (attr == &attr_vol_upd_marker)
ret = sprintf(buf, "%d\n", vol->upd_marker);
else
BUG();
......@@ -126,28 +130,28 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
{
int err;
err = device_create_file(&vol->dev, &vol_reserved_ebs);
err = device_create_file(&vol->dev, &attr_vol_reserved_ebs);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_type);
err = device_create_file(&vol->dev, &attr_vol_type);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_name);
err = device_create_file(&vol->dev, &attr_vol_name);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_corrupted);
err = device_create_file(&vol->dev, &attr_vol_corrupted);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_alignment);
err = device_create_file(&vol->dev, &attr_vol_alignment);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_usable_eb_size);
err = device_create_file(&vol->dev, &attr_vol_usable_eb_size);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_data_bytes);
err = device_create_file(&vol->dev, &attr_vol_data_bytes);
if (err)
return err;
err = device_create_file(&vol->dev, &vol_upd_marker);
err = device_create_file(&vol->dev, &attr_vol_upd_marker);
if (err)
return err;
return 0;
......@@ -159,14 +163,14 @@ static int volume_sysfs_init(struct ubi_device *ubi, struct ubi_volume *vol)
*/
static void volume_sysfs_close(struct ubi_volume *vol)
{
device_remove_file(&vol->dev, &vol_upd_marker);
device_remove_file(&vol->dev, &vol_data_bytes);
device_remove_file(&vol->dev, &vol_usable_eb_size);
device_remove_file(&vol->dev, &vol_alignment);
device_remove_file(&vol->dev, &vol_corrupted);
device_remove_file(&vol->dev, &vol_name);
device_remove_file(&vol->dev, &vol_type);
device_remove_file(&vol->dev, &vol_reserved_ebs);
device_remove_file(&vol->dev, &attr_vol_upd_marker);
device_remove_file(&vol->dev, &attr_vol_data_bytes);
device_remove_file(&vol->dev, &attr_vol_usable_eb_size);
device_remove_file(&vol->dev, &attr_vol_alignment);
device_remove_file(&vol->dev, &attr_vol_corrupted);
device_remove_file(&vol->dev, &attr_vol_name);
device_remove_file(&vol->dev, &attr_vol_type);
device_remove_file(&vol->dev, &attr_vol_reserved_ebs);
device_unregister(&vol->dev);
}
......
......@@ -254,7 +254,7 @@ static int vtbl_check(const struct ubi_device *ubi,
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si,
static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
int copy, void *vtbl)
{
int err, tries = 0;
......@@ -264,7 +264,7 @@ static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si,
ubi_msg("create volume table (copy #%d)", copy + 1);
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vid_hdr)
return -ENOMEM;
......@@ -339,7 +339,7 @@ static int create_vtbl(const struct ubi_device *ubi, struct ubi_scan_info *si,
* not corrupted, and recovering from corruptions if needed. Returns volume
* table in case of success and a negative error code in case of failure.
*/
static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
struct ubi_scan_info *si,
struct ubi_scan_volume *sv)
{
......@@ -453,7 +453,7 @@ static struct ubi_vtbl_record *process_lvol(const struct ubi_device *ubi,
* This function returns volume table contents in case of success and a
* negative error code in case of failure.
*/
static struct ubi_vtbl_record *create_empty_lvol(const struct ubi_device *ubi,
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
struct ubi_scan_info *si)
{
int i;
......
......@@ -208,7 +208,7 @@ struct ubi_work {
};
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root);
#else
......@@ -219,17 +219,6 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
/* Slab cache for wear-leveling entries */
static struct kmem_cache *wl_entries_slab;
/**
* tree_empty - a helper function to check if an RB-tree is empty.
* @root: the root of the tree
*
* This function returns non-zero if the RB-tree is empty and zero if not.
*/
static inline int tree_empty(struct rb_root *root)
{
return root->rb_node == NULL;
}
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
......@@ -266,45 +255,6 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
rb_insert_color(&e->rb, root);
}
/*
* Helper functions to add and delete wear-leveling entries from different
* trees.
*/
static void free_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
wl_tree_add(e, &ubi->free);
}
static inline void used_tree_add(struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
wl_tree_add(e, &ubi->used);
}
static inline void scrub_tree_add(struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
wl_tree_add(e, &ubi->scrub);
}
static inline void free_tree_del(struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
paranoid_check_in_wl_tree(e, &ubi->free);
rb_erase(&e->rb, &ubi->free);
}
static inline void used_tree_del(struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
paranoid_check_in_wl_tree(e, &ubi->used);
rb_erase(&e->rb, &ubi->used);
}
static inline void scrub_tree_del(struct ubi_device *ubi,
struct ubi_wl_entry *e)
{
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->rb, &ubi->scrub);
}
/**
* do_work - do one pending work.
* @ubi: UBI device description object
......@@ -358,7 +308,7 @@ static int produce_free_peb(struct ubi_device *ubi)
int err;
spin_lock(&ubi->wl_lock);
while (tree_empty(&ubi->free)) {
while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
......@@ -508,13 +458,13 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
dtype == UBI_UNKNOWN);
pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL);
pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
if (!pe)
return -ENOMEM;
retry:
spin_lock(&ubi->wl_lock);
if (tree_empty(&ubi->free)) {
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
......@@ -585,7 +535,8 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
* Move the physical eraseblock to the protection trees where it will
* be protected from being moved for some time.
*/
free_tree_del(ubi, e);
paranoid_check_in_wl_tree(e, &ubi->free);
rb_erase(&e->rb, &ubi->free);
prot_tree_add(ubi, e, pe, protect);
dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
......@@ -645,7 +596,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur
if (err > 0)
return -EINVAL;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
......@@ -704,7 +655,7 @@ static void check_protection_over(struct ubi_device *ubi)
*/
while (1) {
spin_lock(&ubi->wl_lock);
if (tree_empty(&ubi->prot.aec)) {
if (!ubi->prot.aec.rb_node) {
spin_unlock(&ubi->wl_lock);
break;
}
......@@ -721,7 +672,7 @@ static void check_protection_over(struct ubi_device *ubi)
pe->e->pnum, ubi->abs_ec, pe->abs_ec);
rb_erase(&pe->rb_aec, &ubi->prot.aec);
rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
used_tree_add(ubi, pe->e);
wl_tree_add(pe->e, &ubi->used);
spin_unlock(&ubi->wl_lock);
kfree(pe);
......@@ -768,7 +719,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wl_wrk)
return -ENOMEM;
......@@ -802,7 +753,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (cancel)
return 0;
vid_hdr = ubi_zalloc_vid_hdr(ubi);
vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
if (!vid_hdr)
return -ENOMEM;
......@@ -812,8 +763,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* Only one WL worker at a time is supported at this implementation, so
* make sure a PEB is not being moved already.
*/
if (ubi->move_to || tree_empty(&ubi->free) ||
(tree_empty(&ubi->used) && tree_empty(&ubi->scrub))) {
if (ubi->move_to || !ubi->free.rb_node ||
(!ubi->used.rb_node && !ubi->scrub.rb_node)) {
/*
* Only one WL worker at a time is supported at this
* implementation, so if a LEB is already being moved, cancel.
......@@ -828,14 +779,14 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
* triggered again.
*/
dbg_wl("cancel WL, a list is empty: free %d, used %d",
tree_empty(&ubi->free), tree_empty(&ubi->used));
!ubi->free.rb_node, !ubi->used.rb_node);
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
}
if (tree_empty(&ubi->scrub)) {
if (!ubi->scrub.rb_node) {
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
......@@ -852,17 +803,20 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_free_vid_hdr(ubi, vid_hdr);
return 0;
}
used_tree_del(ubi, e1);
paranoid_check_in_wl_tree(e1, &ubi->used);
rb_erase(&e1->rb, &ubi->used);
dbg_wl("move PEB %d EC %d to PEB %d EC %d",
e1->pnum, e1->ec, e2->pnum, e2->ec);
} else {
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
scrub_tree_del(ubi, e1);
paranoid_check_in_wl_tree(e1, &ubi->scrub);
rb_erase(&e1->rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
free_tree_del(ubi, e2);
paranoid_check_in_wl_tree(e2, &ubi->free);
rb_erase(&e2->rb, &ubi->free);
ubi_assert(!ubi->move_from && !ubi->move_to);
ubi_assert(!ubi->move_to_put && !ubi->move_from_put);
ubi->move_from = e1;
......@@ -908,7 +862,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi_free_vid_hdr(ubi, vid_hdr);
spin_lock(&ubi->wl_lock);
if (!ubi->move_to_put)
used_tree_add(ubi, e2);
wl_tree_add(e2, &ubi->used);
else
put = 1;
ubi->move_from = ubi->move_to = NULL;
......@@ -953,7 +907,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
if (ubi->move_from_put)
put = 1;
else
used_tree_add(ubi, e1);
wl_tree_add(e1, &ubi->used);
ubi->move_from = ubi->move_to = NULL;
ubi->move_from_put = ubi->move_to_put = 0;
spin_unlock(&ubi->wl_lock);
......@@ -1005,8 +959,8 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* If the ubi->scrub tree is not empty, scrubbing is needed, and the
* the WL worker has to be scheduled anyway.
*/
if (tree_empty(&ubi->scrub)) {
if (tree_empty(&ubi->used) || tree_empty(&ubi->free))
if (!ubi->scrub.rb_node) {
if (!ubi->used.rb_node || !ubi->free.rb_node)
/* No physical eraseblocks - no deal */
goto out_unlock;
......@@ -1028,7 +982,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
ubi->wl_scheduled = 1;
spin_unlock(&ubi->wl_lock);
wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL);
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
err = -ENOMEM;
goto out_cancel;
......@@ -1079,7 +1033,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
spin_lock(&ubi->wl_lock);
ubi->abs_ec += 1;
free_tree_add(ubi, e);
wl_tree_add(e, &ubi->free);
spin_unlock(&ubi->wl_lock);
/*
......@@ -1093,6 +1047,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
return err;
}
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
kmem_cache_free(wl_entries_slab, e);
......@@ -1211,11 +1166,13 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
spin_unlock(&ubi->wl_lock);
return 0;
} else {
if (in_wl_tree(e, &ubi->used))
used_tree_del(ubi, e);
else if (in_wl_tree(e, &ubi->scrub))
scrub_tree_del(ubi, e);
else
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
rb_erase(&e->rb, &ubi->used);
} else if (in_wl_tree(e, &ubi->scrub)) {
paranoid_check_in_wl_tree(e, &ubi->scrub);
rb_erase(&e->rb, &ubi->scrub);
} else
prot_tree_del(ubi, e->pnum);
}
spin_unlock(&ubi->wl_lock);
......@@ -1223,7 +1180,7 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
err = schedule_erase(ubi, e, torture);
if (err) {
spin_lock(&ubi->wl_lock);
used_tree_add(ubi, e);
wl_tree_add(e, &ubi->used);
spin_unlock(&ubi->wl_lock);
}
......@@ -1267,12 +1224,13 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
goto retry;
}
if (in_wl_tree(e, &ubi->used))
used_tree_del(ubi, e);
else
if (in_wl_tree(e, &ubi->used)) {
paranoid_check_in_wl_tree(e, &ubi->used);
rb_erase(&e->rb, &ubi->used);
} else
prot_tree_del(ubi, pnum);
scrub_tree_add(ubi, e);
wl_tree_add(e, &ubi->scrub);
spin_unlock(&ubi->wl_lock);
/*
......@@ -1488,7 +1446,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
e->pnum = seb->pnum;
e->ec = seb->ec;
ubi_assert(e->ec >= 0);
free_tree_add(ubi, e);
wl_tree_add(e, &ubi->free);
ubi->lookuptbl[e->pnum] = e;
}
......@@ -1522,16 +1480,16 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
if (!seb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
used_tree_add(ubi, e);
wl_tree_add(e, &ubi->used);
} else {
dbg_wl("add PEB %d EC %d to the scrub tree",
e->pnum, e->ec);
scrub_tree_add(ubi, e);
wl_tree_add(e, &ubi->scrub);
}
}
}
if (WL_RESERVED_PEBS > ubi->avail_pebs) {
if (ubi->avail_pebs < WL_RESERVED_PEBS) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, WL_RESERVED_PEBS);
goto out_free;
......@@ -1624,13 +1582,13 @@ void ubi_wl_close(struct ubi_device *ubi)
* is equivalent to @ec, %1 if not, and a negative error code if an error
* occurred.
*/
static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec)
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
int err;
long long read_ec;
struct ubi_ec_hdr *ec_hdr;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
if (!ec_hdr)
return -ENOMEM;
......