author    | Artem Bityutskiy <artem.bityutskiy@linux.intel.com> | 2012-02-03 14:34:14 +0200
committer | David Woodhouse <David.Woodhouse@intel.com>         | 2012-03-27 00:28:05 +0100
commit    | 8273a0c911d8e068297ef70aa7241ee78db4c712 (patch)
tree      | 3aa97da098ac7417bc3ef8bc9b1a3e5b531ed268 /drivers/mtd/mtdcore.c
parent    | 5e4e6e3fdf48c1b012e2b6e80ed1d7e99d4fa6d1 (diff)
mtd: add offset and length checks to the API functions
Add offset and length verification to the MTD API functions: check that the
requested offset and length fall within the MTD device size.
The modified API functions are:
'mtd_erase()'
'mtd_point()'
'mtd_unpoint()'
'mtd_get_unmapped_area()'
'mtd_read()'
'mtd_write()'
'mtd_panic_write()'
'mtd_lock()'
'mtd_unlock()'
'mtd_is_locked()'
'mtd_block_isbad()'
'mtd_block_markbad()'
This patch also uninlines these functions and exports them from mtdcore.c, because
they are not performance-critical and do not have to be inlined.
Signed-off-by: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
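As a side note for readers (not part of the patch itself): every check added here has the same shape. A request is rejected if its offset lies beyond the device or its length runs past the end, and the length is compared against `size - offset` rather than computing `offset + len`, which could wrap around. A minimal standalone C sketch of that pattern, with made-up names (`device_size`, `range_ok`):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-in for mtd->size; not a real MTD device. */
static const uint64_t device_size = 16 * 1024 * 1024; /* 16 MiB */

/*
 * Same shape as the checks added by the patch: reject the request if the
 * offset is past the end of the device or the length runs past the end.
 * Comparing len against (size - offset) avoids the wrap-around that
 * "offset + len > size" could hit with very large values.
 */
static bool range_ok(uint64_t offset, uint64_t len)
{
	return offset <= device_size && len <= device_size - offset;
}

int main(void)
{
	printf("in range:       %d\n", range_ok(0, 4096));                /* 1 */
	printf("past the end:   %d\n", range_ok(device_size, 1));         /* 0 */
	printf("would overflow: %d\n", range_ok(4096, UINT64_MAX - 100)); /* 0 */
	return 0;
}
```

The second and third calls are rejected for the same reasons the kernel wrappers return -EINVAL.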
Diffstat (limited to 'drivers/mtd/mtdcore.c')
-rw-r--r-- | drivers/mtd/mtdcore.c | 148
1 file changed, 148 insertions(+), 0 deletions(-)
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 5ea22cf357fb..8d5e103695f9 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -683,6 +683,154 @@ void __put_mtd_device(struct mtd_info *mtd)
 EXPORT_SYMBOL_GPL(__put_mtd_device);
 
 /*
+ * Erase is an asynchronous operation. Device drivers are supposed
+ * to call instr->callback() whenever the operation completes, even
+ * if it completes with a failure.
+ * Callers are supposed to pass a callback function and wait for it
+ * to be called before writing to the block.
+ */
+int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	if (instr->addr > mtd->size || instr->len > mtd->size - instr->addr)
+		return -EINVAL;
+	return mtd->_erase(mtd, instr);
+}
+EXPORT_SYMBOL_GPL(mtd_erase);
+
+/*
+ * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
+ */
+int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	      void **virt, resource_size_t *phys)
+{
+	*retlen = 0;
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	return mtd->_point(mtd, from, len, retlen, virt, phys);
+}
+EXPORT_SYMBOL_GPL(mtd_point);
+
+/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
+int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
+{
+	if (!mtd->_point)
+		return -EOPNOTSUPP;
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	return mtd->_unpoint(mtd, from, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unpoint);
+
+/*
+ * Allow NOMMU mmap() to directly map the device (if not NULL)
+ * - return the address to which the offset maps
+ * - return -ENOSYS to indicate refusal to do the mapping
+ */
+unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
+				    unsigned long offset, unsigned long flags)
+{
+	if (!mtd->_get_unmapped_area)
+		return -EOPNOTSUPP;
+	if (offset > mtd->size || len > mtd->size - offset)
+		return -EINVAL;
+	return mtd->_get_unmapped_area(mtd, len, offset, flags);
+}
+EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
+
+int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
+	     u_char *buf)
+{
+	if (from < 0 || from > mtd->size || len > mtd->size - from)
+		return -EINVAL;
+	return mtd->_read(mtd, from, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_read);
+
+int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+	      const u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_write)
+		return -EROFS;
+	if (to < 0 || to > mtd->size || len > mtd->size - to)
+		return -EINVAL;
+	return mtd->_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_write);
+
+/*
+ * In blackbox flight recorder like scenarios we want to make successful writes
+ * in interrupt context. panic_write() is only intended to be called when its
+ * known the kernel is about to panic and we need the write to succeed. Since
+ * the kernel is not going to be running for much longer, this function can
+ * break locks and delay to ensure the write succeeds (but not sleep).
+ */
+int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
+		    const u_char *buf)
+{
+	*retlen = 0;
+	if (!mtd->_panic_write)
+		return -EOPNOTSUPP;
+	if (to < 0 || to > mtd->size || len > mtd->size - to)
+		return -EINVAL;
+	return mtd->_panic_write(mtd, to, len, retlen, buf);
+}
+EXPORT_SYMBOL_GPL(mtd_panic_write);
+
+/* Chip-supported device locking */
+int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_lock)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	return mtd->_lock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_lock);
+
+int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_unlock)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	return mtd->_unlock(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_unlock);
+
+int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	if (!mtd->_is_locked)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size || len > mtd->size - ofs)
+		return -EINVAL;
+	return mtd->_is_locked(mtd, ofs, len);
+}
+EXPORT_SYMBOL_GPL(mtd_is_locked);
+
+int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->_block_isbad)
+		return 0;
+	if (ofs < 0 || ofs > mtd->size)
+		return -EINVAL;
+	return mtd->_block_isbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_isbad);
+
+int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	if (!mtd->_block_markbad)
+		return -EOPNOTSUPP;
+	if (ofs < 0 || ofs > mtd->size)
+		return -EINVAL;
+	return mtd->_block_markbad(mtd, ofs);
+}
+EXPORT_SYMBOL_GPL(mtd_block_markbad);
+
+/*
  * default_mtd_writev - the default writev method
  * @mtd: mtd device description object pointer
  * @vecs: the vectors to write
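
From a caller's perspective, the practical effect of the patch is that an out-of-range request now fails in the common mtdcore code with -EINVAL before the driver's method is ever invoked. Below is a rough, hypothetical kernel-module sketch of what such a caller would observe; it assumes some MTD device is registered at index 0, and the module name and messages are illustrative only, not part of the patch:

```c
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/mtd/mtd.h>

static int __init boundscheck_demo_init(void)
{
	struct mtd_info *mtd;
	size_t retlen;
	u_char buf[16];
	int ret;

	/* Grab the first registered MTD device (assumed to exist). */
	mtd = get_mtd_device(NULL, 0);
	if (IS_ERR(mtd))
		return PTR_ERR(mtd);

	/* Ask for a read starting one byte past the end of the device. */
	ret = mtd_read(mtd, mtd->size + 1, sizeof(buf), &retlen, buf);
	pr_info("out-of-range mtd_read() returned %d\n", ret); /* expect -EINVAL */

	put_mtd_device(mtd);
	return 0;
}

static void __exit boundscheck_demo_exit(void)
{
}

module_init(boundscheck_demo_init);
module_exit(boundscheck_demo_exit);
MODULE_LICENSE("GPL");
```

Before this change, the same out-of-range read would have reached the driver's own read method, and how (or whether) it was rejected varied from driver to driver.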