Diffstat (limited to 'include/linux/mtd/nand.h')
-rw-r--r--  include/linux/mtd/nand.h | 222
1 file changed, 211 insertions, 11 deletions
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 697ea2474a7c..07486168d104 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -21,7 +21,7 @@ struct nand_device;
  * @oobsize: OOB area size
  * @pages_per_eraseblock: number of pages per eraseblock
  * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
- * @max_bad_eraseblocks_per_lun: maximum number of eraseblocks per LUN
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
  * @planes_per_lun: number of planes per LUN
  * @luns_per_target: number of LUN per target (target is a synonym for die)
  * @ntargets: total number of targets exposed by the NAND device
@@ -103,6 +103,8 @@ enum nand_page_io_req_type {
  * @ooblen: the number of OOB bytes to read from/write to this page
  * @oobbuf: buffer to store OOB data in or get OOB data from
  * @mode: one of the %MTD_OPS_XXX mode
+ * @continuous: no need to start over the operation at the end of each page, the
+ *              NAND device will automatically prepare the next one
  *
  * This object is used to pass per-page I/O requests to NAND sub-layers. This
  * way all useful information are already formatted in a useful way and
@@ -125,6 +127,7 @@ struct nand_page_io_req {
                 void *in;
         } oobbuf;
         int mode;
+        bool continuous;
 };
 
 const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void);
@@ -231,12 +234,14 @@ struct nand_ops {
 /**
  * struct nand_ecc_context - Context for the ECC engine
  * @conf: basic ECC engine parameters
+ * @nsteps: number of ECC steps
  * @total: total number of bytes used for storing ECC codes, this is used by
  *         generic OOB layouts
  * @priv: ECC engine driver private data
  */
 struct nand_ecc_context {
         struct nand_ecc_props conf;
+        unsigned int nsteps;
         unsigned int total;
         void *priv;
 };
@@ -262,11 +267,35 @@ struct nand_ecc_engine_ops {
 };
 
 /**
+ * enum nand_ecc_engine_integration - How the NAND ECC engine is integrated
+ * @NAND_ECC_ENGINE_INTEGRATION_INVALID: Invalid value
+ * @NAND_ECC_ENGINE_INTEGRATION_PIPELINED: Pipelined engine, performs on-the-fly
+ *                                         correction, does not need to copy
+ *                                         data around
+ * @NAND_ECC_ENGINE_INTEGRATION_EXTERNAL: External engine, needs to bring the
+ *                                        data into its own area before use
+ */
+enum nand_ecc_engine_integration {
+        NAND_ECC_ENGINE_INTEGRATION_INVALID,
+        NAND_ECC_ENGINE_INTEGRATION_PIPELINED,
+        NAND_ECC_ENGINE_INTEGRATION_EXTERNAL,
+};
+
+/**
  * struct nand_ecc_engine - ECC engine abstraction for NAND devices
+ * @dev: Host device
+ * @node: Private field for registration time
  * @ops: ECC engine operations
+ * @integration: How the engine is integrated with the host
+ *               (only relevant on %NAND_ECC_ENGINE_TYPE_ON_HOST engines)
+ * @priv: Private data
  */
 struct nand_ecc_engine {
-        struct nand_ecc_engine_ops *ops;
+        struct device *dev;
+        struct list_head node;
+        const struct nand_ecc_engine_ops *ops;
+        enum nand_ecc_engine_integration integration;
+        void *priv;
 };
 
 void of_get_nand_ecc_user_config(struct nand_device *nand);
@@ -278,6 +307,78 @@ int nand_ecc_finish_io_req(struct nand_device *nand,
                            struct nand_page_io_req *req);
 bool nand_ecc_is_strong_enough(struct nand_device *nand);
 
+#if IS_REACHABLE(CONFIG_MTD_NAND_CORE)
+int nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine);
+int nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine);
+#else
+static inline int
+nand_ecc_register_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+        return -ENOTSUPP;
+}
+static inline int
+nand_ecc_unregister_on_host_hw_engine(struct nand_ecc_engine *engine)
+{
+        return -ENOTSUPP;
+}
+#endif
+
+struct nand_ecc_engine *nand_ecc_get_sw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_die_hw_engine(struct nand_device *nand);
+struct nand_ecc_engine *nand_ecc_get_on_host_hw_engine(struct nand_device *nand);
+void nand_ecc_put_on_host_hw_engine(struct nand_device *nand);
+struct device *nand_ecc_get_engine_dev(struct device *host);
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING)
+struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_hamming_get_engine(void)
+{
+        return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_HAMMING */
+
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
+struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void);
+#else
+static inline struct nand_ecc_engine *nand_ecc_sw_bch_get_engine(void)
+{
+        return NULL;
+}
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
+
+/**
+ * struct nand_ecc_req_tweak_ctx - Help for automatically tweaking requests
+ * @orig_req: Pointer to the original IO request
+ * @nand: Related NAND device, to have access to its memory organization
+ * @page_buffer_size: Real size of the page buffer to use (can be set by the
+ *                    user before the tweaking mechanism initialization)
+ * @oob_buffer_size: Real size of the OOB buffer to use (can be set by the
+ *                   user before the tweaking mechanism initialization)
+ * @spare_databuf: Data bounce buffer
+ * @spare_oobbuf: OOB bounce buffer
+ * @bounce_data: Flag indicating a data bounce buffer is used
+ * @bounce_oob: Flag indicating an OOB bounce buffer is used
+ */
+struct nand_ecc_req_tweak_ctx {
+        struct nand_page_io_req orig_req;
+        struct nand_device *nand;
+        unsigned int page_buffer_size;
+        unsigned int oob_buffer_size;
+        void *spare_databuf;
+        void *spare_oobbuf;
+        bool bounce_data;
+        bool bounce_oob;
+};
+
+int nand_ecc_init_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx,
+                               struct nand_device *nand);
+void nand_ecc_cleanup_req_tweaking(struct nand_ecc_req_tweak_ctx *ctx);
+void nand_ecc_tweak_req(struct nand_ecc_req_tweak_ctx *ctx,
+                        struct nand_page_io_req *req);
+void nand_ecc_restore_req(struct nand_ecc_req_tweak_ctx *ctx,
+                          struct nand_page_io_req *req);
+
 /**
  * struct nand_ecc - Information relative to the ECC
  * @defaults: Default values, depend on the underlying subsystem
@@ -534,6 +635,26 @@ nanddev_get_ecc_conf(struct nand_device *nand)
 }
 
 /**
+ * nanddev_get_ecc_nsteps() - Extract the number of ECC steps
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_nsteps(struct nand_device *nand)
+{
+        return nand->ecc.ctx.nsteps;
+}
+
+/**
+ * nanddev_get_ecc_bytes_per_step() - Extract the number of ECC bytes per step
+ * @nand: NAND device
+ */
+static inline unsigned int
+nanddev_get_ecc_bytes_per_step(struct nand_device *nand)
+{
+        return nand->ecc.ctx.total / nand->ecc.ctx.nsteps;
+}
+
+/**
  * nanddev_get_ecc_requirements() - Extract the ECC requirements from a NAND
  *                                  device
  * @nand: NAND device
@@ -788,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand,
 }
 
 /**
- * nand_io_iter_init - Initialize a NAND I/O iterator
+ * nand_io_page_iter_init - Initialize a NAND I/O iterator
  * @nand: NAND device
  * @offs: absolute offset
  * @req: MTD request
  * @iter: NAND I/O iterator
  *
  * Initializes a NAND iterator based on the information passed by the MTD
- * layer.
+ * layer for page jumps.
  */
-static inline void nanddev_io_iter_init(struct nand_device *nand,
-                                        enum nand_page_io_req_type reqtype,
-                                        loff_t offs, struct mtd_oob_ops *req,
-                                        struct nand_io_iter *iter)
+static inline void nanddev_io_page_iter_init(struct nand_device *nand,
+                                             enum nand_page_io_req_type reqtype,
+                                             loff_t offs, struct mtd_oob_ops *req,
+                                             struct nand_io_iter *iter)
 {
         struct mtd_info *mtd = nanddev_to_mtd(nand);
 
@@ -819,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand,
         iter->req.ooblen = min_t(unsigned int,
                                  iter->oobbytes_per_page - iter->req.ooboffs,
                                  iter->oobleft);
+        iter->req.continuous = false;
+}
+
+/**
+ * nand_io_block_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer for block jumps (no OOB)
+ *
+ * In practice only reads may leverage this iterator.
+ */
+static inline void nanddev_io_block_iter_init(struct nand_device *nand,
+                                              enum nand_page_io_req_type reqtype,
+                                              loff_t offs, struct mtd_oob_ops *req,
+                                              struct nand_io_iter *iter)
+{
+        unsigned int offs_in_eb;
+
+        iter->req.type = reqtype;
+        iter->req.mode = req->mode;
+        iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+        iter->req.ooboffs = 0;
+        iter->oobbytes_per_page = 0;
+        iter->dataleft = req->len;
+        iter->oobleft = 0;
+        iter->req.databuf.in = req->datbuf;
+        offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs;
+        iter->req.datalen = min_t(unsigned int,
+                                  nanddev_eraseblock_size(nand) - offs_in_eb,
+                                  iter->dataleft);
+        iter->req.oobbuf.in = NULL;
+        iter->req.ooblen = 0;
+        iter->req.continuous = true;
 }
 
 /**
@@ -845,6 +1003,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand,
 }
 
 /**
+ * nand_io_iter_next_block - Move to the next block
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next block.
+ * No OOB handling available.
+ */
+static inline void nanddev_io_iter_next_block(struct nand_device *nand,
+                                              struct nand_io_iter *iter)
+{
+        nanddev_pos_next_eraseblock(nand, &iter->req.pos);
+        iter->dataleft -= iter->req.datalen;
+        iter->req.databuf.in += iter->req.datalen;
+        iter->req.dataoffs = 0;
+        iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand),
+                                  iter->dataleft);
+}
+
+/**
  * nand_io_iter_end - Should end iteration or not
  * @nand: NAND device
  * @iter: NAND I/O iterator
@@ -872,18 +1049,41 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand,
  * @req: MTD I/O request
  * @iter: NAND I/O iterator
  *
- * Should be used for iterate over pages that are contained in an MTD request.
+ * Should be used for iterating over pages that are contained in an MTD request.
  */
 #define nanddev_io_for_each_page(nand, type, start, req, iter)         \
-        for (nanddev_io_iter_init(nand, type, start, req, iter);       \
+        for (nanddev_io_page_iter_init(nand, type, start, req, iter);  \
              !nanddev_io_iter_end(nand, iter);                         \
              nanddev_io_iter_next_page(nand, iter))
 
+/**
+ * nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O
+ *                          request, one block at a time
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used for iterating over blocks that are contained in an MTD request.
+ */
+#define nanddev_io_for_each_block(nand, type, start, req, iter)        \
+        for (nanddev_io_block_iter_init(nand, type, start, req, iter); \
+             !nanddev_io_iter_end(nand, iter);                         \
+             nanddev_io_iter_next_block(nand, iter))
+
 bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
 bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
-int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
 int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
 
+/* ECC related functions */
+int nanddev_ecc_engine_init(struct nand_device *nand);
+void nanddev_ecc_engine_cleanup(struct nand_device *nand);
+
+static inline void *nand_to_ecc_ctx(struct nand_device *nand)
+{
+        return nand->ecc.ctx.priv;
+}
+
 /* BBT related functions */
 enum nand_bbt_block_status {
         NAND_BBT_BLOCK_STATUS_UNKNOWN,
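
The hunks above add a registration API for on-host hardware ECC engines: a provider embeds a struct nand_ecc_engine, fills in @dev, @ops and @integration, and registers it so NAND controller drivers can later look it up via nand_ecc_get_on_host_hw_engine(). The following is a minimal sketch of such a provider; the foo_* names, the stubbed ops bodies and the platform-driver wiring are illustrative assumptions, only the nand_ecc_engine fields and the register/unregister calls come from this patch:

#include <linux/mtd/nand.h>
#include <linux/platform_device.h>

struct foo_ecc {
        struct nand_ecc_engine engine;
};

/* Stubbed hooks; a real engine would configure and drive the hardware. */
static int foo_ecc_init_ctx(struct nand_device *nand) { return 0; }
static void foo_ecc_cleanup_ctx(struct nand_device *nand) { }
static int foo_ecc_prepare_io_req(struct nand_device *nand,
                                  struct nand_page_io_req *req) { return 0; }
static int foo_ecc_finish_io_req(struct nand_device *nand,
                                 struct nand_page_io_req *req) { return 0; }

static const struct nand_ecc_engine_ops foo_ecc_ops = {
        .init_ctx = foo_ecc_init_ctx,
        .cleanup_ctx = foo_ecc_cleanup_ctx,
        .prepare_io_req = foo_ecc_prepare_io_req,
        .finish_io_req = foo_ecc_finish_io_req,
};

static int foo_ecc_probe(struct platform_device *pdev)
{
        struct foo_ecc *foo;

        foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
        if (!foo)
                return -ENOMEM;

        foo->engine.dev = &pdev->dev;
        foo->engine.ops = &foo_ecc_ops;
        /* Corrects on the fly, no bounce buffering required. */
        foo->engine.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;
        foo->engine.priv = foo;
        platform_set_drvdata(pdev, foo);

        /* Add the engine to the core's list so controllers can find it. */
        return nand_ecc_register_on_host_hw_engine(&foo->engine);
}

static int foo_ecc_remove(struct platform_device *pdev)
{
        struct foo_ecc *foo = platform_get_drvdata(pdev);

        return nand_ecc_unregister_on_host_hw_engine(&foo->engine);
}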
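
For engines that cannot correct in place (%NAND_ECC_ENGINE_INTEGRATION_EXTERNAL), the nand_ecc_req_tweak_ctx helpers widen arbitrary sub-page requests into full-page ones backed by bounce buffers. A sketch of how an engine's context and I/O hooks might chain them, assuming the ext_* names and the placement of correction logic (the helper calls and nand_to_ecc_ctx() come from this patch):

#include <linux/mtd/nand.h>
#include <linux/slab.h>

struct ext_ecc_ctx {
        struct nand_ecc_req_tweak_ctx req_ctx;
};

static int ext_ecc_init_ctx(struct nand_device *nand)
{
        struct ext_ecc_ctx *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        /* Allocates the spare data/OOB bounce buffers. */
        ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
        if (ret) {
                kfree(ctx);
                return ret;
        }

        nand->ecc.ctx.priv = ctx;
        return 0;
}

static void ext_ecc_cleanup_ctx(struct nand_device *nand)
{
        struct ext_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

        nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
        kfree(ctx);
}

static int ext_ecc_prepare_io_req(struct nand_device *nand,
                                  struct nand_page_io_req *req)
{
        struct ext_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

        /* Widen the request to full page/OOB, bouncing buffers if needed. */
        nand_ecc_tweak_req(&ctx->req_ctx, req);

        /* On writes, ECC bytes would be computed here. */
        return 0;
}

static int ext_ecc_finish_io_req(struct nand_device *nand,
                                 struct nand_page_io_req *req)
{
        struct ext_ecc_ctx *ctx = nand_to_ecc_ctx(nand);

        /* On reads, correction would happen here, before restoring. */
        nand_ecc_restore_req(&ctx->req_ctx, req);
        return 0;
}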
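
Finally, the new nanddev_io_for_each_block() walks an MTD request one eraseblock at a time with req.continuous set, so a controller capable of sequential (cached/continuous) reads can serve a whole in-block span with a single operation. A sketch of a read path built on it, where foo_read_one_block() stands in for the hypothetical controller-specific transfer:

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* Hypothetical controller hook performing one continuous transfer. */
int foo_read_one_block(struct nand_device *nand, struct nand_page_io_req *req);

static int foo_mtd_read(struct mtd_info *mtd, loff_t from,
                        struct mtd_oob_ops *ops)
{
        struct nand_device *nand = mtd_to_nanddev(mtd);
        struct nand_io_iter iter;
        int ret;

        ops->retlen = 0;

        nanddev_io_for_each_block(nand, NAND_PAGE_READ, from, ops, &iter) {
                /* iter.req.continuous is true: datalen covers everything
                 * left in the current eraseblock. */
                ret = foo_read_one_block(nand, &iter.req);
                if (ret)
                        return ret;

                ops->retlen += iter.req.datalen;
        }

        return 0;
}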