| author | Igor Konopko <igor.j.konopko@intel.com> | 2019-05-04 20:38:10 +0200 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2019-05-06 10:19:19 -0600 |
| commit | a96de64a24e5035018c5a912f2b877da8797277e (patch) | |
| tree | 3d3a80a71cce270dca5ebed4cbe6c9133e0d7752 /drivers/lightnvm/pblk.h | |
| parent | 843f2edbdde085b4d2764baf6fd8a1fccb1ab26a (diff) | |
lightnvm: pblk: simplify partial read path
This patch changes the approach to handling the partial read path.
In the old approach, merging data from the ring buffer and the drive
was handled entirely by the driver. This had some disadvantages: the
code was complex, relied on bio internals, and was therefore hard to
maintain and strongly dependent on bio changes.
In the new approach, most of the handling is done by block layer
functions such as bio_split(), bio_chain() and generic_make_request(),
which makes the code less complex and easier to maintain. More details
of the new approach, and a condensed sketch of the resulting flow,
follow below.
When a read bio arrives, it is cloned for pblk's internal purposes. All
of the L2P mapping, which includes copying data from the ring buffer to
the bio and thus the bio_advance() calls, is done on the cloned bio, so
the original bio stays untouched. If we detect a partial read case, the
original bio is still untouched, so we can split it and continue to
process only its first part in the current context, while the rest is
submitted as a separate bio request to generic_make_request() for
further processing.
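Condensed, the flow above is the standard block-layer clone/split/chain
pattern. The sketch below is illustrative rather than the literal patch
body: error handling and rqd setup are omitted, and the nr_secs and
total_secs variables are assumptions; pblk_bio_set and NR_PHY_IN_LOG
follow pblk's existing conventions.

```c
/*
 * Illustrative sketch of the partial read flow (not the literal patch
 * code). nr_secs is the number of leading sectors that can be served
 * in the current context; total_secs is the full request size.
 */
struct bio *int_bio, *split_bio;

/* Clone so L2P lookup and cache reads may bio_advance() freely,
 * leaving the original bio untouched.
 */
int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);

if (nr_secs < total_secs) {
	/* Partial read: split off the part handled in this context. */
	split_bio = bio_split(bio, nr_secs * NR_PHY_IN_LOG, GFP_KERNEL,
			      &pblk_bio_set);
	bio_chain(split_bio, bio);

	/* Let the block layer re-drive the remainder as its own bio. */
	generic_make_request(bio);

	bio = split_bio;
}
```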
Signed-off-by: Igor Konopko <igor.j.konopko@intel.com>
Reviewed-by: Heiner Litz <hlitz@ucsc.edu>
Reviewed-by: Javier González <javier@javigon.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/lightnvm/pblk.h')
| -rw-r--r-- | drivers/lightnvm/pblk.h | 18 |
1 file changed, 3 insertions, 15 deletions
```diff
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 17ced12db7dd..a67855387f53 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -121,18 +121,6 @@ struct pblk_g_ctx {
 	u64 lba;
 };
 
-/* partial read context */
-struct pblk_pr_ctx {
-	struct bio *orig_bio;
-	DECLARE_BITMAP(bitmap, NVM_MAX_VLBA);
-	unsigned int orig_nr_secs;
-	unsigned int bio_init_idx;
-	void *ppa_ptr;
-	dma_addr_t dma_ppa_list;
-	u64 lba_list_mem[NVM_MAX_VLBA];
-	u64 lba_list_media[NVM_MAX_VLBA];
-};
-
 /* Pad context */
 struct pblk_pad_rq {
 	struct pblk *pblk;
@@ -759,7 +747,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
 				 unsigned int pos, unsigned int nr_entries,
 				 unsigned int count);
 int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
-			struct ppa_addr ppa, int bio_iter, bool advanced_bio);
+			struct ppa_addr ppa);
 unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
 
 unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
@@ -859,8 +847,8 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa,
 		       struct pblk_line *gc_line, u64 paddr);
 void pblk_lookup_l2p_rand(struct pblk *pblk, struct ppa_addr *ppas,
 			  u64 *lba_list, int nr_secs);
-void pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
-			 sector_t blba, int nr_secs);
+int pblk_lookup_l2p_seq(struct pblk *pblk, struct ppa_addr *ppas,
+			sector_t blba, int nr_secs, bool *from_cache);
 void *pblk_get_meta_for_writes(struct pblk *pblk, struct nvm_rq *rqd);
 void pblk_get_packed_meta(struct pblk *pblk, struct nvm_rq *rqd);
```
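The header changes track the simplification: struct pblk_pr_ctx and the
bio_iter/advanced_bio bookkeeping in pblk_rb_copy_to_bio() disappear,
and pblk_lookup_l2p_seq() gains a return value plus a from_cache output.
A hypothetical caller sketch, assuming the return value is the number of
consecutive leading sectors resolved from a single source and *from_cache
flags whether that source was the write buffer:

```c
/* Hypothetical caller (not part of the patch): the new signature lets
 * the read path learn how many leading sectors come from one source,
 * and whether that source is the cache (ring buffer) or the media.
 */
bool from_cache;
int nr_secs;

nr_secs = pblk_lookup_l2p_seq(pblk, ppas, blba, rqd->nr_ppas,
			      &from_cache);
if (nr_secs < rqd->nr_ppas) {
	/* Mixed cache/media request: serve the first nr_secs here and
	 * resubmit the remainder via the split path sketched above.
	 */
}
```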