Diffstat (limited to 'drivers')
93 files changed, 2847 insertions, 1327 deletions
diff --git a/drivers/Makefile b/drivers/Makefile index 24cd47014657..a6abd7a856c6 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -76,7 +76,7 @@ obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf/ obj-$(CONFIG_NUBUS) += nubus/ obj-y += macintosh/ obj-$(CONFIG_IDE) += ide/ -obj-$(CONFIG_SCSI) += scsi/ +obj-y += scsi/ obj-y += nvme/ obj-$(CONFIG_ATA) += ata/ obj-$(CONFIG_TARGET_CORE) += target/ diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index aad1b01447de..8e270962b2f3 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -597,8 +597,9 @@ static int ata_get_identity(struct ata_port *ap, struct scsi_device *sdev, int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) { int rc = 0; + u8 sensebuf[SCSI_SENSE_BUFFERSIZE]; u8 scsi_cmd[MAX_COMMAND_SIZE]; - u8 args[4], *argbuf = NULL, *sensebuf = NULL; + u8 args[4], *argbuf = NULL; int argsize = 0; enum dma_data_direction data_dir; struct scsi_sense_hdr sshdr; @@ -610,10 +611,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) if (copy_from_user(args, arg, sizeof(args))) return -EFAULT; - sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); - if (!sensebuf) - return -ENOMEM; - + memset(sensebuf, 0, sizeof(sensebuf)); memset(scsi_cmd, 0, sizeof(scsi_cmd)); if (args[3]) { @@ -685,7 +683,6 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg) && copy_to_user(arg + sizeof(args), argbuf, argsize)) rc = -EFAULT; error: - kfree(sensebuf); kfree(argbuf); return rc; } @@ -704,8 +701,9 @@ error: int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) { int rc = 0; + u8 sensebuf[SCSI_SENSE_BUFFERSIZE]; u8 scsi_cmd[MAX_COMMAND_SIZE]; - u8 args[7], *sensebuf = NULL; + u8 args[7]; struct scsi_sense_hdr sshdr; int cmd_result; @@ -715,10 +713,7 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) if (copy_from_user(args, arg, sizeof(args))) return -EFAULT; - sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO); - if (!sensebuf) - return -ENOMEM; - + memset(sensebuf, 0, sizeof(sensebuf)); memset(scsi_cmd, 0, sizeof(scsi_cmd)); scsi_cmd[0] = ATA_16; scsi_cmd[1] = (3 << 1); /* Non-data */ @@ -769,7 +764,6 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg) } error: - kfree(sensebuf); return rc; } diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c index f6518067aa7d..f99e5c883368 100644 --- a/drivers/block/DAC960.c +++ b/drivers/block/DAC960.c @@ -21,6 +21,7 @@ #define DAC960_DriverDate "21 Aug 2007" +#include <linux/compiler.h> #include <linux/module.h> #include <linux/types.h> #include <linux/miscdevice.h> @@ -6426,7 +6427,7 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller, return true; } -static int dac960_proc_show(struct seq_file *m, void *v) +static int __maybe_unused dac960_proc_show(struct seq_file *m, void *v) { unsigned char *StatusMessage = "OK\n"; int ControllerNumber; @@ -6446,14 +6447,16 @@ static int dac960_proc_show(struct seq_file *m, void *v) return 0; } -static int dac960_initial_status_proc_show(struct seq_file *m, void *v) +static int __maybe_unused dac960_initial_status_proc_show(struct seq_file *m, + void *v) { DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer); return 0; } -static int dac960_current_status_proc_show(struct seq_file *m, void *v) +static int __maybe_unused dac960_current_status_proc_show(struct seq_file *m, + void *v) { DAC960_Controller_T *Controller = 
(DAC960_Controller_T *) m->private; unsigned char *StatusMessage = diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index ad9b687a236a..d4913516823f 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -74,12 +74,12 @@ config AMIGA_Z2RAM config CDROM tristate + select BLK_SCSI_REQUEST config GDROM tristate "SEGA Dreamcast GD-ROM drive" depends on SH_DREAMCAST select CDROM - select BLK_SCSI_REQUEST # only for the generic cdrom code help A standard SEGA Dreamcast comes with a modified CD ROM drive called a "GD-ROM" by SEGA to signify it is capable of reading special disks diff --git a/drivers/block/Makefile b/drivers/block/Makefile index dc061158b403..8566b188368b 100644 --- a/drivers/block/Makefile +++ b/drivers/block/Makefile @@ -36,8 +36,11 @@ obj-$(CONFIG_BLK_DEV_RBD) += rbd.o obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/ -obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o obj-$(CONFIG_ZRAM) += zram/ +obj-$(CONFIG_BLK_DEV_NULL_BLK) += null_blk.o +null_blk-objs := null_blk_main.o +null_blk-$(CONFIG_BLK_DEV_ZONED) += null_blk_zoned.o + skd-y := skd_main.o swim_mod-y := swim.o swim_asm.o diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 096882e54095..136dc507d020 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -1137,6 +1137,7 @@ noskb: if (buf) break; } bvcpy(skb, f->buf->bio, f->iter, n); + /* fall through */ case ATA_CMD_PIO_WRITE: case ATA_CMD_PIO_WRITE_EXT: spin_lock_irq(&d->lock); diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 697f735b07a4..41060e9cedf2 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -284,8 +284,8 @@ freedev(struct aoedev *d) e = t + d->ntargets; for (; t < e && *t; t++) freetgt(d, *t); - if (d->bufpool) - mempool_destroy(d->bufpool); + + mempool_destroy(d->bufpool); skbpoolfree(d); minor_free(d->sysminor); diff --git a/drivers/block/brd.c b/drivers/block/brd.c index bb976598ee43..df8103dd40ac 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -254,20 +254,20 @@ static void copy_from_brd(void *dst, struct brd_device *brd, * Process a single bvec of a bio. 
*/ static int brd_do_bvec(struct brd_device *brd, struct page *page, - unsigned int len, unsigned int off, bool is_write, + unsigned int len, unsigned int off, unsigned int op, sector_t sector) { void *mem; int err = 0; - if (is_write) { + if (op_is_write(op)) { err = copy_to_brd_setup(brd, sector, len); if (err) goto out; } mem = kmap_atomic(page); - if (!is_write) { + if (!op_is_write(op)) { copy_from_brd(mem + off, brd, sector, len); flush_dcache_page(page); } else { @@ -296,7 +296,7 @@ static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) int err; err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset, - op_is_write(bio_op(bio)), sector); + bio_op(bio), sector); if (err) goto io_error; sector += len >> SECTOR_SHIFT; @@ -310,15 +310,15 @@ io_error: } static int brd_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, bool is_write) + struct page *page, unsigned int op) { struct brd_device *brd = bdev->bd_disk->private_data; int err; if (PageTransHuge(page)) return -ENOTSUPP; - err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector); - page_endio(page, is_write, err); + err = brd_do_bvec(brd, page, PAGE_SIZE, 0, op, sector); + page_endio(page, op_is_write(op), err); return err; } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index bc4ed2ed40a2..e35a234b0a8f 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -55,12 +55,10 @@ # define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr"))) # define __protected_read_by(x) __attribute__((require_context(x,1,999,"read"))) # define __protected_write_by(x) __attribute__((require_context(x,1,999,"write"))) -# define __must_hold(x) __attribute__((context(x,1,1), require_context(x,1,999,"call"))) #else # define __protected_by(x) # define __protected_read_by(x) # define __protected_write_by(x) -# define __must_hold(x) #endif /* shared module parameters, defined in drbd_main.c */ diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index a80809bd3057..ef8212a4b73e 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2103,14 +2103,10 @@ static void drbd_destroy_mempools(void) mempool_exit(&drbd_md_io_page_pool); mempool_exit(&drbd_ee_mempool); mempool_exit(&drbd_request_mempool); - if (drbd_ee_cache) - kmem_cache_destroy(drbd_ee_cache); - if (drbd_request_cache) - kmem_cache_destroy(drbd_request_cache); - if (drbd_bm_ext_cache) - kmem_cache_destroy(drbd_bm_ext_cache); - if (drbd_al_ext_cache) - kmem_cache_destroy(drbd_al_ext_cache); + kmem_cache_destroy(drbd_ee_cache); + kmem_cache_destroy(drbd_request_cache); + kmem_cache_destroy(drbd_bm_ext_cache); + kmem_cache_destroy(drbd_al_ext_cache); drbd_ee_cache = NULL; drbd_request_cache = NULL; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index be9450f5ad1c..75f6b47169e6 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2674,8 +2674,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device) if (c_min_rate == 0) return false; - curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + - (int)part_stat_read(&disk->part0, sectors[1]) - + curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - atomic_read(&device->rs_sect_ev); if (atomic_read(&device->ap_actlog_cnt) @@ -2790,6 +2789,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet then we would do something smarter here than reading the block... 
*/ peer_req->flags |= EE_RS_THIN_REQ; + /* fall through */ case P_RS_DATA_REQUEST: peer_req->w.cb = w_e_end_rsdata_req; fault_type = DRBD_FAULT_RS_RD; @@ -2968,6 +2968,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold /* Else fall through to one of the other strategies... */ drbd_warn(device, "Discard younger/older primary did not find a decision\n" "Using discard-least-changes instead\n"); + /* fall through */ case ASB_DISCARD_ZERO_CHG: if (ch_peer == 0 && ch_self == 0) { rv = test_bit(RESOLVE_CONFLICTS, &peer_device->connection->flags) @@ -2979,6 +2980,7 @@ static int drbd_asb_recover_0p(struct drbd_peer_device *peer_device) __must_hold } if (after_sb_0p == ASB_DISCARD_ZERO_CHG) break; + /* else: fall through */ case ASB_DISCARD_LEAST_CHG: if (ch_self < ch_peer) rv = -1; diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index d146fedc38bb..19cac36e9737 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -38,7 +38,7 @@ static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request { struct request_queue *q = device->rq_queue; - generic_start_io_acct(q, bio_data_dir(req->master_bio), + generic_start_io_acct(q, bio_op(req->master_bio), req->i.size >> 9, &device->vdisk->part0); } @@ -47,7 +47,7 @@ static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *r { struct request_queue *q = device->rq_queue; - generic_end_io_acct(q, bio_data_dir(req->master_bio), + generic_end_io_acct(q, bio_op(req->master_bio), &device->vdisk->part0, req->start_jif); } diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 5e793dd7adfb..b8f77e83d456 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -1690,9 +1690,7 @@ void drbd_rs_controller_reset(struct drbd_device *device) atomic_set(&device->rs_sect_in, 0); atomic_set(&device->rs_sect_ev, 0); device->rs_in_flight = 0; - device->rs_last_events = - (int)part_stat_read(&disk->part0, sectors[0]) + - (int)part_stat_read(&disk->part0, sectors[1]); + device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors); /* Updating the RCU protected object in place is necessary since this function gets called from atomic context. 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 8871b5044d9e..48f622728ce6 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -1461,7 +1461,6 @@ static void setup_rw_floppy(void) int i; int r; int flags; - int dflags; unsigned long ready_date; void (*function)(void); @@ -1485,8 +1484,6 @@ static void setup_rw_floppy(void) if (fd_wait_for_completion(ready_date, function)) return; } - dflags = DRS->flags; - if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE)) setup_DMA(); diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 4cb1d1be3cfb..ea9debf59b22 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -690,7 +690,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, unsigned int arg) { struct file *file, *old_file; - struct inode *inode; int error; error = -ENXIO; @@ -711,7 +710,6 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, if (error) goto out_putf; - inode = file->f_mapping->host; old_file = lo->lo_backing_file; error = -EINVAL; @@ -1611,6 +1609,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode, case LOOP_GET_STATUS64: case LOOP_SET_STATUS64: arg = (unsigned long) compat_ptr(arg); + /* fall through */ case LOOP_SET_FD: case LOOP_CHANGE_FD: case LOOP_SET_BLOCK_SIZE: diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index c73626decb46..db253cd5b32a 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -2575,8 +2575,7 @@ static int mtip_hw_debugfs_init(struct driver_data *dd) static void mtip_hw_debugfs_exit(struct driver_data *dd) { - if (dd->dfs_node) - debugfs_remove_recursive(dd->dfs_node); + debugfs_remove_recursive(dd->dfs_node); } /* diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h new file mode 100644 index 000000000000..d81781f22dba --- /dev/null +++ b/drivers/block/null_blk.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __BLK_NULL_BLK_H +#define __BLK_NULL_BLK_H + +#include <linux/blkdev.h> +#include <linux/slab.h> +#include <linux/blk-mq.h> +#include <linux/hrtimer.h> +#include <linux/configfs.h> +#include <linux/badblocks.h> +#include <linux/fault-inject.h> + +struct nullb_cmd { + struct list_head list; + struct llist_node ll_list; + struct __call_single_data csd; + struct request *rq; + struct bio *bio; + unsigned int tag; + blk_status_t error; + struct nullb_queue *nq; + struct hrtimer timer; +}; + +struct nullb_queue { + unsigned long *tag_map; + wait_queue_head_t wait; + unsigned int queue_depth; + struct nullb_device *dev; + unsigned int requeue_selection; + + struct nullb_cmd *cmds; +}; + +struct nullb_device { + struct nullb *nullb; + struct config_item item; + struct radix_tree_root data; /* data stored in the disk */ + struct radix_tree_root cache; /* disk cache data */ + unsigned long flags; /* device flags */ + unsigned int curr_cache; + struct badblocks badblocks; + + unsigned int nr_zones; + struct blk_zone *zones; + sector_t zone_size_sects; + + unsigned long size; /* device size in MB */ + unsigned long completion_nsec; /* time in ns to complete a request */ + unsigned long cache_size; /* disk cache size in MB */ + unsigned long zone_size; /* zone size in MB if device is zoned */ + unsigned int submit_queues; /* number of submission queues */ + unsigned int home_node; /* home node for the device */ + unsigned int queue_mode; /* block interface */ + unsigned int blocksize; /* block size */ + unsigned int irqmode; /* IRQ 
completion handler */ + unsigned int hw_queue_depth; /* queue depth */ + unsigned int index; /* index of the disk, only valid with a disk */ + unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */ + bool blocking; /* blocking blk-mq device */ + bool use_per_node_hctx; /* use per-node allocation for hardware context */ + bool power; /* power on/off the device */ + bool memory_backed; /* if data is stored in memory */ + bool discard; /* if support discard */ + bool zoned; /* if device is zoned */ +}; + +struct nullb { + struct nullb_device *dev; + struct list_head list; + unsigned int index; + struct request_queue *q; + struct gendisk *disk; + struct blk_mq_tag_set *tag_set; + struct blk_mq_tag_set __tag_set; + unsigned int queue_depth; + atomic_long_t cur_bytes; + struct hrtimer bw_timer; + unsigned long cache_flush_pos; + spinlock_t lock; + + struct nullb_queue *queues; + unsigned int nr_queues; + char disk_name[DISK_NAME_LEN]; +}; + +#ifdef CONFIG_BLK_DEV_ZONED +int null_zone_init(struct nullb_device *dev); +void null_zone_exit(struct nullb_device *dev); +blk_status_t null_zone_report(struct nullb *nullb, + struct nullb_cmd *cmd); +void null_zone_write(struct nullb_cmd *cmd); +void null_zone_reset(struct nullb_cmd *cmd); +#else +static inline int null_zone_init(struct nullb_device *dev) +{ + return -EINVAL; +} +static inline void null_zone_exit(struct nullb_device *dev) {} +static inline blk_status_t null_zone_report(struct nullb *nullb, + struct nullb_cmd *cmd) +{ + return BLK_STS_NOTSUPP; +} +static inline void null_zone_write(struct nullb_cmd *cmd) {} +static inline void null_zone_reset(struct nullb_cmd *cmd) {} +#endif /* CONFIG_BLK_DEV_ZONED */ +#endif /* __NULL_BLK_H */ diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk_main.c index 042c778e5a4e..6127e3ff7b4b 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk_main.c @@ -7,14 +7,8 @@ #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/fs.h> -#include <linux/blkdev.h> #include <linux/init.h> -#include <linux/slab.h> -#include <linux/blk-mq.h> -#include <linux/hrtimer.h> -#include <linux/configfs.h> -#include <linux/badblocks.h> -#include <linux/fault-inject.h> +#include "null_blk.h" #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) @@ -35,28 +29,6 @@ static inline u64 mb_per_tick(int mbps) return (1 << 20) / TICKS_PER_SEC * ((u64) mbps); } -struct nullb_cmd { - struct list_head list; - struct llist_node ll_list; - struct __call_single_data csd; - struct request *rq; - struct bio *bio; - unsigned int tag; - blk_status_t error; - struct nullb_queue *nq; - struct hrtimer timer; -}; - -struct nullb_queue { - unsigned long *tag_map; - wait_queue_head_t wait; - unsigned int queue_depth; - struct nullb_device *dev; - unsigned int requeue_selection; - - struct nullb_cmd *cmds; -}; - /* * Status flags for nullb_device. 
* @@ -92,52 +64,6 @@ struct nullb_page { #define NULLB_PAGE_LOCK (MAP_SZ - 1) #define NULLB_PAGE_FREE (MAP_SZ - 2) -struct nullb_device { - struct nullb *nullb; - struct config_item item; - struct radix_tree_root data; /* data stored in the disk */ - struct radix_tree_root cache; /* disk cache data */ - unsigned long flags; /* device flags */ - unsigned int curr_cache; - struct badblocks badblocks; - - unsigned long size; /* device size in MB */ - unsigned long completion_nsec; /* time in ns to complete a request */ - unsigned long cache_size; /* disk cache size in MB */ - unsigned int submit_queues; /* number of submission queues */ - unsigned int home_node; /* home node for the device */ - unsigned int queue_mode; /* block interface */ - unsigned int blocksize; /* block size */ - unsigned int irqmode; /* IRQ completion handler */ - unsigned int hw_queue_depth; /* queue depth */ - unsigned int index; /* index of the disk, only valid with a disk */ - unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */ - bool blocking; /* blocking blk-mq device */ - bool use_per_node_hctx; /* use per-node allocation for hardware context */ - bool power; /* power on/off the device */ - bool memory_backed; /* if data is stored in memory */ - bool discard; /* if support discard */ -}; - -struct nullb { - struct nullb_device *dev; - struct list_head list; - unsigned int index; - struct request_queue *q; - struct gendisk *disk; - struct blk_mq_tag_set *tag_set; - struct blk_mq_tag_set __tag_set; - unsigned int queue_depth; - atomic_long_t cur_bytes; - struct hrtimer bw_timer; - unsigned long cache_flush_pos; - spinlock_t lock; - - struct nullb_queue *queues; - unsigned int nr_queues; - char disk_name[DISK_NAME_LEN]; -}; - static LIST_HEAD(nullb_list); static struct mutex lock; static int null_major; @@ -254,6 +180,14 @@ static bool g_use_per_node_hctx; module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, 0444); MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false"); +static bool g_zoned; +module_param_named(zoned, g_zoned, bool, S_IRUGO); +MODULE_PARM_DESC(zoned, "Make device as a host-managed zoned block device. Default: false"); + +static unsigned long g_zone_size = 256; +module_param_named(zone_size, g_zone_size, ulong, S_IRUGO); +MODULE_PARM_DESC(zone_size, "Zone size in MB when block device is zoned. 
Must be power-of-two: Default: 256"); + static struct nullb_device *null_alloc_dev(void); static void null_free_dev(struct nullb_device *dev); static void null_del_dev(struct nullb *nullb); @@ -357,6 +291,8 @@ NULLB_DEVICE_ATTR(memory_backed, bool); NULLB_DEVICE_ATTR(discard, bool); NULLB_DEVICE_ATTR(mbps, uint); NULLB_DEVICE_ATTR(cache_size, ulong); +NULLB_DEVICE_ATTR(zoned, bool); +NULLB_DEVICE_ATTR(zone_size, ulong); static ssize_t nullb_device_power_show(struct config_item *item, char *page) { @@ -390,6 +326,7 @@ static ssize_t nullb_device_power_store(struct config_item *item, null_del_dev(dev->nullb); mutex_unlock(&lock); clear_bit(NULLB_DEV_FL_UP, &dev->flags); + clear_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags); } return count; @@ -468,6 +405,8 @@ static struct configfs_attribute *nullb_device_attrs[] = { &nullb_device_attr_mbps, &nullb_device_attr_cache_size, &nullb_device_attr_badblocks, + &nullb_device_attr_zoned, + &nullb_device_attr_zone_size, NULL, }; @@ -520,7 +459,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item) static ssize_t memb_group_features_show(struct config_item *item, char *page) { - return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n"); + return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size\n"); } CONFIGFS_ATTR_RO(memb_group_, features); @@ -579,6 +518,8 @@ static struct nullb_device *null_alloc_dev(void) dev->hw_queue_depth = g_hw_queue_depth; dev->blocking = g_blocking; dev->use_per_node_hctx = g_use_per_node_hctx; + dev->zoned = g_zoned; + dev->zone_size = g_zone_size; return dev; } @@ -587,6 +528,7 @@ static void null_free_dev(struct nullb_device *dev) if (!dev) return; + null_zone_exit(dev); badblocks_exit(&dev->badblocks); kfree(dev); } @@ -862,7 +804,9 @@ static struct nullb_page *null_lookup_page(struct nullb *nullb, } static struct nullb_page *null_insert_page(struct nullb *nullb, - sector_t sector, bool ignore_cache) + sector_t sector, bool ignore_cache) + __releases(&nullb->lock) + __acquires(&nullb->lock) { u64 idx; struct nullb_page *t_page; @@ -1219,6 +1163,11 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) struct nullb *nullb = dev->nullb; int err = 0; + if (req_op(cmd->rq) == REQ_OP_ZONE_REPORT) { + cmd->error = null_zone_report(nullb, cmd); + goto out; + } + if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) { struct request *rq = cmd->rq; @@ -1283,6 +1232,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd) } } cmd->error = errno_to_blk_status(err); + + if (!cmd->error && dev->zoned) { + if (req_op(cmd->rq) == REQ_OP_WRITE) + null_zone_write(cmd); + else if (req_op(cmd->rq) == REQ_OP_ZONE_RESET) + null_zone_reset(cmd); + } out: /* Complete IO by inline, softirq or timer */ switch (dev->irqmode) { @@ -1810,6 +1766,15 @@ static int null_add_dev(struct nullb_device *dev) blk_queue_flush_queueable(nullb->q, true); } + if (dev->zoned) { + rv = null_zone_init(dev); + if (rv) + goto out_cleanup_blk_queue; + + blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects); + nullb->q->limits.zoned = BLK_ZONED_HM; + } + nullb->q->queuedata = nullb; blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); @@ -1828,13 +1793,16 @@ static int null_add_dev(struct nullb_device *dev) rv = null_gendisk_register(nullb); if (rv) - goto out_cleanup_blk_queue; + goto out_cleanup_zone; mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); mutex_unlock(&lock); return 0; +out_cleanup_zone: + 
if (dev->zoned) + null_zone_exit(dev); out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: @@ -1861,6 +1829,11 @@ static int __init null_init(void) g_bs = PAGE_SIZE; } + if (!is_power_of_2(g_zone_size)) { + pr_err("null_blk: zone_size must be power-of-two\n"); + return -EINVAL; + } + if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) { if (g_submit_queues != nr_online_nodes) { pr_warn("null_blk: submit_queues param is set to %u.\n", diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c new file mode 100644 index 000000000000..a979ca00d7be --- /dev/null +++ b/drivers/block/null_blk_zoned.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/vmalloc.h> +#include "null_blk.h" + +/* zone_size in MBs to sectors. */ +#define ZONE_SIZE_SHIFT 11 + +static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) +{ + return sect >> ilog2(dev->zone_size_sects); +} + +int null_zone_init(struct nullb_device *dev) +{ + sector_t dev_size = (sector_t)dev->size * 1024 * 1024; + sector_t sector = 0; + unsigned int i; + + if (!is_power_of_2(dev->zone_size)) { + pr_err("null_blk: zone_size must be power-of-two\n"); + return -EINVAL; + } + + dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT; + dev->nr_zones = dev_size >> + (SECTOR_SHIFT + ilog2(dev->zone_size_sects)); + dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct blk_zone), + GFP_KERNEL | __GFP_ZERO); + if (!dev->zones) + return -ENOMEM; + + for (i = 0; i < dev->nr_zones; i++) { + struct blk_zone *zone = &dev->zones[i]; + + zone->start = zone->wp = sector; + zone->len = dev->zone_size_sects; + zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ; + zone->cond = BLK_ZONE_COND_EMPTY; + + sector += dev->zone_size_sects; + } + + return 0; +} + +void null_zone_exit(struct nullb_device *dev) +{ + kvfree(dev->zones); +} + +static void null_zone_fill_rq(struct nullb_device *dev, struct request *rq, + unsigned int zno, unsigned int nr_zones) +{ + struct blk_zone_report_hdr *hdr = NULL; + struct bio_vec bvec; + struct bvec_iter iter; + void *addr; + unsigned int zones_to_cpy; + + bio_for_each_segment(bvec, rq->bio, iter) { + addr = kmap_atomic(bvec.bv_page); + + zones_to_cpy = bvec.bv_len / sizeof(struct blk_zone); + + if (!hdr) { + hdr = (struct blk_zone_report_hdr *)addr; + hdr->nr_zones = nr_zones; + zones_to_cpy--; + addr += sizeof(struct blk_zone_report_hdr); + } + + zones_to_cpy = min_t(unsigned int, zones_to_cpy, nr_zones); + + memcpy(addr, &dev->zones[zno], + zones_to_cpy * sizeof(struct blk_zone)); + + kunmap_atomic(addr); + + nr_zones -= zones_to_cpy; + zno += zones_to_cpy; + + if (!nr_zones) + break; + } +} + +blk_status_t null_zone_report(struct nullb *nullb, + struct nullb_cmd *cmd) +{ + struct nullb_device *dev = nullb->dev; + struct request *rq = cmd->rq; + unsigned int zno = null_zone_no(dev, blk_rq_pos(rq)); + unsigned int nr_zones = dev->nr_zones - zno; + unsigned int max_zones = (blk_rq_bytes(rq) / + sizeof(struct blk_zone)) - 1; + + nr_zones = min_t(unsigned int, nr_zones, max_zones); + + null_zone_fill_rq(nullb->dev, rq, zno, nr_zones); + + return BLK_STS_OK; +} + +void null_zone_write(struct nullb_cmd *cmd) +{ + struct nullb_device *dev = cmd->nq->dev; + struct request *rq = cmd->rq; + sector_t sector = blk_rq_pos(rq); + unsigned int rq_sectors = blk_rq_sectors(rq); + unsigned int zno = null_zone_no(dev, sector); + struct blk_zone *zone = &dev->zones[zno]; + + switch (zone->cond) { + case BLK_ZONE_COND_FULL: + /* Cannot write to a full zone */ + cmd->error = 
BLK_STS_IOERR; + break; + case BLK_ZONE_COND_EMPTY: + case BLK_ZONE_COND_IMP_OPEN: + /* Writes must be at the write pointer position */ + if (blk_rq_pos(rq) != zone->wp) { + cmd->error = BLK_STS_IOERR; + break; + } + + if (zone->cond == BLK_ZONE_COND_EMPTY) + zone->cond = BLK_ZONE_COND_IMP_OPEN; + + zone->wp += rq_sectors; + if (zone->wp == zone->start + zone->len) + zone->cond = BLK_ZONE_COND_FULL; + break; + default: + /* Invalid zone condition */ + cmd->error = BLK_STS_IOERR; + break; + } +} + +void null_zone_reset(struct nullb_cmd *cmd) +{ + struct nullb_device *dev = cmd->nq->dev; + struct request *rq = cmd->rq; + unsigned int zno = null_zone_no(dev, blk_rq_pos(rq)); + struct blk_zone *zone = &dev->zones[zno]; + + zone->cond = BLK_ZONE_COND_EMPTY; + zone->wp = zone->start; +} diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c index 4f27e7392e38..f5f63ca2889d 100644 --- a/drivers/block/paride/bpck.c +++ b/drivers/block/paride/bpck.c @@ -347,7 +347,7 @@ static int bpck_test_proto( PIA *pi, char * scratch, int verbose ) static void bpck_read_eeprom ( PIA *pi, char * buf ) -{ int i,j,k,n,p,v,f, om, od; +{ int i, j, k, p, v, f, om, od; bpck_force_spp(pi); @@ -356,7 +356,6 @@ static void bpck_read_eeprom ( PIA *pi, char * buf ) bpck_connect(pi); - n = 0; WR(4,0); for (i=0;i<64;i++) { WR(6,8); diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 8961b190e256..7cf947586fe4 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -426,6 +426,7 @@ static void run_fsm(void) pd_claimed = 1; if (!pi_schedule_claimed(pi_current, run_fsm)) return; + /* fall through */ case 1: pd_claimed = 2; pi_current->proto->connect(pi_current); @@ -445,6 +446,7 @@ static void run_fsm(void) spin_unlock_irqrestore(&pd_lock, saved_flags); if (stop) return; + /* fall through */ case Hold: schedule_fsm(); return; diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index b3f83cd96f33..e285413d4a75 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -67,7 +67,7 @@ #include <scsi/scsi.h> #include <linux/debugfs.h> #include <linux/device.h> - +#include <linux/nospec.h> #include <linux/uaccess.h> #define DRIVER_NAME "pktcdvd" @@ -748,13 +748,13 @@ static const char *sense_key_string(__u8 index) static void pkt_dump_sense(struct pktcdvd_device *pd, struct packet_command *cgc) { - struct request_sense *sense = cgc->sense; + struct scsi_sense_hdr *sshdr = cgc->sshdr; - if (sense) + if (sshdr) pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", CDROM_PACKET_SIZE, cgc->cmd, - sense->sense_key, sense->asc, sense->ascq, - sense_key_string(sense->sense_key)); + sshdr->sense_key, sshdr->asc, sshdr->ascq, + sense_key_string(sshdr->sense_key)); else pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); } @@ -787,18 +787,19 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed) { struct packet_command cgc; - struct request_sense sense; + struct scsi_sense_hdr sshdr; int ret; init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.sense = &sense; + cgc.sshdr = &sshdr; cgc.cmd[0] = GPCMD_SET_SPEED; cgc.cmd[2] = (read_speed >> 8) & 0xff; cgc.cmd[3] = read_speed & 0xff; cgc.cmd[4] = (write_speed >> 8) & 0xff; cgc.cmd[5] = write_speed & 0xff; - if ((ret = pkt_generic_packet(pd, &cgc))) + ret = pkt_generic_packet(pd, &cgc); + if (ret) pkt_dump_sense(pd, &cgc); return ret; @@ -1562,7 +1563,8 @@ static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) cgc.cmd[8] = 
cgc.buflen = 2; cgc.quiet = 1; - if ((ret = pkt_generic_packet(pd, &cgc))) + ret = pkt_generic_packet(pd, &cgc); + if (ret) return ret; /* not all drives have the same disc_info length, so requeue @@ -1591,7 +1593,8 @@ static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, cgc.cmd[8] = 8; cgc.quiet = 1; - if ((ret = pkt_generic_packet(pd, &cgc))) + ret = pkt_generic_packet(pd, &cgc); + if (ret) return ret; cgc.buflen = be16_to_cpu(ti->track_information_length) + @@ -1612,17 +1615,20 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, __u32 last_track; int ret = -1; - if ((ret = pkt_get_disc_info(pd, &di))) + ret = pkt_get_disc_info(pd, &di); + if (ret) return ret; last_track = (di.last_track_msb << 8) | di.last_track_lsb; - if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) + ret = pkt_get_track_info(pd, last_track, 1, &ti); + if (ret) return ret; /* if this track is blank, try the previous. */ if (ti.blank) { last_track--; - if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) + ret = pkt_get_track_info(pd, last_track, 1, &ti); + if (ret) return ret; } @@ -1645,7 +1651,7 @@ static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) { struct packet_command cgc; - struct request_sense sense; + struct scsi_sense_hdr sshdr; write_param_page *wp; char buffer[128]; int ret, size; @@ -1656,8 +1662,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) memset(buffer, 0, sizeof(buffer)); init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ); - cgc.sense = &sense; - if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { + cgc.sshdr = &sshdr; + ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); + if (ret) { pkt_dump_sense(pd, &cgc); return ret; } @@ -1671,8 +1678,9 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) * now get it all */ init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ); - cgc.sense = &sense; - if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { + cgc.sshdr = &sshdr; + ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0); + if (ret) { pkt_dump_sense(pd, &cgc); return ret; } @@ -1714,7 +1722,8 @@ static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) wp->packet_size = cpu_to_be32(pd->settings.size >> 2); cgc.buflen = cgc.cmd[8] = size; - if ((ret = pkt_mode_select(pd, &cgc))) { + ret = pkt_mode_select(pd, &cgc); + if (ret) { pkt_dump_sense(pd, &cgc); return ret; } @@ -1819,7 +1828,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) memset(&di, 0, sizeof(disc_information)); memset(&ti, 0, sizeof(track_information)); - if ((ret = pkt_get_disc_info(pd, &di))) { + ret = pkt_get_disc_info(pd, &di); + if (ret) { pkt_err(pd, "failed get_disc\n"); return ret; } @@ -1830,7 +1840,8 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) pd->type = di.erasable ? 
PACKET_CDRW : PACKET_CDR; track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ - if ((ret = pkt_get_track_info(pd, track, 1, &ti))) { + ret = pkt_get_track_info(pd, track, 1, &ti); + if (ret) { pkt_err(pd, "failed get_track\n"); return ret; } @@ -1905,12 +1916,12 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, int set) { struct packet_command cgc; - struct request_sense sense; + struct scsi_sense_hdr sshdr; unsigned char buf[64]; int ret; init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ); - cgc.sense = &sense; + cgc.sshdr = &sshdr; cgc.buflen = pd->mode_offset + 12; /* @@ -1918,7 +1929,8 @@ static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, */ cgc.quiet = 1; - if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0))) + ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0); + if (ret) return ret; buf[pd->mode_offset + 10] |= (!!set << 2); @@ -1950,14 +1962,14 @@ static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed) { struct packet_command cgc; - struct request_sense sense; + struct scsi_sense_hdr sshdr; unsigned char buf[256+18]; unsigned char *cap_buf; int ret, offset; cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN); - cgc.sense = &sense; + cgc.sshdr = &sshdr; ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); if (ret) { @@ -2011,13 +2023,13 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed) { struct packet_command cgc; - struct request_sense sense; + struct scsi_sense_hdr sshdr; unsigned char buf[64]; unsigned int size, st, sp; int ret; init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ); - cgc.sense = &sense; + cgc.sshdr = &sshdr; cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; cgc.cmd[1] = 2; cgc.cmd[2] = 4; /* READ ATIP */ @@ -2032,7 +2044,7 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, size = sizeof(buf); init_cdrom_command(&cgc, buf, size, CGC_DATA_READ); - cgc.sense = &sense; + cgc.sshdr = &sshdr; cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP; cgc.cmd[1] = 2; cgc.cmd[2] = 4; @@ -2083,17 +2095,18 @@ static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) { struct packet_command cgc; - struct request_sense sense; + struct scsi_sense_hdr sshdr; int ret; pkt_dbg(2, pd, "Performing OPC\n"); init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); - cgc.sense = &sense; + cgc.sshdr = &sshdr; cgc.timeout = 60*HZ; cgc.cmd[0] = GPCMD_SEND_OPC; cgc.cmd[1] = 1; - if ((ret = pkt_generic_packet(pd, &cgc))) + ret = pkt_generic_packet(pd, &cgc); + if (ret) pkt_dump_sense(pd, &cgc); return ret; } @@ -2103,19 +2116,22 @@ static int pkt_open_write(struct pktcdvd_device *pd) int ret; unsigned int write_speed, media_write_speed, read_speed; - if ((ret = pkt_probe_settings(pd))) { + ret = pkt_probe_settings(pd); + if (ret) { pkt_dbg(2, pd, "failed probe\n"); return ret; } - if ((ret = pkt_set_write_settings(pd))) { + ret = pkt_set_write_settings(pd); + if (ret) { pkt_dbg(1, pd, "failed saving write settings\n"); return -EIO; } pkt_write_caching(pd, USE_WCACHING); - if ((ret = pkt_get_max_speed(pd, &write_speed))) + ret = pkt_get_max_speed(pd, &write_speed); + if (ret) write_speed = 16 * 177; switch (pd->mmc3_profile) { case 0x13: /* DVD-RW */ @@ -2124,7 +2140,8 @@ static int pkt_open_write(struct pktcdvd_device *pd) pkt_dbg(1, pd, "write speed %ukB/s\n", 
write_speed); break; default: - if ((ret = pkt_media_speed(pd, &media_write_speed))) + ret = pkt_media_speed(pd, &media_write_speed); + if (ret) media_write_speed = 16; write_speed = min(write_speed, media_write_speed * 177); pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); @@ -2132,14 +2149,16 @@ static int pkt_open_write(struct pktcdvd_device *pd) } read_speed = write_speed; - if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { + ret = pkt_set_speed(pd, write_speed, read_speed); + if (ret) { pkt_dbg(1, pd, "couldn't set write speed\n"); return -EIO; } pd->write_speed = write_speed; pd->read_speed = read_speed; - if ((ret = pkt_perform_opc(pd))) { + ret = pkt_perform_opc(pd); + if (ret) { pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); } @@ -2161,10 +2180,12 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) * so bdget() can't fail. */ bdget(pd->bdev->bd_dev); - if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd))) + ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd); + if (ret) goto out; - if ((ret = pkt_get_last_written(pd, &lba))) { + ret = pkt_get_last_written(pd, &lba); + if (ret) { pkt_err(pd, "pkt_get_last_written failed\n"); goto out_putdev; } @@ -2175,7 +2196,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) q = bdev_get_queue(pd->bdev); if (write) { - if ((ret = pkt_open_write(pd))) + ret = pkt_open_write(pd); + if (ret) goto out_putdev; /* * Some CDRW drives can not handle writes larger than one packet, @@ -2190,7 +2212,8 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) clear_bit(PACKET_WRITABLE, &pd->flags); } - if ((ret = pkt_set_segment_merging(pd, q))) + ret = pkt_set_segment_merging(pd, q); + if (ret) goto out_putdev; if (write) { @@ -2231,6 +2254,8 @@ static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor) { if (dev_minor >= MAX_WRITERS) return NULL; + + dev_minor = array_index_nospec(dev_minor, MAX_WRITERS); return pkt_devs[dev_minor]; } diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index dddb3f2490b6..1a92f9e65937 100644 --- a/drivers/block/rsxx/dev.c +++ b/drivers/block/rsxx/dev.c @@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = { static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio) { - generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio), + generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio), &card->gendisk->part0); } @@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card, struct bio *bio, unsigned long start_time) { - generic_end_io_acct(card->queue, bio_data_dir(bio), - &card->gendisk->part0, start_time); + generic_end_io_acct(card->queue, bio_op(bio), + &card->gendisk->part0, start_time); } static void bio_dma_done_cb(struct rsxx_cardinfo *card, diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index bc7aea6d7b7c..87b9e7fbf062 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -657,8 +657,8 @@ static bool skd_preop_sg_list(struct skd_device *skdev, if (unlikely(skdev->dbg_level > 1)) { dev_dbg(&skdev->pdev->dev, - "skreq=%x sksg_list=%p sksg_dma=%llx\n", - skreq->id, skreq->sksg_list, skreq->sksg_dma_address); + "skreq=%x sksg_list=%p sksg_dma=%pad\n", + skreq->id, skreq->sksg_list, &skreq->sksg_dma_address); for (i = 0; i < n_sg; i++) { struct fit_sg_descriptor *sgd = &skreq->sksg_list[i]; @@ -1190,8 +1190,8 @@ static void skd_send_fitmsg(struct skd_device *skdev, { u64 qcmd; - 
dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n", - skmsg->mb_dma_address, skd_in_flight(skdev)); + dev_dbg(&skdev->pdev->dev, "dma address %pad, busy=%d\n", + &skmsg->mb_dma_address, skd_in_flight(skdev)); dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf); qcmd = skmsg->mb_dma_address; @@ -1250,9 +1250,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev, } dev_dbg(&skdev->pdev->dev, - "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n", + "skspcl=%p id=%04x sksg_list=%p sksg_dma=%pad\n", skspcl, skspcl->req.id, skspcl->req.sksg_list, - skspcl->req.sksg_dma_address); + &skspcl->req.sksg_dma_address); for (i = 0; i < skspcl->req.n_sg; i++) { struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[i]; @@ -2685,8 +2685,8 @@ static int skd_cons_skmsg(struct skd_device *skdev) WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) & (FIT_QCMD_ALIGN - 1), - "not aligned: msg_buf %p mb_dma_address %#llx\n", - skmsg->msg_buf, skmsg->mb_dma_address); + "not aligned: msg_buf %p mb_dma_address %pad\n", + skmsg->msg_buf, &skmsg->mb_dma_address); memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES); } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index b5cedccb5d7d..8986adab9bf5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -251,14 +251,9 @@ static DEFINE_SPINLOCK(minor_lock); #define GRANTS_PER_INDIRECT_FRAME \ (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment)) -#define PSEGS_PER_INDIRECT_FRAME \ - (GRANTS_INDIRECT_FRAME / GRANTS_PSEGS) - #define INDIRECT_GREFS(_grants) \ DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME) -#define GREFS(_psegs) ((_psegs) * GRANTS_PER_PSEG) - static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo); static void blkfront_gather_backend_features(struct blkfront_info *info); static int negotiate_mq(struct blkfront_info *info); @@ -1441,7 +1436,7 @@ static bool blkif_completion(unsigned long *id, /* Wait the second response if not yet here. */ if (s2->status == REQ_WAITING) - return 0; + return false; bret->status = blkif_get_final_status(s->status, s2->status); @@ -1542,7 +1537,7 @@ static bool blkif_completion(unsigned long *id, } } - return 1; + return true; } static irqreturn_t blkif_interrupt(int irq, void *dev_id) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index a390c6d4f72d..c7acf74253a1 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1287,17 +1287,16 @@ static void zram_bio_discard(struct zram *zram, u32 index, * Returns 1 if IO request was successfully submitted. */ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, - int offset, bool is_write, struct bio *bio) + int offset, unsigned int op, struct bio *bio) { unsigned long start_time = jiffies; - int rw_acct = is_write ? 
REQ_OP_WRITE : REQ_OP_READ; struct request_queue *q = zram->disk->queue; int ret; - generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT, + generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT, &zram->disk->part0); - if (!is_write) { + if (!op_is_write(op)) { atomic64_inc(&zram->stats.num_reads); ret = zram_bvec_read(zram, bvec, index, offset, bio); flush_dcache_page(bvec->bv_page); @@ -1306,14 +1305,14 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index, ret = zram_bvec_write(zram, bvec, index, offset, bio); } - generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time); + generic_end_io_acct(q, op, &zram->disk->part0, start_time); zram_slot_lock(zram, index); zram_accessed(zram, index); zram_slot_unlock(zram, index); if (unlikely(ret < 0)) { - if (!is_write) + if (!op_is_write(op)) atomic64_inc(&zram->stats.failed_reads); else atomic64_inc(&zram->stats.failed_writes); @@ -1351,7 +1350,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio) bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset, unwritten); if (zram_bvec_rw(zram, &bv, index, offset, - op_is_write(bio_op(bio)), bio) < 0) + bio_op(bio), bio) < 0) goto out; bv.bv_offset += bv.bv_len; @@ -1403,7 +1402,7 @@ static void zram_slot_free_notify(struct block_device *bdev, } static int zram_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, bool is_write) + struct page *page, unsigned int op) { int offset, ret; u32 index; @@ -1427,7 +1426,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector, bv.bv_len = PAGE_SIZE; bv.bv_offset = 0; - ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL); + ret = zram_bvec_rw(zram, &bv, index, offset, op, NULL); out: /* * If I/O fails, just return error(ie, non-zero) without @@ -1442,7 +1441,7 @@ out: switch (ret) { case 0: - page_endio(page, is_write, 0); + page_endio(page, op_is_write(op), 0); break; case 1: ret = 0; diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index a78b8e7085e9..113fc6edb2b0 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -282,6 +282,7 @@ #include <linux/blkdev.h> #include <linux/times.h> #include <linux/uaccess.h> +#include <scsi/scsi_common.h> #include <scsi/scsi_request.h> /* used to tell the module to turn on full debugging messages */ @@ -345,10 +346,10 @@ static LIST_HEAD(cdrom_list); int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi, struct packet_command *cgc) { - if (cgc->sense) { - cgc->sense->sense_key = 0x05; - cgc->sense->asc = 0x20; - cgc->sense->ascq = 0x00; + if (cgc->sshdr) { + cgc->sshdr->sense_key = 0x05; + cgc->sshdr->asc = 0x20; + cgc->sshdr->ascq = 0x00; } cgc->stat = -EIO; @@ -2222,9 +2223,12 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf, blk_execute_rq(q, cdi->disk, rq, 0); if (scsi_req(rq)->result) { - struct request_sense *s = req->sense; + struct scsi_sense_hdr sshdr; + ret = -EIO; - cdi->last_sense = s->sense_key; + scsi_normalize_sense(req->sense, req->sense_len, + &sshdr); + cdi->last_sense = sshdr.sense_key; } if (blk_rq_unmap_user(bio)) @@ -2943,7 +2947,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, struct packet_command *cgc, int cmd) { - struct request_sense sense; + struct scsi_sense_hdr sshdr; struct cdrom_msf msf; int blocksize = 0, format = 0, lba; int ret; @@ -2971,13 +2975,13 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, if (cgc->buffer == NULL) return -ENOMEM; - memset(&sense, 0, 
sizeof(sense)); - cgc->sense = &sense; + memset(&sshdr, 0, sizeof(sshdr)); + cgc->sshdr = &sshdr; cgc->data_direction = CGC_DATA_READ; ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize); - if (ret && sense.sense_key == 0x05 && - sense.asc == 0x20 && - sense.ascq == 0x00) { + if (ret && sshdr.sense_key == 0x05 && + sshdr.asc == 0x20 && + sshdr.ascq == 0x00) { /* * SCSI-II devices are not required to support * READ_CD, so let's try switching block size @@ -2986,7 +2990,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, ret = cdrom_switch_blocksize(cdi, blocksize); if (ret) goto out; - cgc->sense = NULL; + cgc->sshdr = NULL; ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1); ret |= cdrom_switch_blocksize(cdi, blocksize); } diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 5f178384876f..44a7a255ef74 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c @@ -419,10 +419,11 @@ static void ide_cd_request_sense_fixup(ide_drive_t *drive, struct ide_cmd *cmd) int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, int write, void *buffer, unsigned *bufflen, - struct request_sense *sense, int timeout, + struct scsi_sense_hdr *sshdr, int timeout, req_flags_t rq_flags) { struct cdrom_info *info = drive->driver_data; + struct scsi_sense_hdr local_sshdr; int retries = 10; bool failed; @@ -430,6 +431,9 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, "rq_flags: 0x%x", cmd[0], write, timeout, rq_flags); + if (!sshdr) + sshdr = &local_sshdr; + /* start of retry loop */ do { struct request *rq; @@ -456,8 +460,8 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, if (buffer) *bufflen = scsi_req(rq)->resid_len; - if (sense) - memcpy(sense, scsi_req(rq)->sense, sizeof(*sense)); + scsi_normalize_sense(scsi_req(rq)->sense, + scsi_req(rq)->sense_len, sshdr); /* * FIXME: we should probably abort/retry or something in case of @@ -469,12 +473,10 @@ int ide_cd_queue_pc(ide_drive_t *drive, const unsigned char *cmd, * The request failed. Retry if it was due to a unit * attention status (usually means media was changed). */ - struct request_sense *reqbuf = scsi_req(rq)->sense; - - if (reqbuf->sense_key == UNIT_ATTENTION) + if (sshdr->sense_key == UNIT_ATTENTION) cdrom_saw_media_change(drive); - else if (reqbuf->sense_key == NOT_READY && - reqbuf->asc == 4 && reqbuf->ascq != 4) { + else if (sshdr->sense_key == NOT_READY && + sshdr->asc == 4 && sshdr->ascq != 4) { /* * The drive is in the process of loading * a disk. 
Retry, but wait a little to give @@ -864,7 +866,7 @@ static void msf_from_bcd(struct atapi_msf *msf) msf->frame = bcd2bin(msf->frame); } -int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) +int cdrom_check_status(ide_drive_t *drive, struct scsi_sense_hdr *sshdr) { struct cdrom_info *info = drive->driver_data; struct cdrom_device_info *cdi; @@ -886,12 +888,11 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense) */ cmd[7] = cdi->sanyo_slot % 3; - return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, RQF_QUIET); + return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sshdr, 0, RQF_QUIET); } static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, - unsigned long *sectors_per_frame, - struct request_sense *sense) + unsigned long *sectors_per_frame) { struct { __be32 lba; @@ -908,7 +909,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, memset(cmd, 0, BLK_MAX_CDB); cmd[0] = GPCMD_READ_CDVD_CAPACITY; - stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, sense, 0, + stat = ide_cd_queue_pc(drive, cmd, 0, &capbuf, &len, NULL, 0, RQF_QUIET); if (stat) return stat; @@ -944,8 +945,7 @@ static int cdrom_read_capacity(ide_drive_t *drive, unsigned long *capacity, } static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, - int format, char *buf, int buflen, - struct request_sense *sense) + int format, char *buf, int buflen) { unsigned char cmd[BLK_MAX_CDB]; @@ -962,11 +962,11 @@ static int cdrom_read_tocentry(ide_drive_t *drive, int trackno, int msf_flag, if (msf_flag) cmd[1] = 2; - return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, sense, 0, RQF_QUIET); + return ide_cd_queue_pc(drive, cmd, 0, buf, &buflen, NULL, 0, RQF_QUIET); } /* Try to read the entire TOC for the disk into our internal buffer. */ -int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) +int ide_cd_read_toc(ide_drive_t *drive) { int stat, ntracks, i; struct cdrom_info *info = drive->driver_data; @@ -996,14 +996,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) * Check to see if the existing data is still valid. If it is, * just return. 
*/ - (void) cdrom_check_status(drive, sense); + (void) cdrom_check_status(drive, NULL); if (drive->atapi_flags & IDE_AFLAG_TOC_VALID) return 0; /* try to get the total cdrom capacity and sector size */ - stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame, - sense); + stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame); if (stat) toc->capacity = 0x1fffff; @@ -1016,7 +1015,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) /* first read just the header, so we know how long the TOC is */ stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, - sizeof(struct atapi_toc_header), sense); + sizeof(struct atapi_toc_header)); if (stat) return stat; @@ -1036,7 +1035,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) (char *)&toc->hdr, sizeof(struct atapi_toc_header) + (ntracks + 1) * - sizeof(struct atapi_toc_entry), sense); + sizeof(struct atapi_toc_entry)); if (stat && toc->hdr.first_track > 1) { /* @@ -1056,8 +1055,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) (char *)&toc->hdr, sizeof(struct atapi_toc_header) + (ntracks + 1) * - sizeof(struct atapi_toc_entry), - sense); + sizeof(struct atapi_toc_entry)); if (stat) return stat; @@ -1094,7 +1092,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) if (toc->hdr.first_track != CDROM_LEADOUT) { /* read the multisession information */ stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp, - sizeof(ms_tmp), sense); + sizeof(ms_tmp)); if (stat) return stat; @@ -1108,7 +1106,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense) if (drive->atapi_flags & IDE_AFLAG_TOCADDR_AS_BCD) { /* re-read multisession information using MSF format */ stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp, - sizeof(ms_tmp), sense); + sizeof(ms_tmp)); if (stat) return stat; @@ -1412,7 +1410,7 @@ static sector_t ide_cdrom_capacity(ide_drive_t *drive) { unsigned long capacity, sectors_per_frame; - if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL)) + if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame)) return 0; return capacity * sectors_per_frame; @@ -1710,9 +1708,8 @@ static unsigned int idecd_check_events(struct gendisk *disk, static int idecd_revalidate_disk(struct gendisk *disk) { struct cdrom_info *info = ide_drv_g(disk, cdrom_info); - struct request_sense sense; - ide_cd_read_toc(info->drive, &sense); + ide_cd_read_toc(info->drive); return 0; } @@ -1736,7 +1733,6 @@ static int ide_cd_probe(ide_drive_t *drive) { struct cdrom_info *info; struct gendisk *g; - struct request_sense sense; ide_debug_log(IDE_DBG_PROBE, "driver_req: %s, media: 0x%x", drive->driver_req, drive->media); @@ -1785,7 +1781,7 @@ static int ide_cd_probe(ide_drive_t *drive) goto failed; } - ide_cd_read_toc(drive, &sense); + ide_cd_read_toc(drive); g->fops = &idecd_ops; g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE; device_add_disk(&drive->gendev, g); diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h index 04f0f310a856..a69dc7f61c4d 100644 --- a/drivers/ide/ide-cd.h +++ b/drivers/ide/ide-cd.h @@ -98,11 +98,11 @@ void ide_cd_log_error(const char *, struct request *, struct request_sense *); /* ide-cd.c functions used by ide-cd_ioctl.c */ int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *, - unsigned *, struct request_sense *, int, req_flags_t); -int ide_cd_read_toc(ide_drive_t *, struct request_sense *); + unsigned *, struct scsi_sense_hdr *, int, req_flags_t); +int
ide_cd_read_toc(ide_drive_t *); int ide_cdrom_get_capabilities(ide_drive_t *, u8 *); void ide_cdrom_update_speed(ide_drive_t *, u8 *); -int cdrom_check_status(ide_drive_t *, struct request_sense *); +int cdrom_check_status(ide_drive_t *, struct scsi_sense_hdr *); /* ide-cd_ioctl.c */ int ide_cdrom_open_real(struct cdrom_device_info *, int); diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index b1322400887b..4a6e1a413ead 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c @@ -43,14 +43,14 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr) { ide_drive_t *drive = cdi->handle; struct media_event_desc med; - struct request_sense sense; + struct scsi_sense_hdr sshdr; int stat; if (slot_nr != CDSL_CURRENT) return -EINVAL; - stat = cdrom_check_status(drive, &sense); - if (!stat || sense.sense_key == UNIT_ATTENTION) + stat = cdrom_check_status(drive, &sshdr); + if (!stat || sshdr.sense_key == UNIT_ATTENTION) return CDS_DISC_OK; if (!cdrom_get_media_event(cdi, &med)) { @@ -62,8 +62,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr) return CDS_NO_DISC; } - if (sense.sense_key == NOT_READY && sense.asc == 0x04 - && sense.ascq == 0x04) + if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04 + && sshdr.ascq == 0x04) return CDS_DISC_OK; /* @@ -71,8 +71,8 @@ int ide_cdrom_drive_status(struct cdrom_device_info *cdi, int slot_nr) * just return TRAY_OPEN since ATAPI doesn't provide * any other way to detect this... */ - if (sense.sense_key == NOT_READY) { - if (sense.asc == 0x3a && sense.ascq == 1) + if (sshdr.sense_key == NOT_READY) { + if (sshdr.asc == 0x3a && sshdr.ascq == 1) return CDS_NO_DISC; else return CDS_TRAY_OPEN; @@ -105,8 +105,7 @@ unsigned int ide_cdrom_check_events_real(struct cdrom_device_info *cdi, /* Eject the disk if EJECTFLAG is 0. If EJECTFLAG is 1, try to reload the disk. */ static -int cdrom_eject(ide_drive_t *drive, int ejectflag, - struct request_sense *sense) +int cdrom_eject(ide_drive_t *drive, int ejectflag) { struct cdrom_info *cd = drive->driver_data; struct cdrom_device_info *cdi = &cd->devinfo; @@ -129,20 +128,16 @@ int cdrom_eject(ide_drive_t *drive, int ejectflag, cmd[0] = GPCMD_START_STOP_UNIT; cmd[4] = loej | (ejectflag != 0); - return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, sense, 0, 0); + return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0); } /* Lock the door if LOCKFLAG is nonzero; unlock it otherwise. */ static -int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, - struct request_sense *sense) +int ide_cd_lockdoor(ide_drive_t *drive, int lockflag) { - struct request_sense my_sense; + struct scsi_sense_hdr sshdr; int stat; - if (sense == NULL) - sense = &my_sense; - /* If the drive cannot lock the door, just pretend. */ if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0) { stat = 0; @@ -155,14 +150,14 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, cmd[4] = lockflag ? 1 : 0; stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, - sense, 0, 0); + &sshdr, 0, 0); } /* If we got an illegal field error, the drive probably cannot lock the door. */ if (stat != 0 && - sense->sense_key == ILLEGAL_REQUEST && - (sense->asc == 0x24 || sense->asc == 0x20)) { + sshdr.sense_key == ILLEGAL_REQUEST && + (sshdr.asc == 0x24 || sshdr.asc == 0x20)) { printk(KERN_ERR "%s: door locking not supported\n", drive->name); drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING; @@ -170,7 +165,7 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, } /* no medium, that's alright. 
*/ - if (stat != 0 && sense->sense_key == NOT_READY && sense->asc == 0x3a) + if (stat != 0 && sshdr.sense_key == NOT_READY && sshdr.asc == 0x3a) stat = 0; if (stat == 0) { @@ -186,23 +181,22 @@ int ide_cd_lockdoor(ide_drive_t *drive, int lockflag, int ide_cdrom_tray_move(struct cdrom_device_info *cdi, int position) { ide_drive_t *drive = cdi->handle; - struct request_sense sense; if (position) { - int stat = ide_cd_lockdoor(drive, 0, &sense); + int stat = ide_cd_lockdoor(drive, 0); if (stat) return stat; } - return cdrom_eject(drive, !position, &sense); + return cdrom_eject(drive, !position); } int ide_cdrom_lock_door(struct cdrom_device_info *cdi, int lock) { ide_drive_t *drive = cdi->handle; - return ide_cd_lockdoor(drive, lock, NULL); + return ide_cd_lockdoor(drive, lock); } /* @@ -213,7 +207,6 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed) { ide_drive_t *drive = cdi->handle; struct cdrom_info *cd = drive->driver_data; - struct request_sense sense; u8 buf[ATAPI_CAPABILITIES_PAGE_SIZE]; int stat; unsigned char cmd[BLK_MAX_CDB]; @@ -236,7 +229,7 @@ int ide_cdrom_select_speed(struct cdrom_device_info *cdi, int speed) cmd[5] = speed & 0xff; } - stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0); + stat = ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0); if (!ide_cdrom_get_capabilities(drive, buf)) { ide_cdrom_update_speed(drive, buf); @@ -252,11 +245,10 @@ int ide_cdrom_get_last_session(struct cdrom_device_info *cdi, struct atapi_toc *toc; ide_drive_t *drive = cdi->handle; struct cdrom_info *info = drive->driver_data; - struct request_sense sense; int ret; if ((drive->atapi_flags & IDE_AFLAG_TOC_VALID) == 0 || !info->toc) { - ret = ide_cd_read_toc(drive, &sense); + ret = ide_cd_read_toc(drive); if (ret) return ret; } @@ -300,7 +292,6 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi) { ide_drive_t *drive = cdi->handle; struct cdrom_info *cd = drive->driver_data; - struct request_sense sense; struct request *rq; int ret; @@ -315,7 +306,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi) * lock it again. */ if (drive->atapi_flags & IDE_AFLAG_DOOR_LOCKED) - (void)ide_cd_lockdoor(drive, 1, &sense); + (void)ide_cd_lockdoor(drive, 1); return ret; } @@ -355,7 +346,6 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg) struct atapi_toc_entry *first_toc, *last_toc; unsigned long lba_start, lba_end; int stat; - struct request_sense sense; unsigned char cmd[BLK_MAX_CDB]; stat = ide_cd_get_toc_entry(drive, ti->cdti_trk0, &first_toc); @@ -380,7 +370,7 @@ static int ide_cd_fake_play_trkind(ide_drive_t *drive, void *arg) lba_to_msf(lba_start, &cmd[3], &cmd[4], &cmd[5]); lba_to_msf(lba_end - 1, &cmd[6], &cmd[7], &cmd[8]); - return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, &sense, 0, 0); + return ide_cd_queue_pc(drive, cmd, 0, NULL, NULL, NULL, 0, 0); } static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) @@ -391,7 +381,7 @@ static int ide_cd_read_tochdr(ide_drive_t *drive, void *arg) int stat; /* Make sure our saved TOC is valid. */ - stat = ide_cd_read_toc(drive, NULL); + stat = ide_cd_read_toc(drive); if (stat) return stat; @@ -461,8 +451,8 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, layer. the packet must be complete, as we do not touch it at all. 
*/ - if (cgc->sense) - memset(cgc->sense, 0, sizeof(struct request_sense)); + if (cgc->sshdr) + memset(cgc->sshdr, 0, sizeof(*cgc->sshdr)); if (cgc->quiet) flags |= RQF_QUIET; @@ -470,7 +460,7 @@ int ide_cdrom_packet(struct cdrom_device_info *cdi, cgc->stat = ide_cd_queue_pc(drive, cgc->cmd, cgc->data_direction == CGC_DATA_WRITE, cgc->buffer, &len, - cgc->sense, cgc->timeout, flags); + cgc->sshdr, cgc->timeout, flags); if (!cgc->stat) cgc->buflen -= len; return cgc->stat; diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index ca844a926e6a..130bf163f066 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -311,7 +311,7 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs, { domain->sig_type = IB_SIG_TYPE_T10_DIF; domain->sig.dif.pi_interval = scsi_prot_interval(sc); - domain->sig.dif.ref_tag = scsi_prot_ref_tag(sc); + domain->sig.dif.ref_tag = t10_pi_ref_tag(sc->request); /* * At the moment we hard code those, but in the future * we will take them from sc. diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 9c03f35d9df1..439bf90d084d 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -17,23 +17,25 @@ menuconfig NVM if NVM -config NVM_DEBUG - bool "Open-Channel SSD debugging support" - default n - ---help--- - Exposes a debug management interface to create/remove targets at: +config NVM_PBLK + tristate "Physical Block Device Open-Channel SSD target" + help + Allows an open-channel SSD to be exposed as a block device to the + host. The target assumes the device exposes raw flash and must be + explicitly managed by the host. - /sys/module/lnvm/parameters/configure_debug + Please note the disk format is considered EXPERIMENTAL for now. - It is required to create/remove targets without IOCTLs. +if NVM_PBLK -config NVM_PBLK - tristate "Physical Block Device Open-Channel SSD target" - ---help--- - Allows an open-channel SSD to be exposed as a block device to the - host. The target assumes the device exposes raw flash and must be - explicitly managed by the host. +config NVM_PBLK_DEBUG + bool "PBlk Debug Support" + default n + help + Enables debug support for pblk. This includes extra checks, more + vocal error messages, and extra tracking fields in the pblk sysfs + entries. - Please note the disk format is considered EXPERIMENTAL for now. +endif # NVM_PBLK endif # NVM diff --git a/drivers/lightnvm/pblk-cache.c b/drivers/lightnvm/pblk-cache.c index b1c6d7eb6115..f565a56b898a 100644 --- a/drivers/lightnvm/pblk-cache.c +++ b/drivers/lightnvm/pblk-cache.c @@ -27,7 +27,8 @@ int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags) int nr_entries = pblk_get_secs(bio); int i, ret; - generic_start_io_acct(q, WRITE, bio_sectors(bio), &pblk->disk->part0); + generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio), + &pblk->disk->part0); /* Update the write buffer head (mem) with the entries that we can * write. 
The write in itself cannot fail, so there is no need to @@ -67,7 +68,7 @@ retry: atomic64_add(nr_entries, &pblk->user_wa); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(nr_entries, &pblk->inflight_writes); atomic_long_add(nr_entries, &pblk->req_writes); #endif @@ -75,7 +76,7 @@ retry: pblk_rl_inserted(&pblk->rl, nr_entries); out: - generic_end_io_acct(q, WRITE, &pblk->disk->part0, start_time); + generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time); pblk_write_should_kick(pblk); return ret; } @@ -123,7 +124,7 @@ retry: atomic64_add(valid_entries, &pblk->gc_wa); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(valid_entries, &pblk->inflight_writes); atomic_long_add(valid_entries, &pblk->recov_gc_writes); #endif diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index ed9cc977c8b3..00984b486fea 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -35,7 +35,7 @@ static void pblk_line_mark_bb(struct work_struct *work) line = &pblk->lines[pblk_ppa_to_line(*ppa)]; pos = pblk_ppa_to_pos(&dev->geo, *ppa); - pr_err("pblk: failed to mark bb, line:%d, pos:%d\n", + pblk_err(pblk, "failed to mark bb, line:%d, pos:%d\n", line->id, pos); } @@ -51,12 +51,12 @@ static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line, struct ppa_addr *ppa; int pos = pblk_ppa_to_pos(geo, ppa_addr); - pr_debug("pblk: erase failed: line:%d, pos:%d\n", line->id, pos); + pblk_debug(pblk, "erase failed: line:%d, pos:%d\n", line->id, pos); atomic_long_inc(&pblk->erase_failed); atomic_dec(&line->blk_in_line); if (test_and_set_bit(pos, line->blk_bitmap)) - pr_err("pblk: attempted to erase bb: line:%d, pos:%d\n", + pblk_err(pblk, "attempted to erase bb: line:%d, pos:%d\n", line->id, pos); /* Not necessary to mark bad blocks on 2.0 spec. 
*/ @@ -194,7 +194,7 @@ void pblk_map_invalidate(struct pblk *pblk, struct ppa_addr ppa) u64 paddr; int line_id; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Callers must ensure that the ppa points to a device address */ BUG_ON(pblk_addr_in_cache(ppa)); BUG_ON(pblk_ppa_empty(ppa)); @@ -264,6 +264,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) switch (type) { case PBLK_WRITE: kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap); + /* fall through */ case PBLK_WRITE_INT: pool = &pblk->w_rq_pool; break; @@ -274,7 +275,7 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type) pool = &pblk->e_rq_pool; break; default: - pr_err("pblk: trying to free unknown rqd type\n"); + pblk_err(pblk, "trying to free unknown rqd type\n"); return; } @@ -310,7 +311,7 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags, ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0); if (ret != PBLK_EXPOSED_PAGE_SIZE) { - pr_err("pblk: could not add page to bio\n"); + pblk_err(pblk, "could not add page to bio\n"); mempool_free(page, &pblk->page_bio_pool); goto err; } @@ -410,7 +411,7 @@ struct list_head *pblk_line_gc_list(struct pblk *pblk, struct pblk_line *line) line->state = PBLK_LINESTATE_CORRUPT; line->gc_group = PBLK_LINEGC_NONE; move_list = &l_mg->corrupt_list; - pr_err("pblk: corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n", + pblk_err(pblk, "corrupted vsc for line %d, vsc:%d (%d/%d/%d)\n", line->id, vsc, line->sec_in_line, lm->high_thrs, lm->mid_thrs); @@ -430,7 +431,7 @@ void pblk_discard(struct pblk *pblk, struct bio *bio) void pblk_log_write_err(struct pblk *pblk, struct nvm_rq *rqd) { atomic_long_inc(&pblk->write_failed); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG pblk_print_failed_rqd(pblk, rqd, rqd->error); #endif } @@ -452,9 +453,9 @@ void pblk_log_read_err(struct pblk *pblk, struct nvm_rq *rqd) atomic_long_inc(&pblk->read_failed); break; default: - pr_err("pblk: unknown read error:%d\n", rqd->error); + pblk_err(pblk, "unknown read error:%d\n", rqd->error); } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG pblk_print_failed_rqd(pblk, rqd, rqd->error); #endif } @@ -470,7 +471,7 @@ int pblk_submit_io(struct pblk *pblk, struct nvm_rq *rqd) atomic_inc(&pblk->inflight_io); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG if (pblk_check_io(pblk, rqd)) return NVM_IO_ERR; #endif @@ -484,7 +485,7 @@ int pblk_submit_io_sync(struct pblk *pblk, struct nvm_rq *rqd) atomic_inc(&pblk->inflight_io); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG if (pblk_check_io(pblk, rqd)) return NVM_IO_ERR; #endif @@ -517,7 +518,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data, for (i = 0; i < nr_secs; i++) { page = vmalloc_to_page(kaddr); if (!page) { - pr_err("pblk: could not map vmalloc bio\n"); + pblk_err(pblk, "could not map vmalloc bio\n"); bio_put(bio); bio = ERR_PTR(-ENOMEM); goto out; @@ -525,7 +526,7 @@ struct bio *pblk_bio_map_addr(struct pblk *pblk, void *data, ret = bio_add_pc_page(dev->q, bio, page, PAGE_SIZE, 0); if (ret != PAGE_SIZE) { - pr_err("pblk: could not add page to bio\n"); + pblk_err(pblk, "could not add page to bio\n"); bio_put(bio); bio = ERR_PTR(-ENOMEM); goto out; @@ -711,7 +712,7 @@ next_rq: while (test_bit(pos, line->blk_bitmap)) { paddr += min; if (pblk_boundary_paddr_checks(pblk, paddr)) { - pr_err("pblk: corrupt emeta line:%d\n", + pblk_err(pblk, "corrupt emeta line:%d\n", line->id); bio_put(bio); ret = -EINTR; @@ -723,7 +724,7 @@ next_rq: } if 
(pblk_boundary_paddr_checks(pblk, paddr + min)) { - pr_err("pblk: corrupt emeta line:%d\n", + pblk_err(pblk, "corrupt emeta line:%d\n", line->id); bio_put(bio); ret = -EINTR; @@ -738,7 +739,7 @@ next_rq: ret = pblk_submit_io_sync(pblk, &rqd); if (ret) { - pr_err("pblk: emeta I/O submission failed: %d\n", ret); + pblk_err(pblk, "emeta I/O submission failed: %d\n", ret); bio_put(bio); goto free_rqd_dma; } @@ -843,7 +844,7 @@ static int pblk_line_submit_smeta_io(struct pblk *pblk, struct pblk_line *line, */ ret = pblk_submit_io_sync(pblk, &rqd); if (ret) { - pr_err("pblk: smeta I/O submission failed: %d\n", ret); + pblk_err(pblk, "smeta I/O submission failed: %d\n", ret); bio_put(bio); goto free_ppa_list; } @@ -905,7 +906,7 @@ static int pblk_blk_erase_sync(struct pblk *pblk, struct ppa_addr ppa) struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - pr_err("pblk: could not sync erase line:%d,blk:%d\n", + pblk_err(pblk, "could not sync erase line:%d,blk:%d\n", pblk_ppa_to_line(ppa), pblk_ppa_to_pos(geo, ppa)); @@ -945,7 +946,7 @@ int pblk_line_erase(struct pblk *pblk, struct pblk_line *line) ret = pblk_blk_erase_sync(pblk, ppa); if (ret) { - pr_err("pblk: failed to erase line %d\n", line->id); + pblk_err(pblk, "failed to erase line %d\n", line->id); return ret; } } while (1); @@ -1012,7 +1013,7 @@ static int pblk_line_init_metadata(struct pblk *pblk, struct pblk_line *line, list_add_tail(&line->list, &l_mg->bad_list); spin_unlock(&l_mg->free_lock); - pr_debug("pblk: line %d is bad\n", line->id); + pblk_debug(pblk, "line %d is bad\n", line->id); return 0; } @@ -1122,7 +1123,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, line->cur_sec = off + lm->smeta_sec; if (init && pblk_line_submit_smeta_io(pblk, line, off, PBLK_WRITE)) { - pr_debug("pblk: line smeta I/O failed. Retry\n"); + pblk_debug(pblk, "line smeta I/O failed. 
Retry\n"); return 0; } @@ -1154,7 +1155,7 @@ static int pblk_line_init_bb(struct pblk *pblk, struct pblk_line *line, spin_unlock(&line->lock); list_add_tail(&line->list, &l_mg->bad_list); - pr_err("pblk: unexpected line %d is bad\n", line->id); + pblk_err(pblk, "unexpected line %d is bad\n", line->id); return 0; } @@ -1299,7 +1300,7 @@ struct pblk_line *pblk_line_get(struct pblk *pblk) retry: if (list_empty(&l_mg->free_list)) { - pr_err("pblk: no free lines\n"); + pblk_err(pblk, "no free lines\n"); return NULL; } @@ -1315,7 +1316,7 @@ retry: list_add_tail(&line->list, &l_mg->bad_list); - pr_debug("pblk: line %d is bad\n", line->id); + pblk_debug(pblk, "line %d is bad\n", line->id); goto retry; } @@ -1329,7 +1330,7 @@ retry: list_add(&line->list, &l_mg->corrupt_list); goto retry; default: - pr_err("pblk: failed to prepare line %d\n", line->id); + pblk_err(pblk, "failed to prepare line %d\n", line->id); list_add(&line->list, &l_mg->free_list); l_mg->nr_free_lines++; return NULL; @@ -1477,7 +1478,7 @@ static void pblk_line_close_meta_sync(struct pblk *pblk) ret = pblk_submit_meta_io(pblk, line); if (ret) { - pr_err("pblk: sync meta line %d failed (%d)\n", + pblk_err(pblk, "sync meta line %d failed (%d)\n", line->id, ret); return; } @@ -1507,7 +1508,7 @@ void __pblk_pipeline_flush(struct pblk *pblk) ret = pblk_recov_pad(pblk); if (ret) { - pr_err("pblk: could not close data on teardown(%d)\n", ret); + pblk_err(pblk, "could not close data on teardown(%d)\n", ret); return; } @@ -1687,7 +1688,7 @@ int pblk_blk_erase_async(struct pblk *pblk, struct ppa_addr ppa) struct nvm_tgt_dev *dev = pblk->dev; struct nvm_geo *geo = &dev->geo; - pr_err("pblk: could not async erase line:%d,blk:%d\n", + pblk_err(pblk, "could not async erase line:%d,blk:%d\n", pblk_ppa_to_line(ppa), pblk_ppa_to_pos(geo, ppa)); } @@ -1726,7 +1727,7 @@ void pblk_line_close(struct pblk *pblk, struct pblk_line *line) struct list_head *move_list; int i; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG WARN(!bitmap_full(line->map_bitmap, lm->sec_per_line), "pblk: corrupt closed line %d\n", line->id); #endif @@ -1856,7 +1857,7 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, * Only send one inflight I/O per LUN. 
Since we map at a page * granurality, all ppas in the I/O will map to the same LUN */ -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG int i; for (i = 1; i < nr_ppas; i++) @@ -1866,7 +1867,8 @@ static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000)); if (ret == -ETIME || ret == -EINTR) - pr_err("pblk: taking lun semaphore timed out: err %d\n", -ret); + pblk_err(pblk, "taking lun semaphore timed out: err %d\n", + -ret); } void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) @@ -1901,7 +1903,7 @@ void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) struct pblk_lun *rlun; int pos = pblk_ppa_to_pos(geo, ppa_list[0]); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG int i; for (i = 1; i < nr_ppas; i++) @@ -1951,7 +1953,7 @@ void pblk_update_map(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) void pblk_update_map_cache(struct pblk *pblk, sector_t lba, struct ppa_addr ppa) { -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Callers must ensure that the ppa points to a cache address */ BUG_ON(!pblk_addr_in_cache(ppa)); BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa))); @@ -1966,7 +1968,7 @@ int pblk_update_map_gc(struct pblk *pblk, sector_t lba, struct ppa_addr ppa_new, struct ppa_addr ppa_l2p, ppa_gc; int ret = 1; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Callers must ensure that the ppa points to a cache address */ BUG_ON(!pblk_addr_in_cache(ppa_new)); BUG_ON(pblk_rb_pos_oob(&pblk->rwb, pblk_addr_to_cacheline(ppa_new))); @@ -2003,14 +2005,14 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba, { struct ppa_addr ppa_l2p; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Callers must ensure that the ppa points to a device address */ BUG_ON(pblk_addr_in_cache(ppa_mapped)); #endif /* Invalidate and discard padded entries */ if (lba == ADDR_EMPTY) { atomic64_inc(&pblk->pad_wa); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_inc(&pblk->padded_wb); #endif if (!pblk_ppa_empty(ppa_mapped)) @@ -2036,7 +2038,7 @@ void pblk_update_map_dev(struct pblk *pblk, sector_t lba, goto out; } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG WARN_ON(!pblk_addr_in_cache(ppa_l2p) && !pblk_ppa_empty(ppa_l2p)); #endif diff --git a/drivers/lightnvm/pblk-gc.c b/drivers/lightnvm/pblk-gc.c index 080469d90b40..157c2567c9e8 100644 --- a/drivers/lightnvm/pblk-gc.c +++ b/drivers/lightnvm/pblk-gc.c @@ -90,7 +90,7 @@ static void pblk_gc_line_ws(struct work_struct *work) gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs)); if (!gc_rq->data) { - pr_err("pblk: could not GC line:%d (%d/%d)\n", + pblk_err(pblk, "could not GC line:%d (%d/%d)\n", line->id, *line->vsc, gc_rq->nr_secs); goto out; } @@ -98,7 +98,7 @@ static void pblk_gc_line_ws(struct work_struct *work) /* Read from GC victim block */ ret = pblk_submit_read_gc(pblk, gc_rq); if (ret) { - pr_err("pblk: failed GC read in line:%d (err:%d)\n", + pblk_err(pblk, "failed GC read in line:%d (err:%d)\n", line->id, ret); goto out; } @@ -146,7 +146,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk, ret = pblk_line_read_emeta(pblk, line, emeta_buf); if (ret) { - pr_err("pblk: line %d read emeta failed (%d)\n", + pblk_err(pblk, "line %d read emeta failed (%d)\n", line->id, ret); pblk_mfree(emeta_buf, l_mg->emeta_alloc_type); return NULL; @@ -160,7 +160,7 @@ static __le64 *get_lba_list_from_emeta(struct pblk *pblk, ret = 
pblk_recov_check_emeta(pblk, emeta_buf); if (ret) { - pr_err("pblk: inconsistent emeta (line %d)\n", + pblk_err(pblk, "inconsistent emeta (line %d)\n", line->id); pblk_mfree(emeta_buf, l_mg->emeta_alloc_type); return NULL; @@ -201,7 +201,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work) } else { lba_list = get_lba_list_from_emeta(pblk, line); if (!lba_list) { - pr_err("pblk: could not interpret emeta (line %d)\n", + pblk_err(pblk, "could not interpret emeta (line %d)\n", line->id); goto fail_free_invalid_bitmap; } @@ -213,7 +213,7 @@ static void pblk_gc_line_prepare_ws(struct work_struct *work) spin_unlock(&line->lock); if (sec_left < 0) { - pr_err("pblk: corrupted GC line (%d)\n", line->id); + pblk_err(pblk, "corrupted GC line (%d)\n", line->id); goto fail_free_lba_list; } @@ -289,7 +289,7 @@ fail_free_ws: kref_put(&line->ref, pblk_line_put); atomic_dec(&gc->read_inflight_gc); - pr_err("pblk: Failed to GC line %d\n", line->id); + pblk_err(pblk, "failed to GC line %d\n", line->id); } static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line) @@ -297,7 +297,7 @@ static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line) struct pblk_gc *gc = &pblk->gc; struct pblk_line_ws *line_ws; - pr_debug("pblk: line '%d' being reclaimed for GC\n", line->id); + pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id); line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL); if (!line_ws) @@ -351,7 +351,7 @@ static int pblk_gc_read(struct pblk *pblk) pblk_gc_kick(pblk); if (pblk_gc_line(pblk, line)) - pr_err("pblk: failed to GC line %d\n", line->id); + pblk_err(pblk, "failed to GC line %d\n", line->id); return 0; } @@ -522,8 +522,8 @@ static int pblk_gc_reader_ts(void *data) io_schedule(); } -#ifdef CONFIG_NVM_DEBUG - pr_info("pblk: flushing gc pipeline, %d lines left\n", +#ifdef CONFIG_NVM_PBLK_DEBUG + pblk_info(pblk, "flushing gc pipeline, %d lines left\n", atomic_read(&gc->pipeline_gc)); #endif @@ -540,7 +540,7 @@ static int pblk_gc_reader_ts(void *data) static void pblk_gc_start(struct pblk *pblk) { pblk->gc.gc_active = 1; - pr_debug("pblk: gc start\n"); + pblk_debug(pblk, "gc start\n"); } void pblk_gc_should_start(struct pblk *pblk) @@ -605,14 +605,14 @@ int pblk_gc_init(struct pblk *pblk) gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts"); if (IS_ERR(gc->gc_ts)) { - pr_err("pblk: could not allocate GC main kthread\n"); + pblk_err(pblk, "could not allocate GC main kthread\n"); return PTR_ERR(gc->gc_ts); } gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk, "pblk-gc-writer-ts"); if (IS_ERR(gc->gc_writer_ts)) { - pr_err("pblk: could not allocate GC writer kthread\n"); + pblk_err(pblk, "could not allocate GC writer kthread\n"); ret = PTR_ERR(gc->gc_writer_ts); goto fail_free_main_kthread; } @@ -620,7 +620,7 @@ int pblk_gc_init(struct pblk *pblk) gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk, "pblk-gc-reader-ts"); if (IS_ERR(gc->gc_reader_ts)) { - pr_err("pblk: could not allocate GC reader kthread\n"); + pblk_err(pblk, "could not allocate GC reader kthread\n"); ret = PTR_ERR(gc->gc_reader_ts); goto fail_free_writer_kthread; } @@ -641,7 +641,7 @@ int pblk_gc_init(struct pblk *pblk) gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq", WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS); if (!gc->gc_line_reader_wq) { - pr_err("pblk: could not allocate GC line reader workqueue\n"); + pblk_err(pblk, "could not allocate GC line reader workqueue\n"); ret = -ENOMEM; goto fail_free_reader_kthread; } @@ -650,7 +650,7 @@ int 
pblk_gc_init(struct pblk *pblk) gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq", WQ_MEM_RECLAIM | WQ_UNBOUND, 1); if (!gc->gc_reader_wq) { - pr_err("pblk: could not allocate GC reader workqueue\n"); + pblk_err(pblk, "could not allocate GC reader workqueue\n"); ret = -ENOMEM; goto fail_free_reader_line_wq; } diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index b57f764d6a16..537e98f2b24a 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -91,7 +91,7 @@ static size_t pblk_trans_map_size(struct pblk *pblk) return entry_size * pblk->rl.nr_secs; } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG static u32 pblk_l2p_crc(struct pblk *pblk) { size_t map_size; @@ -117,13 +117,13 @@ static int pblk_l2p_recover(struct pblk *pblk, bool factory_init) } else { line = pblk_recov_l2p(pblk); if (IS_ERR(line)) { - pr_err("pblk: could not recover l2p table\n"); + pblk_err(pblk, "could not recover l2p table\n"); return -EFAULT; } } -#ifdef CONFIG_NVM_DEBUG - pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk)); +#ifdef CONFIG_NVM_PBLK_DEBUG + pblk_info(pblk, "init: L2P CRC: %x\n", pblk_l2p_crc(pblk)); #endif /* Free full lines directly as GC has not been started yet */ @@ -166,7 +166,7 @@ static int pblk_l2p_init(struct pblk *pblk, bool factory_init) static void pblk_rwb_free(struct pblk *pblk) { if (pblk_rb_tear_down_check(&pblk->rwb)) - pr_err("pblk: write buffer error on tear down\n"); + pblk_err(pblk, "write buffer error on tear down\n"); pblk_rb_data_free(&pblk->rwb); vfree(pblk_rb_entries_ref(&pblk->rwb)); @@ -179,11 +179,14 @@ static int pblk_rwb_init(struct pblk *pblk) struct pblk_rb_entry *entries; unsigned long nr_entries, buffer_size; unsigned int power_size, power_seg_sz; + int pgs_in_buffer; - if (write_buffer_size && (write_buffer_size > pblk->pgs_in_buffer)) + pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns; + + if (write_buffer_size && (write_buffer_size > pgs_in_buffer)) buffer_size = write_buffer_size; else - buffer_size = pblk->pgs_in_buffer; + buffer_size = pgs_in_buffer; nr_entries = pblk_rb_calculate_size(buffer_size); @@ -200,7 +203,8 @@ static int pblk_rwb_init(struct pblk *pblk) /* Minimum pages needed within a lun */ #define ADDR_POOL_SIZE 64 -static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) +static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo, + struct nvm_addrf_12 *dst) { struct nvm_addrf_12 *src = (struct nvm_addrf_12 *)&geo->addrf; int power_len; @@ -208,14 +212,14 @@ static int pblk_set_addrf_12(struct nvm_geo *geo, struct nvm_addrf_12 *dst) /* Re-calculate channel and lun format to adapt to configuration */ power_len = get_count_order(geo->num_ch); if (1 << power_len != geo->num_ch) { - pr_err("pblk: supports only power-of-two channel config.\n"); + pblk_err(pblk, "supports only power-of-two channel config.\n"); return -EINVAL; } dst->ch_len = power_len; power_len = get_count_order(geo->num_lun); if (1 << power_len != geo->num_lun) { - pr_err("pblk: supports only power-of-two LUN config.\n"); + pblk_err(pblk, "supports only power-of-two LUN config.\n"); return -EINVAL; } dst->lun_len = power_len; @@ -282,18 +286,19 @@ static int pblk_set_addrf(struct pblk *pblk) case NVM_OCSSD_SPEC_12: div_u64_rem(geo->clba, pblk->min_write_pgs, &mod); if (mod) { - pr_err("pblk: bad configuration of sectors/pages\n"); + pblk_err(pblk, "bad configuration of sectors/pages\n"); return -EINVAL; } - pblk->addrf_len = pblk_set_addrf_12(geo, (void *)&pblk->addrf); + 
pblk->addrf_len = pblk_set_addrf_12(pblk, geo, + (void *)&pblk->addrf); break; case NVM_OCSSD_SPEC_20: pblk->addrf_len = pblk_set_addrf_20(geo, (void *)&pblk->addrf, - &pblk->uaddrf); + &pblk->uaddrf); break; default: - pr_err("pblk: OCSSD revision not supported (%d)\n", + pblk_err(pblk, "OCSSD revision not supported (%d)\n", geo->version); return -EINVAL; } @@ -366,15 +371,13 @@ static int pblk_core_init(struct pblk *pblk) atomic64_set(&pblk->nr_flush, 0); pblk->nr_flush_rst = 0; - pblk->pgs_in_buffer = geo->mw_cunits * geo->all_luns; - pblk->min_write_pgs = geo->ws_opt * (geo->csecs / PAGE_SIZE); max_write_ppas = pblk->min_write_pgs * geo->all_luns; pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA); pblk_set_sec_per_write(pblk, pblk->min_write_pgs); if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) { - pr_err("pblk: vector list too big(%u > %u)\n", + pblk_err(pblk, "vector list too big(%u > %u)\n", pblk->max_write_pgs, PBLK_MAX_REQ_ADDRS); return -EINVAL; } @@ -607,7 +610,7 @@ static int pblk_luns_init(struct pblk *pblk) /* TODO: Implement unbalanced LUN support */ if (geo->num_lun < 0) { - pr_err("pblk: unbalanced LUN config.\n"); + pblk_err(pblk, "unbalanced LUN config.\n"); return -EINVAL; } @@ -716,10 +719,11 @@ static int pblk_setup_line_meta_12(struct pblk *pblk, struct pblk_line *line, /* * In 1.2 spec. chunk state is not persisted by the device. Thus - * some of the values are reset each time pblk is instantiated. + * some of the values are reset each time pblk is instantiated, + * so we have to assume that the block is closed. */ if (lun_bb_meta[line->id] == NVM_BLK_T_FREE) - chunk->state = NVM_CHK_ST_FREE; + chunk->state = NVM_CHK_ST_CLOSED; else chunk->state = NVM_CHK_ST_OFFLINE; @@ -1026,7 +1030,7 @@ add_emeta_page: lm->emeta_sec[0], geo->clba); if (lm->min_blk_line > lm->blk_per_line) { - pr_err("pblk: config. not supported. Min. LUN in line:%d\n", + pblk_err(pblk, "config. not supported. Min. 
LUN in line:%d\n", lm->blk_per_line); return -EINVAL; } @@ -1078,7 +1082,7 @@ static int pblk_lines_init(struct pblk *pblk) } if (!nr_free_chks) { - pr_err("pblk: too many bad blocks prevent for sane instance\n"); + pblk_err(pblk, "too many bad blocks prevent for sane instance\n"); return -EINTR; } @@ -1108,7 +1112,7 @@ static int pblk_writer_init(struct pblk *pblk) int err = PTR_ERR(pblk->writer_ts); if (err != -EINTR) - pr_err("pblk: could not allocate writer kthread (%d)\n", + pblk_err(pblk, "could not allocate writer kthread (%d)\n", err); return err; } @@ -1154,7 +1158,7 @@ static void pblk_tear_down(struct pblk *pblk, bool graceful) pblk_rb_sync_l2p(&pblk->rwb); pblk_rl_free(&pblk->rl); - pr_debug("pblk: consistent tear down (graceful:%d)\n", graceful); + pblk_debug(pblk, "consistent tear down (graceful:%d)\n", graceful); } static void pblk_exit(void *private, bool graceful) @@ -1165,8 +1169,8 @@ static void pblk_exit(void *private, bool graceful) pblk_gc_exit(pblk, graceful); pblk_tear_down(pblk, graceful); -#ifdef CONFIG_NVM_DEBUG - pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk)); +#ifdef CONFIG_NVM_PBLK_DEBUG + pblk_info(pblk, "exit: L2P CRC: %x\n", pblk_l2p_crc(pblk)); #endif pblk_free(pblk); @@ -1189,34 +1193,35 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, struct pblk *pblk; int ret; - /* pblk supports 1.2 and 2.0 versions */ + pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL); + if (!pblk) + return ERR_PTR(-ENOMEM); + + pblk->dev = dev; + pblk->disk = tdisk; + pblk->state = PBLK_STATE_RUNNING; + pblk->gc.gc_enabled = 0; + if (!(geo->version == NVM_OCSSD_SPEC_12 || geo->version == NVM_OCSSD_SPEC_20)) { - pr_err("pblk: OCSSD version not supported (%u)\n", + pblk_err(pblk, "OCSSD version not supported (%u)\n", geo->version); + kfree(pblk); return ERR_PTR(-EINVAL); } if (geo->version == NVM_OCSSD_SPEC_12 && geo->dom & NVM_RSP_L2P) { - pr_err("pblk: host-side L2P table not supported. (%x)\n", + pblk_err(pblk, "host-side L2P table not supported. 
(%x)\n", geo->dom); + kfree(pblk); return ERR_PTR(-EINVAL); } - pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL); - if (!pblk) - return ERR_PTR(-ENOMEM); - - pblk->dev = dev; - pblk->disk = tdisk; - pblk->state = PBLK_STATE_RUNNING; - pblk->gc.gc_enabled = 0; - spin_lock_init(&pblk->resubmit_lock); spin_lock_init(&pblk->trans_lock); spin_lock_init(&pblk->lock); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_set(&pblk->inflight_writes, 0); atomic_long_set(&pblk->padded_writes, 0); atomic_long_set(&pblk->padded_wb, 0); @@ -1241,38 +1246,38 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, ret = pblk_core_init(pblk); if (ret) { - pr_err("pblk: could not initialize core\n"); + pblk_err(pblk, "could not initialize core\n"); goto fail; } ret = pblk_lines_init(pblk); if (ret) { - pr_err("pblk: could not initialize lines\n"); + pblk_err(pblk, "could not initialize lines\n"); goto fail_free_core; } ret = pblk_rwb_init(pblk); if (ret) { - pr_err("pblk: could not initialize write buffer\n"); + pblk_err(pblk, "could not initialize write buffer\n"); goto fail_free_lines; } ret = pblk_l2p_init(pblk, flags & NVM_TARGET_FACTORY); if (ret) { - pr_err("pblk: could not initialize maps\n"); + pblk_err(pblk, "could not initialize maps\n"); goto fail_free_rwb; } ret = pblk_writer_init(pblk); if (ret) { if (ret != -EINTR) - pr_err("pblk: could not initialize write thread\n"); + pblk_err(pblk, "could not initialize write thread\n"); goto fail_free_l2p; } ret = pblk_gc_init(pblk); if (ret) { - pr_err("pblk: could not initialize gc\n"); + pblk_err(pblk, "could not initialize gc\n"); goto fail_stop_writer; } @@ -1287,8 +1292,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9); blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue); - pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n", - tdisk->disk_name, + pblk_info(pblk, "luns:%u, lines:%d, secs:%llu, buf entries:%u\n", geo->all_luns, pblk->l_mg.nr_lines, (unsigned long long)pblk->rl.nr_secs, pblk->rwb.nr_entries); diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c index 55e9442a99e2..f6eec0212dfc 100644 --- a/drivers/lightnvm/pblk-rb.c +++ b/drivers/lightnvm/pblk-rb.c @@ -111,7 +111,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base, } while (iter > 0); up_write(&pblk_rb_lock); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_set(&rb->inflight_flush_point, 0); #endif @@ -308,7 +308,7 @@ void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data, entry = &rb->entries[ring_pos]; flags = READ_ONCE(entry->w_ctx.flags); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Caller must guarantee that the entry is free */ BUG_ON(!(flags & PBLK_WRITABLE_ENTRY)); #endif @@ -332,7 +332,7 @@ void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data, entry = &rb->entries[ring_pos]; flags = READ_ONCE(entry->w_ctx.flags); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Caller must guarantee that the entry is free */ BUG_ON(!(flags & PBLK_WRITABLE_ENTRY)); #endif @@ -362,7 +362,7 @@ static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio, return 0; } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_inc(&rb->inflight_flush_point); #endif @@ -547,7 +547,7 @@ try: page = virt_to_page(entry->data); if (!page) { - pr_err("pblk: could not allocate write bio page\n"); + pblk_err(pblk, "could not allocate write bio page\n"); flags &= ~PBLK_WRITTEN_DATA; flags 
|= PBLK_SUBMITTED_ENTRY; /* Release flags on context. Protect from writes */ @@ -557,7 +557,7 @@ try: if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) != rb->seg_size) { - pr_err("pblk: could not add page to write bio\n"); + pblk_err(pblk, "could not add page to write bio\n"); flags &= ~PBLK_WRITTEN_DATA; flags |= PBLK_SUBMITTED_ENTRY; /* Release flags on context. Protect from writes */ @@ -576,19 +576,19 @@ try: if (pad) { if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) { - pr_err("pblk: could not pad page in write bio\n"); + pblk_err(pblk, "could not pad page in write bio\n"); return NVM_IO_ERR; } if (pad < pblk->min_write_pgs) atomic64_inc(&pblk->pad_dist[pad - 1]); else - pr_warn("pblk: padding more than min. sectors\n"); + pblk_warn(pblk, "padding more than min. sectors\n"); atomic64_add(pad, &pblk->pad_wa); } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(pad, &pblk->padded_writes); #endif @@ -613,7 +613,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, int ret = 1; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Caller must ensure that the access will not cause an overflow */ BUG_ON(pos >= rb->nr_entries); #endif @@ -820,7 +820,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf) rb->subm, rb->sync, rb->l2p_update, -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_read(&rb->inflight_flush_point), #else 0, @@ -838,7 +838,7 @@ ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf) rb->subm, rb->sync, rb->l2p_update, -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_read(&rb->inflight_flush_point), #else 0, diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c index 18694694e5f0..5a46d7f9302f 100644 --- a/drivers/lightnvm/pblk-read.c +++ b/drivers/lightnvm/pblk-read.c @@ -28,7 +28,7 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, sector_t lba, struct ppa_addr ppa, int bio_iter, bool advanced_bio) { -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Callers must ensure that the ppa points to a cache address */ BUG_ON(pblk_ppa_empty(ppa)); BUG_ON(!pblk_addr_in_cache(ppa)); @@ -79,7 +79,7 @@ retry: WARN_ON(test_and_set_bit(i, read_bitmap)); meta_list[i].lba = cpu_to_le64(lba); advanced_bio = true; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_inc(&pblk->cache_reads); #endif } else { @@ -97,7 +97,7 @@ next: else rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(nr_secs, &pblk->inflight_reads); #endif } @@ -117,13 +117,13 @@ static void pblk_read_check_seq(struct pblk *pblk, struct nvm_rq *rqd, continue; if (lba != blba + i) { -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG struct ppa_addr *p; p = (nr_lbas == 1) ? &rqd->ppa_list[i] : &rqd->ppa_addr; - print_ppa(&pblk->dev->geo, p, "seq", i); + print_ppa(pblk, p, "seq", i); #endif - pr_err("pblk: corrupted read LBA (%llu/%llu)\n", + pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n", lba, (u64)blba + i); WARN_ON(1); } @@ -149,14 +149,14 @@ static void pblk_read_check_rand(struct pblk *pblk, struct nvm_rq *rqd, meta_lba = le64_to_cpu(meta_lba_list[j].lba); if (lba != meta_lba) { -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG struct ppa_addr *p; int nr_ppas = rqd->nr_ppas; p = (nr_ppas == 1) ? 
&rqd->ppa_list[j] : &rqd->ppa_addr; - print_ppa(&pblk->dev->geo, p, "seq", j); + print_ppa(pblk, p, "seq", j); #endif - pr_err("pblk: corrupted read LBA (%llu/%llu)\n", + pblk_err(pblk, "corrupted read LBA (%llu/%llu)\n", lba, meta_lba); WARN_ON(1); } @@ -185,7 +185,7 @@ static void pblk_read_put_rqd_kref(struct pblk *pblk, struct nvm_rq *rqd) static void pblk_end_user_read(struct bio *bio) { -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG WARN_ONCE(bio->bi_status, "pblk: corrupted read bio\n"); #endif bio_endio(bio); @@ -199,7 +199,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd, struct bio *int_bio = rqd->bio; unsigned long start_time = r_ctx->start_time; - generic_end_io_acct(dev->q, READ, &pblk->disk->part0, start_time); + generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time); if (rqd->error) pblk_log_read_err(pblk, rqd); @@ -212,7 +212,7 @@ static void __pblk_end_io_read(struct pblk *pblk, struct nvm_rq *rqd, if (put_line) pblk_read_put_rqd_kref(pblk, rqd); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(rqd->nr_ppas, &pblk->sync_reads); atomic_long_sub(rqd->nr_ppas, &pblk->inflight_reads); #endif @@ -231,74 +231,36 @@ static void pblk_end_io_read(struct nvm_rq *rqd) __pblk_end_io_read(pblk, rqd, true); } -static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd, - struct bio *orig_bio, unsigned int bio_init_idx, - unsigned long *read_bitmap) +static void pblk_end_partial_read(struct nvm_rq *rqd) { - struct pblk_sec_meta *meta_list = rqd->meta_list; - struct bio *new_bio; + struct pblk *pblk = rqd->private; + struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd); + struct pblk_pr_ctx *pr_ctx = r_ctx->private; + struct bio *new_bio = rqd->bio; + struct bio *bio = pr_ctx->orig_bio; struct bio_vec src_bv, dst_bv; - void *ppa_ptr = NULL; - void *src_p, *dst_p; - dma_addr_t dma_ppa_list = 0; - __le64 *lba_list_mem, *lba_list_media; - int nr_secs = rqd->nr_ppas; + struct pblk_sec_meta *meta_list = rqd->meta_list; + int bio_init_idx = pr_ctx->bio_init_idx; + unsigned long *read_bitmap = pr_ctx->bitmap; + int nr_secs = pr_ctx->orig_nr_secs; int nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); - int i, ret, hole; - - /* Re-use allocated memory for intermediate lbas */ - lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size); - lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size); - - new_bio = bio_alloc(GFP_KERNEL, nr_holes); - - if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes)) - goto fail_add_pages; - - if (nr_holes != new_bio->bi_vcnt) { - pr_err("pblk: malformed bio\n"); - goto fail; - } - - for (i = 0; i < nr_secs; i++) - lba_list_mem[i] = meta_list[i].lba; - - new_bio->bi_iter.bi_sector = 0; /* internal bio */ - bio_set_op_attrs(new_bio, REQ_OP_READ, 0); - - rqd->bio = new_bio; - rqd->nr_ppas = nr_holes; - rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM); - - if (unlikely(nr_holes == 1)) { - ppa_ptr = rqd->ppa_list; - dma_ppa_list = rqd->dma_ppa_list; - rqd->ppa_addr = rqd->ppa_list[0]; - } - - ret = pblk_submit_io_sync(pblk, rqd); - if (ret) { - bio_put(rqd->bio); - pr_err("pblk: sync read IO submission failed\n"); - goto fail; - } - - if (rqd->error) { - atomic_long_inc(&pblk->read_failed); -#ifdef CONFIG_NVM_DEBUG - pblk_print_failed_rqd(pblk, rqd, rqd->error); -#endif - } + __le64 *lba_list_mem, *lba_list_media; + void *src_p, *dst_p; + int hole, i; if (unlikely(nr_holes == 1)) { struct ppa_addr ppa; ppa = rqd->ppa_addr; - rqd->ppa_list = ppa_ptr; - 
rqd->dma_ppa_list = dma_ppa_list; + rqd->ppa_list = pr_ctx->ppa_ptr; + rqd->dma_ppa_list = pr_ctx->dma_ppa_list; rqd->ppa_list[0] = ppa; } + /* Re-use allocated memory for intermediate lbas */ + lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size); + lba_list_media = (((void *)rqd->ppa_list) + 2 * pblk_dma_ppa_size); + for (i = 0; i < nr_secs; i++) { lba_list_media[i] = meta_list[i].lba; meta_list[i].lba = lba_list_mem[i]; @@ -316,7 +278,7 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd, meta_list[hole].lba = lba_list_media[i]; src_bv = new_bio->bi_io_vec[i++]; - dst_bv = orig_bio->bi_io_vec[bio_init_idx + hole]; + dst_bv = bio->bi_io_vec[bio_init_idx + hole]; src_p = kmap_atomic(src_bv.bv_page); dst_p = kmap_atomic(dst_bv.bv_page); @@ -334,19 +296,107 @@ static int pblk_partial_read(struct pblk *pblk, struct nvm_rq *rqd, } while (hole < nr_secs); bio_put(new_bio); + kfree(pr_ctx); /* restore original request */ rqd->bio = NULL; rqd->nr_ppas = nr_secs; + bio_endio(bio); __pblk_end_io_read(pblk, rqd, false); - return NVM_IO_DONE; +} -fail: - /* Free allocated pages in new bio */ +static int pblk_setup_partial_read(struct pblk *pblk, struct nvm_rq *rqd, + unsigned int bio_init_idx, + unsigned long *read_bitmap, + int nr_holes) +{ + struct pblk_sec_meta *meta_list = rqd->meta_list; + struct pblk_g_ctx *r_ctx = nvm_rq_to_pdu(rqd); + struct pblk_pr_ctx *pr_ctx; + struct bio *new_bio, *bio = r_ctx->private; + __le64 *lba_list_mem; + int nr_secs = rqd->nr_ppas; + int i; + + /* Re-use allocated memory for intermediate lbas */ + lba_list_mem = (((void *)rqd->ppa_list) + pblk_dma_ppa_size); + + new_bio = bio_alloc(GFP_KERNEL, nr_holes); + + if (pblk_bio_add_pages(pblk, new_bio, GFP_KERNEL, nr_holes)) + goto fail_bio_put; + + if (nr_holes != new_bio->bi_vcnt) { + WARN_ONCE(1, "pblk: malformed bio\n"); + goto fail_free_pages; + } + + pr_ctx = kmalloc(sizeof(struct pblk_pr_ctx), GFP_KERNEL); + if (!pr_ctx) + goto fail_free_pages; + + for (i = 0; i < nr_secs; i++) + lba_list_mem[i] = meta_list[i].lba; + + new_bio->bi_iter.bi_sector = 0; /* internal bio */ + bio_set_op_attrs(new_bio, REQ_OP_READ, 0); + + rqd->bio = new_bio; + rqd->nr_ppas = nr_holes; + rqd->flags = pblk_set_read_mode(pblk, PBLK_READ_RANDOM); + + pr_ctx->ppa_ptr = NULL; + pr_ctx->orig_bio = bio; + bitmap_copy(pr_ctx->bitmap, read_bitmap, NVM_MAX_VLBA); + pr_ctx->bio_init_idx = bio_init_idx; + pr_ctx->orig_nr_secs = nr_secs; + r_ctx->private = pr_ctx; + + if (unlikely(nr_holes == 1)) { + pr_ctx->ppa_ptr = rqd->ppa_list; + pr_ctx->dma_ppa_list = rqd->dma_ppa_list; + rqd->ppa_addr = rqd->ppa_list[0]; + } + return 0; + +fail_free_pages: pblk_bio_free_pages(pblk, new_bio, 0, new_bio->bi_vcnt); -fail_add_pages: - pr_err("pblk: failed to perform partial read\n"); +fail_bio_put: + bio_put(new_bio); + + return -ENOMEM; +} + +static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd, + unsigned int bio_init_idx, + unsigned long *read_bitmap, int nr_secs) +{ + int nr_holes; + int ret; + + nr_holes = nr_secs - bitmap_weight(read_bitmap, nr_secs); + + if (pblk_setup_partial_read(pblk, rqd, bio_init_idx, read_bitmap, + nr_holes)) + return NVM_IO_ERR; + + rqd->end_io = pblk_end_partial_read; + + ret = pblk_submit_io(pblk, rqd); + if (ret) { + bio_put(rqd->bio); + pblk_err(pblk, "partial read IO submission failed\n"); + goto err; + } + + return NVM_IO_OK; + +err: + pblk_err(pblk, "failed to perform partial read\n"); + + /* Free allocated pages in new bio */ + pblk_bio_free_pages(pblk, rqd->bio, 0, 
rqd->bio->bi_vcnt); __pblk_end_io_read(pblk, rqd, false); return NVM_IO_ERR; } @@ -359,7 +409,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, struct bio *bio, pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_inc(&pblk->inflight_reads); #endif @@ -382,7 +432,7 @@ retry: WARN_ON(test_and_set_bit(0, read_bitmap)); meta_list[0].lba = cpu_to_le64(lba); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_inc(&pblk->cache_reads); #endif } else { @@ -401,7 +451,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio) struct pblk_g_ctx *r_ctx; struct nvm_rq *rqd; unsigned int bio_init_idx; - unsigned long read_bitmap; /* Max 64 ppas per request */ + DECLARE_BITMAP(read_bitmap, NVM_MAX_VLBA); int ret = NVM_IO_ERR; /* logic error: lba out-of-bounds. Ignore read request */ @@ -411,9 +461,10 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio) return NVM_IO_ERR; } - generic_start_io_acct(q, READ, bio_sectors(bio), &pblk->disk->part0); + generic_start_io_acct(q, REQ_OP_READ, bio_sectors(bio), + &pblk->disk->part0); - bitmap_zero(&read_bitmap, nr_secs); + bitmap_zero(read_bitmap, nr_secs); rqd = pblk_alloc_rqd(pblk, PBLK_READ); @@ -436,7 +487,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio) rqd->meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &rqd->dma_meta_list); if (!rqd->meta_list) { - pr_err("pblk: not able to allocate ppa list\n"); + pblk_err(pblk, "not able to allocate ppa list\n"); goto fail_rqd_free; } @@ -444,32 +495,32 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio) rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size; rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size; - pblk_read_ppalist_rq(pblk, rqd, bio, blba, &read_bitmap); + pblk_read_ppalist_rq(pblk, rqd, bio, blba, read_bitmap); } else { - pblk_read_rq(pblk, rqd, bio, blba, &read_bitmap); + pblk_read_rq(pblk, rqd, bio, blba, read_bitmap); } - if (bitmap_full(&read_bitmap, nr_secs)) { + if (bitmap_full(read_bitmap, nr_secs)) { atomic_inc(&pblk->inflight_io); __pblk_end_io_read(pblk, rqd, false); return NVM_IO_DONE; } /* All sectors are to be read from the device */ - if (bitmap_empty(&read_bitmap, rqd->nr_ppas)) { + if (bitmap_empty(read_bitmap, rqd->nr_ppas)) { struct bio *int_bio = NULL; /* Clone read bio to deal with read errors internally */ int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set); if (!int_bio) { - pr_err("pblk: could not clone read bio\n"); + pblk_err(pblk, "could not clone read bio\n"); goto fail_end_io; } rqd->bio = int_bio; if (pblk_submit_io(pblk, rqd)) { - pr_err("pblk: read IO submission failed\n"); + pblk_err(pblk, "read IO submission failed\n"); ret = NVM_IO_ERR; goto fail_end_io; } @@ -480,8 +531,15 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio) /* The read bio request could be partially filled by the write buffer, * but there are some holes that need to be read from the drive. 
*/ - return pblk_partial_read(pblk, rqd, bio, bio_init_idx, &read_bitmap); + ret = pblk_partial_read_bio(pblk, rqd, bio_init_idx, read_bitmap, + nr_secs); + if (ret) + goto fail_meta_free; + + return NVM_IO_OK; +fail_meta_free: + nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); fail_rqd_free: pblk_free_rqd(pblk, rqd, PBLK_READ); return ret; @@ -514,7 +572,7 @@ static int read_ppalist_rq_gc(struct pblk *pblk, struct nvm_rq *rqd, rqd->ppa_list[valid_secs++] = ppa_list_l2p[i]; } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(valid_secs, &pblk->inflight_reads); #endif @@ -548,7 +606,7 @@ static int read_rq_gc(struct pblk *pblk, struct nvm_rq *rqd, rqd->ppa_addr = ppa_l2p; valid_secs = 1; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_inc(&pblk->inflight_reads); #endif @@ -595,7 +653,8 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq) bio = pblk_bio_map_addr(pblk, gc_rq->data, gc_rq->secs_to_gc, data_len, PBLK_VMALLOC_META, GFP_KERNEL); if (IS_ERR(bio)) { - pr_err("pblk: could not allocate GC bio (%lu)\n", PTR_ERR(bio)); + pblk_err(pblk, "could not allocate GC bio (%lu)\n", + PTR_ERR(bio)); goto err_free_dma; } @@ -609,7 +668,7 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq) if (pblk_submit_io_sync(pblk, &rqd)) { ret = -EIO; - pr_err("pblk: GC read request failed\n"); + pblk_err(pblk, "GC read request failed\n"); goto err_free_bio; } @@ -619,12 +678,12 @@ int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq) if (rqd.error) { atomic_long_inc(&pblk->read_failed_gc); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG pblk_print_failed_rqd(pblk, &rqd, rqd.error); #endif } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(gc_rq->secs_to_gc, &pblk->sync_reads); atomic_long_add(gc_rq->secs_to_gc, &pblk->recov_gc_reads); atomic_long_sub(gc_rq->secs_to_gc, &pblk->inflight_reads); diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index 3a5069183859..e232e47e1353 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -77,7 +77,7 @@ static int pblk_recov_l2p_from_emeta(struct pblk *pblk, struct pblk_line *line) } if (nr_valid_lbas != nr_lbas) - pr_err("pblk: line %d - inconsistent lba list(%llu/%llu)\n", + pblk_err(pblk, "line %d - inconsistent lba list(%llu/%llu)\n", line->id, nr_valid_lbas, nr_lbas); line->left_msecs = 0; @@ -184,7 +184,7 @@ next_read_rq: /* If read fails, more padding is needed */ ret = pblk_submit_io_sync(pblk, rqd); if (ret) { - pr_err("pblk: I/O submission failed: %d\n", ret); + pblk_err(pblk, "I/O submission failed: %d\n", ret); return ret; } @@ -194,7 +194,7 @@ next_read_rq: * we cannot recover from here. Need FTL log. 
*/ if (rqd->error && rqd->error != NVM_RSP_WARN_HIGHECC) { - pr_err("pblk: L2P recovery failed (%d)\n", rqd->error); + pblk_err(pblk, "L2P recovery failed (%d)\n", rqd->error); return -EINTR; } @@ -273,7 +273,7 @@ static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line, next_pad_rq: rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); if (rq_ppas < pblk->min_write_pgs) { - pr_err("pblk: corrupted pad line %d\n", line->id); + pblk_err(pblk, "corrupted pad line %d\n", line->id); goto fail_free_pad; } @@ -342,7 +342,7 @@ next_pad_rq: ret = pblk_submit_io(pblk, rqd); if (ret) { - pr_err("pblk: I/O submission failed: %d\n", ret); + pblk_err(pblk, "I/O submission failed: %d\n", ret); pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); goto fail_free_bio; } @@ -356,12 +356,12 @@ next_pad_rq: if (!wait_for_completion_io_timeout(&pad_rq->wait, msecs_to_jiffies(PBLK_COMMAND_TIMEOUT_MS))) { - pr_err("pblk: pad write timed out\n"); + pblk_err(pblk, "pad write timed out\n"); ret = -ETIME; } if (!pblk_line_is_full(line)) - pr_err("pblk: corrupted padded line: %d\n", line->id); + pblk_err(pblk, "corrupted padded line: %d\n", line->id); vfree(data); free_rq: @@ -461,7 +461,7 @@ next_rq: ret = pblk_submit_io_sync(pblk, rqd); if (ret) { - pr_err("pblk: I/O submission failed: %d\n", ret); + pblk_err(pblk, "I/O submission failed: %d\n", ret); return ret; } @@ -501,11 +501,11 @@ next_rq: ret = pblk_recov_pad_oob(pblk, line, pad_secs); if (ret) - pr_err("pblk: OOB padding failed (err:%d)\n", ret); + pblk_err(pblk, "OOB padding failed (err:%d)\n", ret); ret = pblk_recov_read_oob(pblk, line, p, r_ptr); if (ret) - pr_err("pblk: OOB read failed (err:%d)\n", ret); + pblk_err(pblk, "OOB read failed (err:%d)\n", ret); left_ppas = 0; } @@ -592,7 +592,7 @@ next_rq: ret = pblk_submit_io_sync(pblk, rqd); if (ret) { - pr_err("pblk: I/O submission failed: %d\n", ret); + pblk_err(pblk, "I/O submission failed: %d\n", ret); bio_put(bio); return ret; } @@ -671,14 +671,14 @@ static int pblk_recov_l2p_from_oob(struct pblk *pblk, struct pblk_line *line) ret = pblk_recov_scan_oob(pblk, line, p, &done); if (ret) { - pr_err("pblk: could not recover L2P from OOB\n"); + pblk_err(pblk, "could not recover L2P from OOB\n"); goto out; } if (!done) { ret = pblk_recov_scan_all_oob(pblk, line, p); if (ret) { - pr_err("pblk: could not recover L2P from OOB\n"); + pblk_err(pblk, "could not recover L2P from OOB\n"); goto out; } } @@ -737,14 +737,15 @@ static int pblk_recov_check_line_version(struct pblk *pblk, struct line_header *header = &emeta->header; if (header->version_major != EMETA_VERSION_MAJOR) { - pr_err("pblk: line major version mismatch: %d, expected: %d\n", - header->version_major, EMETA_VERSION_MAJOR); + pblk_err(pblk, "line major version mismatch: %d, expected: %d\n", + header->version_major, EMETA_VERSION_MAJOR); return 1; } -#ifdef NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG if (header->version_minor > EMETA_VERSION_MINOR) - pr_info("pblk: newer line minor version found: %d\n", line_v); + pblk_info(pblk, "newer line minor version found: %d\n", + header->version_minor); #endif return 0; @@ -851,7 +852,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) continue; if (smeta_buf->header.version_major != SMETA_VERSION_MAJOR) { - pr_err("pblk: found incompatible line version %u\n", + pblk_err(pblk, "found incompatible line version %u\n", smeta_buf->header.version_major); return ERR_PTR(-EINVAL); } @@ -863,7 +864,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) } if (memcmp(pblk->instance_uuid, smeta_buf->header.uuid, 
16)) { - pr_debug("pblk: ignore line %u due to uuid mismatch\n", + pblk_debug(pblk, "ignore line %u due to uuid mismatch\n", i); continue; } @@ -887,7 +888,7 @@ struct pblk_line *pblk_recov_l2p(struct pblk *pblk) pblk_recov_line_add_ordered(&recov_list, line); found_lines++; - pr_debug("pblk: recovering data line %d, seq:%llu\n", + pblk_debug(pblk, "recovering data line %d, seq:%llu\n", line->id, smeta_buf->seq_nr); } @@ -947,7 +948,7 @@ next: line->emeta = NULL; } else { if (open_lines > 1) - pr_err("pblk: failed to recover L2P\n"); + pblk_err(pblk, "failed to recover L2P\n"); open_lines++; line->meta_line = meta_line; @@ -976,7 +977,7 @@ next: out: if (found_lines != recovered_lines) - pr_err("pblk: failed to recover all found lines %d/%d\n", + pblk_err(pblk, "failed to recover all found lines %d/%d\n", found_lines, recovered_lines); return data_line; @@ -999,7 +1000,7 @@ int pblk_recov_pad(struct pblk *pblk) ret = pblk_recov_pad_oob(pblk, line, left_msecs); if (ret) { - pr_err("pblk: Tear down padding failed (%d)\n", ret); + pblk_err(pblk, "tear down padding failed (%d)\n", ret); return ret; } diff --git a/drivers/lightnvm/pblk-sysfs.c b/drivers/lightnvm/pblk-sysfs.c index 88a0a7c407aa..9fc3dfa168b4 100644 --- a/drivers/lightnvm/pblk-sysfs.c +++ b/drivers/lightnvm/pblk-sysfs.c @@ -268,7 +268,7 @@ static ssize_t pblk_sysfs_lines(struct pblk *pblk, char *page) spin_unlock(&l_mg->free_lock); if (nr_free_lines != free_line_cnt) - pr_err("pblk: corrupted free line list:%d/%d\n", + pblk_err(pblk, "corrupted free line list:%d/%d\n", nr_free_lines, free_line_cnt); sz = snprintf(page, PAGE_SIZE - sz, @@ -421,7 +421,7 @@ static ssize_t pblk_sysfs_get_padding_dist(struct pblk *pblk, char *page) return sz; } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG static ssize_t pblk_sysfs_stats_debug(struct pblk *pblk, char *page) { return snprintf(page, PAGE_SIZE, @@ -598,7 +598,7 @@ static struct attribute sys_padding_dist = { .mode = 0644, }; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG static struct attribute sys_stats_debug_attr = { .name = "stats", .mode = 0444, @@ -619,7 +619,7 @@ static struct attribute *pblk_attrs[] = { &sys_write_amp_mileage, &sys_write_amp_trip, &sys_padding_dist, -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG &sys_stats_debug_attr, #endif NULL, @@ -654,7 +654,7 @@ static ssize_t pblk_sysfs_show(struct kobject *kobj, struct attribute *attr, return pblk_sysfs_get_write_amp_trip(pblk, buf); else if (strcmp(attr->name, "padding_dist") == 0) return pblk_sysfs_get_padding_dist(pblk, buf); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG else if (strcmp(attr->name, "stats") == 0) return pblk_sysfs_stats_debug(pblk, buf); #endif @@ -697,8 +697,7 @@ int pblk_sysfs_init(struct gendisk *tdisk) kobject_get(&parent_dev->kobj), "%s", "pblk"); if (ret) { - pr_err("pblk: could not register %s/pblk\n", - tdisk->disk_name); + pblk_err(pblk, "could not register\n"); return ret; } diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index f353e52941f5..ee774a86cf1e 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c @@ -38,7 +38,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd, /* Release flags on context. 
Protect from writes */ smp_store_release(&w_ctx->flags, flags); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_dec(&rwb->inflight_flush_point); #endif } @@ -51,7 +51,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd, pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid, c_ctx->nr_padded); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(rqd->nr_ppas, &pblk->sync_writes); #endif @@ -78,7 +78,7 @@ static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd, unsigned long flags; unsigned long pos; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes); #endif @@ -196,7 +196,7 @@ static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx) list_add_tail(&r_ctx->list, &pblk->resubmit_list); spin_unlock(&pblk->resubmit_lock); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes); #endif } @@ -238,7 +238,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd) recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC); if (!recovery) { - pr_err("pblk: could not allocate recovery work\n"); + pblk_err(pblk, "could not allocate recovery work\n"); return; } @@ -258,7 +258,7 @@ static void pblk_end_io_write(struct nvm_rq *rqd) pblk_end_w_fail(pblk, rqd); return; } -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG else WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n"); #endif @@ -279,7 +279,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd) if (rqd->error) { pblk_log_write_err(pblk, rqd); - pr_err("pblk: metadata I/O failed. Line %d\n", line->id); + pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id); line->w_err_gc->has_write_err = 1; } @@ -356,11 +356,11 @@ static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail, secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush); -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG if ((!secs_to_sync && secs_to_flush) || (secs_to_sync < 0) || (secs_to_sync > secs_avail && !secs_to_flush)) { - pr_err("pblk: bad sector calculation (a:%d,s:%d,f:%d)\n", + pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n", secs_avail, secs_to_sync, secs_to_flush); } #endif @@ -397,7 +397,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, l_mg->emeta_alloc_type, GFP_KERNEL); if (IS_ERR(bio)) { - pr_err("pblk: failed to map emeta io"); + pblk_err(pblk, "failed to map emeta io"); ret = PTR_ERR(bio); goto fail_free_rqd; } @@ -428,7 +428,7 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) ret = pblk_submit_io(pblk, rqd); if (ret) { - pr_err("pblk: emeta I/O submission failed: %d\n", ret); + pblk_err(pblk, "emeta I/O submission failed: %d\n", ret); goto fail_rollback; } @@ -518,7 +518,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd) /* Assign lbas to ppas and populate request structure */ err = pblk_setup_w_rq(pblk, rqd, &erase_ppa); if (err) { - pr_err("pblk: could not setup write request: %d\n", err); + pblk_err(pblk, "could not setup write request: %d\n", err); return NVM_IO_ERR; } @@ -527,7 +527,7 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd) /* Submit data write for current data line */ err = pblk_submit_io(pblk, rqd); if (err) { - pr_err("pblk: data I/O submission failed: %d\n", err); + pblk_err(pblk, "data I/O submission failed: %d\n", err); return 
NVM_IO_ERR; } @@ -549,7 +549,8 @@ static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd) /* Submit metadata write for previous data line */ err = pblk_submit_meta_io(pblk, meta_line); if (err) { - pr_err("pblk: metadata I/O submission failed: %d", err); + pblk_err(pblk, "metadata I/O submission failed: %d", + err); return NVM_IO_ERR; } } @@ -614,7 +615,7 @@ static int pblk_submit_write(struct pblk *pblk) secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail, secs_to_flush); if (secs_to_sync > pblk->max_write_pgs) { - pr_err("pblk: bad buffer sync calculation\n"); + pblk_err(pblk, "bad buffer sync calculation\n"); return 1; } @@ -633,14 +634,14 @@ static int pblk_submit_write(struct pblk *pblk) if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync, secs_avail)) { - pr_err("pblk: corrupted write bio\n"); + pblk_err(pblk, "corrupted write bio\n"); goto fail_put_bio; } if (pblk_submit_io_set(pblk, rqd)) goto fail_free_bio; -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_long_add(secs_to_sync, &pblk->sub_writes); #endif diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 34cc1d64a9d4..4760af7b6499 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -119,6 +119,16 @@ struct pblk_g_ctx { u64 lba; }; +/* partial read context */ +struct pblk_pr_ctx { + struct bio *orig_bio; + DECLARE_BITMAP(bitmap, NVM_MAX_VLBA); + unsigned int orig_nr_secs; + unsigned int bio_init_idx; + void *ppa_ptr; + dma_addr_t dma_ppa_list; +}; + /* Pad context */ struct pblk_pad_rq { struct pblk *pblk; @@ -193,7 +203,7 @@ struct pblk_rb { spinlock_t w_lock; /* Write lock */ spinlock_t s_lock; /* Sync lock */ -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG atomic_t inflight_flush_point; /* Not served REQ_FLUSH | REQ_FUA */ #endif }; @@ -608,9 +618,6 @@ struct pblk { int min_write_pgs; /* Minimum amount of pages required by controller */ int max_write_pgs; /* Maximum amount of pages supported by controller */ - int pgs_in_buffer; /* Number of pages that need to be held in buffer to - * guarantee successful reads. - */ sector_t capacity; /* Device capacity when bad blocks are subtracted */ @@ -639,7 +646,7 @@ struct pblk { u64 nr_flush_rst; /* Flushes reset value for pad dist.*/ atomic64_t nr_flush; /* Number of flush/fua I/O */ -#ifdef CONFIG_NVM_DEBUG +#ifdef CONFIG_NVM_PBLK_DEBUG /* Non-persistent debug counters, 4kb sector I/Os */ atomic_long_t inflight_writes; /* Inflight writes (user and gc) */ atomic_long_t padded_writes; /* Sectors padded due to flush/fua */ @@ -706,6 +713,15 @@ struct pblk_line_ws { #define pblk_g_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_g_ctx)) #define pblk_w_rq_size (sizeof(struct nvm_rq) + sizeof(struct pblk_c_ctx)) +#define pblk_err(pblk, fmt, ...) \ + pr_err("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__) +#define pblk_info(pblk, fmt, ...) \ + pr_info("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__) +#define pblk_warn(pblk, fmt, ...) \ + pr_warn("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__) +#define pblk_debug(pblk, fmt, ...) 
\ + pr_debug("pblk %s: " fmt, pblk->disk->disk_name, ##__VA_ARGS__) + /* * pblk ring buffer operations */ @@ -1282,20 +1298,22 @@ static inline int pblk_io_aligned(struct pblk *pblk, int nr_secs) return !(nr_secs % pblk->min_write_pgs); } -#ifdef CONFIG_NVM_DEBUG -static inline void print_ppa(struct nvm_geo *geo, struct ppa_addr *p, +#ifdef CONFIG_NVM_PBLK_DEBUG +static inline void print_ppa(struct pblk *pblk, struct ppa_addr *p, char *msg, int error) { + struct nvm_geo *geo = &pblk->dev->geo; + if (p->c.is_cached) { - pr_err("ppa: (%s: %x) cache line: %llu\n", + pblk_err(pblk, "ppa: (%s: %x) cache line: %llu\n", msg, error, (u64)p->c.line); } else if (geo->version == NVM_OCSSD_SPEC_12) { - pr_err("ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n", + pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,blk:%d,pg:%d,pl:%d,sec:%d\n", msg, error, p->g.ch, p->g.lun, p->g.blk, p->g.pg, p->g.pl, p->g.sec); } else { - pr_err("ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n", + pblk_err(pblk, "ppa: (%s: %x):ch:%d,lun:%d,chk:%d,sec:%d\n", msg, error, p->m.grp, p->m.pu, p->m.chk, p->m.sec); } @@ -1307,16 +1325,16 @@ static inline void pblk_print_failed_rqd(struct pblk *pblk, struct nvm_rq *rqd, int bit = -1; if (rqd->nr_ppas == 1) { - print_ppa(&pblk->dev->geo, &rqd->ppa_addr, "rqd", error); + print_ppa(pblk, &rqd->ppa_addr, "rqd", error); return; } while ((bit = find_next_bit((void *)&rqd->ppa_status, rqd->nr_ppas, bit + 1)) < rqd->nr_ppas) { - print_ppa(&pblk->dev->geo, &rqd->ppa_list[bit], "rqd", error); + print_ppa(pblk, &rqd->ppa_list[bit], "rqd", error); } - pr_err("error:%d, ppa_status:%llx\n", error, rqd->ppa_status); + pblk_err(pblk, "error:%d, ppa_status:%llx\n", error, rqd->ppa_status); } static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev, @@ -1347,7 +1365,7 @@ static inline int pblk_boundary_ppa_checks(struct nvm_tgt_dev *tgt_dev, continue; } - print_ppa(geo, ppa, "boundary", i); + print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i); return 1; } @@ -1377,7 +1395,7 @@ static inline int pblk_check_io(struct pblk *pblk, struct nvm_rq *rqd) spin_lock(&line->lock); if (line->state != PBLK_LINESTATE_OPEN) { - pr_err("pblk: bad ppa: line:%d,state:%d\n", + pblk_err(pblk, "bad ppa: line:%d,state:%d\n", line->id, line->state); WARN_ON(1); spin_unlock(&line->lock); diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index d6bf294f3907..05f82ff6f016 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -328,13 +328,6 @@ struct cached_dev { */ atomic_t has_dirty; - /* - * Set to zero by things that touch the backing volume-- except - * writeback. Incremented by writeback. Used to determine when to - * accelerate idle writeback. - */ - atomic_t backing_idle; - struct bch_ratelimit writeback_rate; struct delayed_work writeback_rate_update; @@ -423,9 +416,9 @@ struct cache { /* * When allocating new buckets, prio_write() gets first dibs - since we * may not be allocate at all without writing priorities and gens. - * prio_buckets[] contains the last buckets we wrote priorities to (so - * gc can mark them as metadata), prio_next[] contains the buckets - * allocated for the next prio write. + * prio_last_buckets[] contains the last buckets we wrote priorities to + * (so gc can mark them as metadata), prio_buckets[] contains the + * buckets allocated for the next prio write. 
*/ uint64_t *prio_buckets; uint64_t *prio_last_buckets; @@ -474,6 +467,7 @@ struct cache { struct gc_stat { size_t nodes; + size_t nodes_pre; size_t key_bytes; size_t nkeys; @@ -514,6 +508,8 @@ struct cache_set { struct cache_accounting accounting; unsigned long flags; + atomic_t idle_counter; + atomic_t at_max_writeback_rate; struct cache_sb sb; @@ -523,8 +519,10 @@ struct cache_set { struct bcache_device **devices; unsigned devices_max_used; + atomic_t attached_dev_nr; struct list_head cached_devs; uint64_t cached_dev_sectors; + atomic_long_t flash_dev_dirty_sectors; struct closure caching; struct closure sb_write; @@ -603,6 +601,10 @@ struct cache_set { */ atomic_t rescale; /* + * used for GC, identify if any front side I/Os is inflight + */ + atomic_t search_inflight; + /* * When we invalidate buckets, we use both the priority and the amount * of good data to determine which buckets to reuse first - to weight * those together consistently we keep track of the smallest nonzero @@ -995,7 +997,7 @@ void bch_open_buckets_free(struct cache_set *); int bch_cache_allocator_start(struct cache *ca); void bch_debug_exit(void); -int bch_debug_init(struct kobject *); +void bch_debug_init(struct kobject *kobj); void bch_request_exit(void); int bch_request_init(void); diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index f3403b45bc28..596c93b44e9b 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -366,6 +366,10 @@ EXPORT_SYMBOL(bch_btree_keys_init); /* Binary tree stuff for auxiliary search trees */ +/* + * return array index next to j when does in-order traverse + * of a binary tree which is stored in a linear array + */ static unsigned inorder_next(unsigned j, unsigned size) { if (j * 2 + 1 < size) { @@ -379,6 +383,10 @@ static unsigned inorder_next(unsigned j, unsigned size) return j; } +/* + * return array index previous to j when does in-order traverse + * of a binary tree which is stored in a linear array + */ static unsigned inorder_prev(unsigned j, unsigned size) { if (j * 2 < size) { @@ -421,6 +429,10 @@ static unsigned __to_inorder(unsigned j, unsigned size, unsigned extra) return j; } +/* + * Return the cacheline index in bset_tree->data, where j is index + * from a linear array which stores the auxiliar binary tree + */ static unsigned to_inorder(unsigned j, struct bset_tree *t) { return __to_inorder(j, t->size, t->extra); @@ -441,6 +453,10 @@ static unsigned __inorder_to_tree(unsigned j, unsigned size, unsigned extra) return j; } +/* + * Return an index from a linear array which stores the auxiliar binary + * tree, j is the cacheline index of t->data. + */ static unsigned inorder_to_tree(unsigned j, struct bset_tree *t) { return __inorder_to_tree(j, t->size, t->extra); @@ -546,6 +562,20 @@ static inline uint64_t shrd128(uint64_t high, uint64_t low, uint8_t shift) return low; } +/* + * Calculate mantissa value for struct bkey_float. + * If most significant bit of f->exponent is not set, then + * - f->exponent >> 6 is 0 + * - p[0] points to bkey->low + * - p[-1] borrows bits from KEY_INODE() of bkey->high + * if most isgnificant bits of f->exponent is set, then + * - f->exponent >> 6 is 1 + * - p[0] points to bits from KEY_INODE() of bkey->high + * - p[-1] points to other bits from KEY_INODE() of + * bkey->high too. + * See make_bfloat() to check when most significant bit of f->exponent + * is set or not. 
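
The bfloat_mantissa() comment above boils down to storing, per tree position, a small window of key bits placed just below the highest bit where the position's neighbours differ. A simplified userspace sketch of that idea, using a single 64-bit key and a 16-bit window, and ignoring the two-word KEY_INODE/low split (and hence the +64 exponent flag) that the real make_bfloat() has to handle:

#include <stdint.h>
#include <stdio.h>

/* Highest set bit, counted from 1, like the kernel's fls64(); 0 if x == 0. */
static int fls64_sketch(uint64_t x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

/*
 * Keep only a 16-bit window of the tree key, positioned just below the
 * highest bit where its two neighbours l and r differ; comparing windows
 * is then usually enough to order a search key against the tree key.
 */
struct bfloat_sketch {
	uint8_t exponent;	/* bit position the window starts at */
	uint16_t mantissa;	/* 16 key bits taken from that position */
};

static struct bfloat_sketch make_bfloat_sketch(uint64_t l, uint64_t k,
					       uint64_t r)
{
	struct bfloat_sketch f;
	int shift = fls64_sketch(l ^ r);

	shift = shift > 16 ? shift - 16 : 0;
	f.exponent = shift;
	f.mantissa = (k >> shift) & 0xffff;
	return f;
}

int main(void)
{
	uint64_t l = 0x1000, k = 0x1c00, r = 0x2000, search = 0x1800;
	struct bfloat_sketch f = make_bfloat_sketch(l, k, r);
	uint16_t m = (search >> f.exponent) & 0xffff;

	printf("descend %s\n", m < f.mantissa ? "left" : "right");
	return 0;
}

Roughly speaking, comparing the stored window against the same window of the search key decides which child to descend into; only when the float cannot represent the key exactly does the real code fall back to comparing full keys.
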
+ */ static inline unsigned bfloat_mantissa(const struct bkey *k, struct bkey_float *f) { @@ -570,6 +600,16 @@ static void make_bfloat(struct bset_tree *t, unsigned j) BUG_ON(m < l || m > r); BUG_ON(bkey_next(p) != m); + /* + * If l and r have different KEY_INODE values (different backing + * device), f->exponent records how many least significant bits + * are different in KEY_INODE values and sets most significant + * bits to 1 (by +64). + * If l and r have same KEY_INODE value, f->exponent records + * how many different bits in least significant bits of bkey->low. + * See bfloat_mantiss() how the most significant bit of + * f->exponent is used to calculate bfloat mantissa value. + */ if (KEY_INODE(l) != KEY_INODE(r)) f->exponent = fls64(KEY_INODE(r) ^ KEY_INODE(l)) + 64; else @@ -633,6 +673,15 @@ void bch_bset_init_next(struct btree_keys *b, struct bset *i, uint64_t magic) } EXPORT_SYMBOL(bch_bset_init_next); +/* + * Build auxiliary binary tree 'struct bset_tree *t', this tree is used to + * accelerate bkey search in a btree node (pointed by bset_tree->data in + * memory). After search in the auxiliar tree by calling bset_search_tree(), + * a struct bset_search_iter is returned which indicates range [l, r] from + * bset_tree->data where the searching bkey might be inside. Then a followed + * linear comparison does the exact search, see __bch_bset_search() for how + * the auxiliary tree is used. + */ void bch_bset_build_written_tree(struct btree_keys *b) { struct bset_tree *t = bset_tree_last(b); @@ -898,6 +947,17 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t, unsigned inorder, j, n = 1; do { + /* + * A bit trick here. + * If p < t->size, (int)(p - t->size) is a minus value and + * the most significant bit is set, right shifting 31 bits + * gets 1. If p >= t->size, the most significant bit is + * not set, right shifting 31 bits gets 0. + * So the following 2 lines equals to + * if (p >= t->size) + * p = 0; + * but a branch instruction is avoided. + */ unsigned p = n << 4; p &= ((int) (p - t->size)) >> 31; @@ -907,6 +967,9 @@ static struct bset_search_iter bset_search_tree(struct bset_tree *t, f = &t->tree[j]; /* + * Similar bit trick, use subtract operation to avoid a branch + * instruction. + * * n = (f->mantissa > bfloat_mantissa()) * ? 
j * 2 * : j * 2 + 1; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 547c9eedc2f4..c19f7716df88 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -90,6 +90,9 @@ #define MAX_NEED_GC 64 #define MAX_SAVE_PRIO 72 +#define MAX_GC_TIMES 100 +#define MIN_GC_NODES 100 +#define GC_SLEEP_MS 100 #define PTR_DIRTY_BIT (((uint64_t) 1 << 36)) @@ -1008,6 +1011,13 @@ retry: BUG_ON(b->level != level); } + if (btree_node_io_error(b)) { + rw_unlock(write, b); + return ERR_PTR(-EIO); + } + + BUG_ON(!b->written); + b->parent = parent; b->accessed = 1; @@ -1019,13 +1029,6 @@ retry: for (; i <= b->keys.nsets; i++) prefetch(b->keys.set[i].data); - if (btree_node_io_error(b)) { - rw_unlock(write, b); - return ERR_PTR(-EIO); - } - - BUG_ON(!b->written); - return b; } @@ -1520,6 +1523,32 @@ static unsigned btree_gc_count_keys(struct btree *b) return ret; } +static size_t btree_gc_min_nodes(struct cache_set *c) +{ + size_t min_nodes; + + /* + * Since incremental GC would stop 100ms when front + * side I/O comes, so when there are many btree nodes, + * if GC only processes constant (100) nodes each time, + * GC would last a long time, and the front side I/Os + * would run out of the buckets (since no new bucket + * can be allocated during GC), and be blocked again. + * So GC should not process constant nodes, but varied + * nodes according to the number of btree nodes, which + * realized by dividing GC into constant(100) times, + * so when there are many btree nodes, GC can process + * more nodes each time, otherwise, GC will process less + * nodes each time (but no less than MIN_GC_NODES) + */ + min_nodes = c->gc_stats.nodes / MAX_GC_TIMES; + if (min_nodes < MIN_GC_NODES) + min_nodes = MIN_GC_NODES; + + return min_nodes; +} + + static int btree_gc_recurse(struct btree *b, struct btree_op *op, struct closure *writes, struct gc_stat *gc) { @@ -1585,6 +1614,13 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op, memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1)); r->b = NULL; + if (atomic_read(&b->c->search_inflight) && + gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) { + gc->nodes_pre = gc->nodes; + ret = -EAGAIN; + break; + } + if (need_resched()) { ret = -EAGAIN; break; @@ -1753,7 +1789,10 @@ static void bch_btree_gc(struct cache_set *c) closure_sync(&writes); cond_resched(); - if (ret && ret != -EAGAIN) + if (ret == -EAGAIN) + schedule_timeout_interruptible(msecs_to_jiffies + (GC_SLEEP_MS)); + else if (ret) pr_warn("gc failed!"); } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); @@ -1834,8 +1873,14 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op) do { k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad); - if (k) + if (k) { btree_node_prefetch(b, k); + /* + * initiallize c->gc_stats.nodes + * for incremental GC + */ + b->c->gc_stats.nodes++; + } if (p) ret = btree(check_recurse, p, b, op); diff --git a/drivers/md/bcache/btree.h b/drivers/md/bcache/btree.h index d211e2c25b6b..68e9d926134d 100644 --- a/drivers/md/bcache/btree.h +++ b/drivers/md/bcache/btree.h @@ -152,7 +152,7 @@ static inline bool btree_node_ ## flag(struct btree *b) \ { return test_bit(BTREE_NODE_ ## flag, &b->flags); } \ \ static inline void set_btree_node_ ## flag(struct btree *b) \ -{ set_bit(BTREE_NODE_ ## flag, &b->flags); } \ +{ set_bit(BTREE_NODE_ ## flag, &b->flags); } enum btree_flags { BTREE_NODE_io_error, diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 0e14969182c6..618253683d40 100644 --- 
a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -199,11 +199,16 @@ static const struct file_operations debug_ops = { .release = single_release }; -int __init closure_debug_init(void) +void __init closure_debug_init(void) { - closure_debug = debugfs_create_file("closures", - 0400, bcache_debug, NULL, &debug_ops); - return IS_ERR_OR_NULL(closure_debug); + if (!IS_ERR_OR_NULL(bcache_debug)) + /* + * it is unnecessary to check return value of + * debugfs_create_file(), we should not care + * about this. + */ + closure_debug = debugfs_create_file( + "closures", 0400, bcache_debug, NULL, &debug_ops); } #endif diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 71427eb5fdae..7c2c5bc7c88b 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -186,13 +186,13 @@ static inline void closure_sync(struct closure *cl) #ifdef CONFIG_BCACHE_CLOSURES_DEBUG -int closure_debug_init(void); +void closure_debug_init(void); void closure_debug_create(struct closure *cl); void closure_debug_destroy(struct closure *cl); #else -static inline int closure_debug_init(void) { return 0; } +static inline void closure_debug_init(void) {} static inline void closure_debug_create(struct closure *cl) {} static inline void closure_debug_destroy(struct closure *cl) {} diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index d030ce3025a6..12034c07257b 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -110,11 +110,15 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) struct bio_vec bv, cbv; struct bvec_iter iter, citer = { 0 }; - check = bio_clone_kmalloc(bio, GFP_NOIO); + check = bio_kmalloc(GFP_NOIO, bio_segments(bio)); if (!check) return; + check->bi_disk = bio->bi_disk; check->bi_opf = REQ_OP_READ; + check->bi_iter.bi_sector = bio->bi_iter.bi_sector; + check->bi_iter.bi_size = bio->bi_iter.bi_size; + bch_bio_map(check, NULL); if (bch_bio_alloc_pages(check, GFP_NOIO)) goto out_put; @@ -248,11 +252,12 @@ void bch_debug_exit(void) debugfs_remove_recursive(bcache_debug); } -int __init bch_debug_init(struct kobject *kobj) +void __init bch_debug_init(struct kobject *kobj) { - if (!IS_ENABLED(CONFIG_DEBUG_FS)) - return 0; - + /* + * it is unnecessary to check return value of + * debugfs_create_file(), we should not care + * about this. + */ bcache_debug = debugfs_create_dir("bcache", NULL); - return IS_ERR_OR_NULL(bcache_debug); } diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 18f1b5239620..10748c626a1d 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -828,6 +828,7 @@ void bch_journal_free(struct cache_set *c) free_pages((unsigned long) c->journal.w[1].data, JSET_BITS); free_pages((unsigned long) c->journal.w[0].data, JSET_BITS); free_fifo(&c->journal.pin); + free_heap(&c->flush_btree); } int bch_journal_alloc(struct cache_set *c) diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index ae67f5fa8047..7dbe8b6316a0 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -107,7 +107,7 @@ static int bch_keylist_realloc(struct keylist *l, unsigned u64s, /* * The journalling code doesn't handle the case where the keys to insert * is bigger than an empty write: If we just return -ENOMEM here, - * bio_insert() and bio_invalidate() will insert the keys created so far + * bch_data_insert_keys() will insert the keys created so far * and finish the rest when the keylist is empty. 
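
Stepping back to the incremental GC added to btree.c earlier in this diff: each pass visits at least MIN_GC_NODES nodes but otherwise about 1/MAX_GC_TIMES of the tree, and when front-side I/O is in flight it returns -EAGAIN so bch_btree_gc() can sleep GC_SLEEP_MS and resume. A rough userspace sketch of that pacing, with the node walk reduced to a counter (only the two constants come from the hunk, everything else is illustrative):

#include <stddef.h>
#include <stdio.h>

#define MAX_GC_TIMES	100	/* same constants as the btree.c hunk */
#define MIN_GC_NODES	100

/* Size one GC slice the way btree_gc_min_nodes() does. */
static size_t gc_min_nodes(size_t total_nodes)
{
	size_t min_nodes = total_nodes / MAX_GC_TIMES;

	return min_nodes < MIN_GC_NODES ? MIN_GC_NODES : min_nodes;
}

int main(void)
{
	size_t total = 25000, done = 0;
	int passes = 0;

	while (done < total) {
		size_t batch = gc_min_nodes(total);

		if (batch > total - done)
			batch = total - done;
		done += batch;		/* stand-in for walking btree nodes */
		passes++;
		/*
		 * The real code only breaks out early (returning -EAGAIN and
		 * sleeping GC_SLEEP_MS) when front-side I/O is in flight.
		 */
	}
	printf("%zu nodes in %d passes\n", total, passes);
	return 0;
}
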
*/ if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset)) @@ -667,8 +667,7 @@ static void backing_request_endio(struct bio *bio) static void bio_complete(struct search *s) { if (s->orig_bio) { - generic_end_io_acct(s->d->disk->queue, - bio_data_dir(s->orig_bio), + generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio), &s->d->disk->part0, s->start_time); trace_bcache_request_end(s->d, s->orig_bio); @@ -702,6 +701,8 @@ static void search_free(struct closure *cl) { struct search *s = container_of(cl, struct search, cl); + atomic_dec(&s->d->c->search_inflight); + if (s->iop.bio) bio_put(s->iop.bio); @@ -719,6 +720,7 @@ static inline struct search *search_alloc(struct bio *bio, closure_init(&s->cl, NULL); do_bio_hook(s, bio, request_endio); + atomic_inc(&d->c->search_inflight); s->orig_bio = bio; s->cache_miss = NULL; @@ -1062,8 +1064,7 @@ static void detached_dev_end_io(struct bio *bio) bio->bi_end_io = ddip->bi_end_io; bio->bi_private = ddip->bi_private; - generic_end_io_acct(ddip->d->disk->queue, - bio_data_dir(bio), + generic_end_io_acct(ddip->d->disk->queue, bio_op(bio), &ddip->d->disk->part0, ddip->start_time); if (bio->bi_status) { @@ -1102,6 +1103,44 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) generic_make_request(bio); } +static void quit_max_writeback_rate(struct cache_set *c, + struct cached_dev *this_dc) +{ + int i; + struct bcache_device *d; + struct cached_dev *dc; + + /* + * mutex bch_register_lock may compete with other parallel requesters, + * or attach/detach operations on other backing device. Waiting to + * the mutex lock may increase I/O request latency for seconds or more. + * To avoid such situation, if mutext_trylock() failed, only writeback + * rate of current cached device is set to 1, and __update_write_back() + * will decide writeback rate of other cached devices (remember now + * c->idle_counter is 0 already). + */ + if (mutex_trylock(&bch_register_lock)) { + for (i = 0; i < c->devices_max_used; i++) { + if (!c->devices[i]) + continue; + + if (UUID_FLASH_ONLY(&c->uuids[i])) + continue; + + d = c->devices[i]; + dc = container_of(d, struct cached_dev, disk); + /* + * set writeback rate to default minimum value, + * then let update_writeback_rate() to decide the + * upcoming rate. + */ + atomic_long_set(&dc->writeback_rate.rate, 1); + } + mutex_unlock(&bch_register_lock); + } else + atomic_long_set(&this_dc->writeback_rate.rate, 1); +} + /* Cached devices - read & write stuff */ static blk_qc_t cached_dev_make_request(struct request_queue *q, @@ -1119,8 +1158,25 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, return BLK_QC_T_NONE; } - atomic_set(&dc->backing_idle, 0); - generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); + if (likely(d->c)) { + if (atomic_read(&d->c->idle_counter)) + atomic_set(&d->c->idle_counter, 0); + /* + * If at_max_writeback_rate of cache set is true and new I/O + * comes, quit max writeback rate of all cached devices + * attached to this cache set, and set at_max_writeback_rate + * to false. 
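
quit_max_writeback_rate() above deliberately uses mutex_trylock(): if bch_register_lock is contended it degrades to resetting only the current device's rate rather than stalling the I/O path. A generic sketch of that trylock-or-degrade pattern with POSIX threads and C11 atomics (the struct and names are illustrative, not bcache's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_DEVS 4

struct bdev {
	atomic_long rate;
};

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;
static struct bdev devs[NR_DEVS];

/* Reset every device if the lock is free, otherwise only the current one. */
static void quit_max_rate(struct bdev *this_dev)
{
	if (pthread_mutex_trylock(&registry_lock) == 0) {
		for (int i = 0; i < NR_DEVS; i++)
			atomic_store(&devs[i].rate, 1);
		pthread_mutex_unlock(&registry_lock);
	} else {
		/* don't wait for the lock: degrade to this device only */
		atomic_store(&this_dev->rate, 1);
	}
}

int main(void)
{
	quit_max_rate(&devs[0]);
	printf("dev0 rate=%ld\n", atomic_load(&devs[0].rate));
	return 0;
}
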
+ */ + if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) { + atomic_set(&d->c->at_max_writeback_rate, 0); + quit_max_writeback_rate(d->c, dc); + } + } + + generic_start_io_acct(q, + bio_op(bio), + bio_sectors(bio), + &d->disk->part0); bio_set_dev(bio, dc->bdev); bio->bi_iter.bi_sector += dc->sb.data_offset; @@ -1229,7 +1285,6 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, struct search *s; struct closure *cl; struct bcache_device *d = bio->bi_disk->private_data; - int rw = bio_data_dir(bio); if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { bio->bi_status = BLK_STS_IOERR; @@ -1237,7 +1292,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, return BLK_QC_T_NONE; } - generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); + generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0); s = search_alloc(bio, d); cl = &s->cl; @@ -1254,7 +1309,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, flash_dev_nodata, bcache_wq); return BLK_QC_T_NONE; - } else if (rw) { + } else if (bio_data_dir(bio)) { bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &KEY(d->id, bio->bi_iter.bi_sector, 0), &KEY(d->id, bio_end_sector(bio), 0)); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index fa4058e43202..55a37641aa95 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -181,7 +181,7 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev, goto err; } - sb->last_mount = get_seconds(); + sb->last_mount = (u32)ktime_get_real_seconds(); err = NULL; get_page(bh->b_page); @@ -696,12 +696,14 @@ static void bcache_device_detach(struct bcache_device *d) { lockdep_assert_held(&bch_register_lock); + atomic_dec(&d->c->attached_dev_nr); + if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) { struct uuid_entry *u = d->c->uuids + d->id; SET_UUID_FLASH_ONLY(u, 0); memcpy(u->uuid, invalid_uuid, 16); - u->invalidated = cpu_to_le32(get_seconds()); + u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); bch_uuid_write(d->c); } @@ -796,11 +798,12 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, return idx; if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio), - BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) || - !(d->disk = alloc_disk(BCACHE_MINORS))) { - ida_simple_remove(&bcache_device_idx, idx); - return -ENOMEM; - } + BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) + goto err; + + d->disk = alloc_disk(BCACHE_MINORS); + if (!d->disk) + goto err; set_capacity(d->disk, sectors); snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx); @@ -834,6 +837,11 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, blk_queue_write_cache(q, true, true); return 0; + +err: + ida_simple_remove(&bcache_device_idx, idx); + return -ENOMEM; + } /* Cached device */ @@ -1027,7 +1035,7 @@ void bch_cached_dev_detach(struct cached_dev *dc) int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, uint8_t *set_uuid) { - uint32_t rtime = cpu_to_le32(get_seconds()); + uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds()); struct uuid_entry *u; struct cached_dev *exist_dc, *t; @@ -1070,7 +1078,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE || BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) { memcpy(u->uuid, invalid_uuid, 16); - u->invalidated = cpu_to_le32(get_seconds()); + u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds()); u = NULL; } @@ 
-1138,6 +1146,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, bch_cached_dev_run(dc); bcache_device_link(&dc->disk, c, "bdev"); + atomic_inc(&c->attached_dev_nr); /* Allow the writeback thread to proceed */ up_write(&dc->writeback_lock); @@ -1285,6 +1294,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page, pr_info("registered backing device %s", dc->backing_dev_name); list_add(&dc->list, &uncached_devices); + /* attach to a matched cache set if it exists */ list_for_each_entry(c, &bch_cache_sets, list) bch_cached_dev_attach(dc, c, NULL); @@ -1311,6 +1321,8 @@ static void flash_dev_free(struct closure *cl) { struct bcache_device *d = container_of(cl, struct bcache_device, cl); mutex_lock(&bch_register_lock); + atomic_long_sub(bcache_dev_sectors_dirty(d), + &d->c->flash_dev_dirty_sectors); bcache_device_free(d); mutex_unlock(&bch_register_lock); kobject_put(&d->kobj); @@ -1390,7 +1402,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) get_random_bytes(u->uuid, 16); memset(u->label, 0, 32); - u->first_reg = u->last_reg = cpu_to_le32(get_seconds()); + u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds()); SET_UUID_FLASH_ONLY(u, 1); u->sectors = size >> 9; @@ -1687,6 +1699,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->block_bits = ilog2(sb->block_size); c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry); c->devices_max_used = 0; + atomic_set(&c->attached_dev_nr, 0); c->btree_pages = bucket_pages(c); if (c->btree_pages > BTREE_MAX_PAGES) c->btree_pages = max_t(int, c->btree_pages / 4, @@ -1894,7 +1907,7 @@ static void run_cache_set(struct cache_set *c) goto err; closure_sync(&cl); - c->sb.last_mount = get_seconds(); + c->sb.last_mount = (u32)ktime_get_real_seconds(); bcache_write_super(c); list_for_each_entry_safe(dc, t, &uncached_devices, list) @@ -2163,8 +2176,12 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, if (!try_module_get(THIS_MODULE)) return -EBUSY; - if (!(path = kstrndup(buffer, size, GFP_KERNEL)) || - !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL))) + path = kstrndup(buffer, size, GFP_KERNEL); + if (!path) + goto err; + + sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL); + if (!sb) goto err; err = "failed to open device"; @@ -2324,13 +2341,21 @@ static int __init bcache_init(void) return bcache_major; } - if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || - !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || - bch_request_init() || - bch_debug_init(bcache_kobj) || closure_debug_init() || + bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); + if (!bcache_wq) + goto err; + + bcache_kobj = kobject_create_and_add("bcache", fs_kobj); + if (!bcache_kobj) + goto err; + + if (bch_request_init() || sysfs_create_files(bcache_kobj, files)) goto err; + bch_debug_init(bcache_kobj); + closure_debug_init(); + return 0; err: bcache_exit(); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 225b15aa0340..81d3520b0702 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -149,6 +149,7 @@ SHOW(__bch_cached_dev) struct cached_dev *dc = container_of(kobj, struct cached_dev, disk.kobj); const char *states[] = { "no cache", "clean", "dirty", "inconsistent" }; + int wb = dc->writeback_running; #define var(stat) (dc->stat) @@ -170,7 +171,8 @@ SHOW(__bch_cached_dev) var_printf(writeback_running, "%i"); var_print(writeback_delay); var_print(writeback_percent); - 
sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); + sysfs_hprint(writeback_rate, + wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0); sysfs_hprint(io_errors, atomic_read(&dc->io_errors)); sysfs_printf(io_error_limit, "%i", dc->error_limit); sysfs_printf(io_disable, "%i", dc->io_disable); @@ -188,15 +190,22 @@ SHOW(__bch_cached_dev) char change[20]; s64 next_io; - bch_hprint(rate, dc->writeback_rate.rate << 9); - bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); - bch_hprint(target, dc->writeback_rate_target << 9); - bch_hprint(proportional,dc->writeback_rate_proportional << 9); - bch_hprint(integral, dc->writeback_rate_integral_scaled << 9); - bch_hprint(change, dc->writeback_rate_change << 9); - - next_io = div64_s64(dc->writeback_rate.next - local_clock(), - NSEC_PER_MSEC); + /* + * Except for dirty and target, other values should + * be 0 if writeback is not running. + */ + bch_hprint(rate, + wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 + : 0); + bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9); + bch_hprint(target, dc->writeback_rate_target << 9); + bch_hprint(proportional, + wb ? dc->writeback_rate_proportional << 9 : 0); + bch_hprint(integral, + wb ? dc->writeback_rate_integral_scaled << 9 : 0); + bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0); + next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(), + NSEC_PER_MSEC) : 0; return sprintf(buf, "rate:\t\t%s/sec\n" @@ -255,8 +264,19 @@ STORE(__cached_dev) sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40); - sysfs_strtoul_clamp(writeback_rate, - dc->writeback_rate.rate, 1, INT_MAX); + if (attr == &sysfs_writeback_rate) { + ssize_t ret; + long int v = atomic_long_read(&dc->writeback_rate.rate); + + ret = strtoul_safe_clamp(buf, v, 1, INT_MAX); + + if (!ret) { + atomic_long_set(&dc->writeback_rate.rate, v); + ret = size; + } + + return ret; + } sysfs_strtoul_clamp(writeback_rate_update_seconds, dc->writeback_rate_update_seconds, @@ -338,8 +358,8 @@ STORE(__cached_dev) if (!v) return size; } - - pr_err("Can't attach %s: cache set not found", buf); + if (v == -ENOENT) + pr_err("Can't attach %s: cache set not found", buf); return v; } diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c index fc479b026d6d..b15256bcf0e7 100644 --- a/drivers/md/bcache/util.c +++ b/drivers/md/bcache/util.c @@ -200,7 +200,7 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) { uint64_t now = local_clock(); - d->next += div_u64(done * NSEC_PER_SEC, d->rate); + d->next += div_u64(done * NSEC_PER_SEC, atomic_long_read(&d->rate)); /* Bound the time. 
Don't let us fall further than 2 seconds behind * (this prevents unnecessary backlog that would make it impossible diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index cced87f8eb27..f7b0133c9d2f 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -442,7 +442,7 @@ struct bch_ratelimit { * Rate at which we want to do work, in units per second * The units here correspond to the units passed to bch_next_delay() */ - uint32_t rate; + atomic_long_t rate; }; static inline void bch_ratelimit_reset(struct bch_ratelimit *d) diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index ad45ebe1a74b..481d4cf38ac0 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -27,7 +27,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc) * flash-only devices */ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size - - bcache_flash_devs_sectors_dirty(c); + atomic_long_read(&c->flash_dev_dirty_sectors); /* * Unfortunately there is no control of global dirty data. If the @@ -104,11 +104,56 @@ static void __update_writeback_rate(struct cached_dev *dc) dc->writeback_rate_proportional = proportional_scaled; dc->writeback_rate_integral_scaled = integral_scaled; - dc->writeback_rate_change = new_rate - dc->writeback_rate.rate; - dc->writeback_rate.rate = new_rate; + dc->writeback_rate_change = new_rate - + atomic_long_read(&dc->writeback_rate.rate); + atomic_long_set(&dc->writeback_rate.rate, new_rate); dc->writeback_rate_target = target; } +static bool set_at_max_writeback_rate(struct cache_set *c, + struct cached_dev *dc) +{ + /* + * Idle_counter is increased every time update_writeback_rate() is + * called. If all backing devices attached to the same cache set have + * identical dc->writeback_rate_update_seconds values, it is about 6 + * rounds of update_writeback_rate() on each backing device before + * c->at_max_writeback_rate is set to 1, and then the max writeback rate + * is set for each dc->writeback_rate.rate. + * To avoid extra locking cost for counting the exact number of dirty + * cached devices, c->attached_dev_nr is used to calculate the idle + * threshold. It might be bigger if not all cached devices are in + * writeback mode, but it still works well with limited extra rounds of + * update_writeback_rate(). + */ + if (atomic_inc_return(&c->idle_counter) < + atomic_read(&c->attached_dev_nr) * 6) + return false; + + if (atomic_read(&c->at_max_writeback_rate) != 1) + atomic_set(&c->at_max_writeback_rate, 1); + + atomic_long_set(&dc->writeback_rate.rate, INT_MAX); + + /* keep writeback_rate_target as existing value */ + dc->writeback_rate_proportional = 0; + dc->writeback_rate_integral_scaled = 0; + dc->writeback_rate_change = 0; + + /* + * Check c->idle_counter and c->at_max_writeback_rate again in case + * new I/O arrives before set_at_max_writeback_rate() returns. + * Then the writeback rate is set to 1, and its new value should be + * decided via __update_writeback_rate().
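
set_at_max_writeback_rate() above only jumps to the maximum rate after roughly six idle update rounds per attached device, and then re-reads the counters so that I/O arriving in the meantime (which zeroes idle_counter and at_max_writeback_rate) wins the race. A compressed single-device sketch of that check, set, re-check sequence using C11 atomics (names mirror the fields above but the code is illustrative):

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int idle_counter;
static atomic_int at_max_writeback_rate;
static atomic_int attached_dev_nr = 1;
static atomic_long writeback_rate = 1024;

static bool try_set_max_writeback_rate(void)
{
	/* roughly six idle update rounds per attached device */
	if (atomic_fetch_add(&idle_counter, 1) + 1 <
	    atomic_load(&attached_dev_nr) * 6)
		return false;

	atomic_store(&at_max_writeback_rate, 1);
	atomic_store(&writeback_rate, INT_MAX);

	/* re-check: new I/O zeroes both counters and must win the race */
	if (atomic_load(&idle_counter) < atomic_load(&attached_dev_nr) * 6 ||
	    !atomic_load(&at_max_writeback_rate))
		return false;

	return true;
}

int main(void)
{
	for (int round = 1; round <= 7; round++)
		printf("round %d: at max = %d\n", round,
		       try_set_max_writeback_rate());
	return 0;
}
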
+ */ + if ((atomic_read(&c->idle_counter) < + atomic_read(&c->attached_dev_nr) * 6) || + !atomic_read(&c->at_max_writeback_rate)) + return false; + + return true; +} + static void update_writeback_rate(struct work_struct *work) { struct cached_dev *dc = container_of(to_delayed_work(work), @@ -136,13 +181,20 @@ static void update_writeback_rate(struct work_struct *work) return; } - down_read(&dc->writeback_lock); - - if (atomic_read(&dc->has_dirty) && - dc->writeback_percent) - __update_writeback_rate(dc); + if (atomic_read(&dc->has_dirty) && dc->writeback_percent) { + /* + * If the whole cache set is idle, set_at_max_writeback_rate() + * will set writeback rate to a max number. Then it is + * unncessary to update writeback rate for an idle cache set + * in maximum writeback rate number(s). + */ + if (!set_at_max_writeback_rate(c, dc)) { + down_read(&dc->writeback_lock); + __update_writeback_rate(dc); + up_read(&dc->writeback_lock); + } + } - up_read(&dc->writeback_lock); /* * CACHE_SET_IO_DISABLE might be set via sysfs interface, @@ -422,27 +474,6 @@ static void read_dirty(struct cached_dev *dc) delay = writeback_delay(dc, size); - /* If the control system would wait for at least half a - * second, and there's been no reqs hitting the backing disk - * for awhile: use an alternate mode where we have at most - * one contiguous set of writebacks in flight at a time. If - * someone wants to do IO it will be quick, as it will only - * have to contend with one operation in flight, and we'll - * be round-tripping data to the backing disk as quickly as - * it can accept it. - */ - if (delay >= HZ / 2) { - /* 3 means at least 1.5 seconds, up to 7.5 if we - * have slowed way down. - */ - if (atomic_inc_return(&dc->backing_idle) >= 3) { - /* Wait for current I/Os to finish */ - closure_sync(&cl); - /* And immediately launch a new set. 
*/ - delay = 0; - } - } - while (!kthread_should_stop() && !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && delay) { @@ -476,6 +507,9 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode, if (!d) return; + if (UUID_FLASH_ONLY(&c->uuids[inode])) + atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors); + stripe = offset_to_stripe(d, offset); stripe_offset = offset & (d->stripe_size - 1); @@ -673,10 +707,14 @@ static int bch_writeback_thread(void *arg) } /* Init */ +#define INIT_KEYS_EACH_TIME 500000 +#define INIT_KEYS_SLEEP_MS 100 struct sectors_dirty_init { struct btree_op op; unsigned inode; + size_t count; + struct bkey start; }; static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, @@ -691,18 +729,37 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b, bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k), KEY_START(k), KEY_SIZE(k)); + op->count++; + if (atomic_read(&b->c->search_inflight) && + !(op->count % INIT_KEYS_EACH_TIME)) { + bkey_copy_key(&op->start, k); + return -EAGAIN; + } + return MAP_CONTINUE; } void bch_sectors_dirty_init(struct bcache_device *d) { struct sectors_dirty_init op; + int ret; bch_btree_op_init(&op.op, -1); op.inode = d->id; - - bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0), - sectors_dirty_init_fn, 0); + op.count = 0; + op.start = KEY(op.inode, 0, 0); + + do { + ret = bch_btree_map_keys(&op.op, d->c, &op.start, + sectors_dirty_init_fn, 0); + if (ret == -EAGAIN) + schedule_timeout_interruptible( + msecs_to_jiffies(INIT_KEYS_SLEEP_MS)); + else if (ret < 0) { + pr_warn("sectors dirty init failed, ret=%d!", ret); + break; + } + } while (ret == -EAGAIN); } void bch_cached_dev_writeback_init(struct cached_dev *dc) @@ -715,7 +772,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_running = true; dc->writeback_percent = 10; dc->writeback_delay = 30; - dc->writeback_rate.rate = 1024; + atomic_long_set(&dc->writeback_rate.rate, 1024); dc->writeback_rate_minimum = 8; dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 610fb01de629..3745d7004c47 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -28,25 +28,6 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d) return ret; } -static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c) -{ - uint64_t i, ret = 0; - - mutex_lock(&bch_register_lock); - - for (i = 0; i < c->devices_max_used; i++) { - struct bcache_device *d = c->devices[i]; - - if (!d || !UUID_FLASH_ONLY(&c->uuids[i])) - continue; - ret += bcache_dev_sectors_dirty(d); - } - - mutex_unlock(&bch_register_lock); - - return ret; -} - static inline unsigned offset_to_stripe(struct bcache_device *d, uint64_t offset) { diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b0dd7027848b..20f7e4ef5342 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -609,7 +609,8 @@ static void start_io_acct(struct dm_io *io) io->start_time = jiffies; - generic_start_io_acct(md->queue, rw, bio_sectors(bio), &dm_disk(md)->part0); + generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio), + &dm_disk(md)->part0); atomic_set(&dm_disk(md)->part0.in_flight[rw], atomic_inc_return(&md->pending[rw])); @@ -628,7 +629,8 @@ static void end_io_acct(struct dm_io *io) int pending; int rw = bio_data_dir(bio); - generic_end_io_acct(md->queue, rw, &dm_disk(md)->part0, io->start_time); + generic_end_io_acct(md->queue, bio_op(bio), 
&dm_disk(md)->part0, + io->start_time); if (unlikely(dm_stats_used(&md->stats))) dm_stats_account_io(&md->stats, bio_data_dir(bio), diff --git a/drivers/md/md.c b/drivers/md/md.c index 994aed2f9dff..cb4eb5faa519 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -204,10 +204,6 @@ static int start_readonly; */ static bool create_on_open = true; -/* bio_clone_mddev - * like bio_clone_bioset, but with a local bio set - */ - struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev) { @@ -335,6 +331,7 @@ EXPORT_SYMBOL(md_handle_request); static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) { const int rw = bio_data_dir(bio); + const int sgrp = op_stat_group(bio_op(bio)); struct mddev *mddev = q->queuedata; unsigned int sectors; int cpu; @@ -363,8 +360,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) md_handle_request(mddev, bio); cpu = part_stat_lock(); - part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); - part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); + part_stat_inc(cpu, &mddev->gendisk->part0, ios[sgrp]); + part_stat_add(cpu, &mddev->gendisk->part0, sectors[sgrp], sectors); part_stat_unlock(); return BLK_QC_T_NONE; @@ -8046,8 +8043,7 @@ static int is_mddev_idle(struct mddev *mddev, int init) rcu_read_lock(); rdev_for_each_rcu(rdev, mddev) { struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; - curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + - (int)part_stat_read(&disk->part0, sectors[1]) - + curr_events = (int)part_stat_read_accum(&disk->part0, sectors) - atomic_read(&disk->sync_io); /* sync IO will cause sync_io to increase before the disk_stats * as sync_io is counted when a request starts, and diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 85de8053aa34..0360c015f658 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1423,11 +1423,11 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip, static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip, struct page *page, unsigned int len, unsigned int off, - bool is_write, sector_t sector) + unsigned int op, sector_t sector) { int ret; - if (!is_write) { + if (!op_is_write(op)) { ret = btt_read_pg(btt, bip, page, off, sector, len); flush_dcache_page(page); } else { @@ -1464,7 +1464,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) } err = btt_do_bvec(btt, bip, bvec.bv_page, len, bvec.bv_offset, - op_is_write(bio_op(bio)), iter.bi_sector); + bio_op(bio), iter.bi_sector); if (err) { dev_err(&btt->nd_btt->dev, "io error in %s sector %lld, len %d,\n", @@ -1483,16 +1483,16 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) } static int btt_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, bool is_write) + struct page *page, unsigned int op) { struct btt *btt = bdev->bd_disk->private_data; int rc; unsigned int len; len = hpage_nr_pages(page) * PAGE_SIZE; - rc = btt_do_bvec(btt, NULL, page, len, 0, is_write, sector); + rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector); if (rc == 0) - page_endio(page, is_write, 0); + page_endio(page, op_is_write(op), 0); return rc; } diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 32e0364b48b9..6ee7fd7e4bbd 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -396,16 +396,15 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) return false; *start = jiffies; - generic_start_io_acct(disk->queue, 
bio_data_dir(bio), - bio_sectors(bio), &disk->part0); + generic_start_io_acct(disk->queue, bio_op(bio), bio_sectors(bio), + &disk->part0); return true; } static inline void nd_iostat_end(struct bio *bio, unsigned long start) { struct gendisk *disk = bio->bi_disk; - generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0, - start); + generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start); } static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector, unsigned int len) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 8b1fd7f1a224..dd17acd8fe68 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -120,7 +120,7 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, } static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, - unsigned int len, unsigned int off, bool is_write, + unsigned int len, unsigned int off, unsigned int op, sector_t sector) { blk_status_t rc = BLK_STS_OK; @@ -131,7 +131,7 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) bad_pmem = true; - if (!is_write) { + if (!op_is_write(op)) { if (unlikely(bad_pmem)) rc = BLK_STS_IOERR; else { @@ -180,8 +180,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) do_acct = nd_iostat_start(bio, &start); bio_for_each_segment(bvec, bio, iter) { rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, - bvec.bv_offset, op_is_write(bio_op(bio)), - iter.bi_sector); + bvec.bv_offset, bio_op(bio), iter.bi_sector); if (rc) { bio->bi_status = rc; break; @@ -198,13 +197,13 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) } static int pmem_rw_page(struct block_device *bdev, sector_t sector, - struct page *page, bool is_write) + struct page *page, unsigned int op) { struct pmem_device *pmem = bdev->bd_queue->queuedata; blk_status_t rc; rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE, - 0, is_write, sector); + 0, op, sector); /* * The ->rw_page interface is subtle and tricky. The core @@ -213,7 +212,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, * caused by double completion. 
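
The btt and pmem changes above swap a bool is_write parameter for the raw request op, so helpers call op_is_write() and can later tell more than two operations apart. A tiny sketch of that interface style; the REQ_OP_* values and op_is_write() below mimic the block layer's convention that write-type ops have bit 0 set, but everything here is a standalone illustration:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the block layer convention that write-type ops have bit 0 set. */
enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1, REQ_OP_DISCARD = 3 };

static bool op_is_write(unsigned int op)
{
	return op & 1;
}

/* New-style helper: takes the op itself instead of a bool is_write. */
static void do_bvec(unsigned int op)
{
	if (op_is_write(op))
		printf("write path (op %u)\n", op);
	else
		printf("read path (op %u)\n", op);
}

int main(void)
{
	do_bvec(REQ_OP_READ);
	do_bvec(REQ_OP_WRITE);
	do_bvec(REQ_OP_DISCARD);
	return 0;
}
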
*/ if (rc == 0) - page_endio(page, is_write, 0); + page_endio(page, op_is_write(op), 0); return blk_status_to_errno(rc); } diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index bf65501e6ed6..dd8ec1dd9219 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -252,7 +252,8 @@ void nvme_complete_rq(struct request *req) trace_nvme_complete_rq(req); if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) { - if (nvme_req_needs_failover(req, status)) { + if ((req->cmd_flags & REQ_NVME_MPATH) && + blk_path_error(status)) { nvme_failover_req(req); return; } @@ -617,6 +618,8 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, if (WARN_ON_ONCE(!nvme_ns_has_pi(ns))) return BLK_STS_NOTSUPP; control |= NVME_RW_PRINFO_PRACT; + } else if (req_op(req) == REQ_OP_WRITE) { + t10_pi_prepare(req, ns->pi_type); } switch (ns->pi_type) { @@ -627,8 +630,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, case NVME_NS_DPS_PI_TYPE2: control |= NVME_RW_PRINFO_PRCHK_GUARD | NVME_RW_PRINFO_PRCHK_REF; - cmnd->rw.reftag = cpu_to_le32( - nvme_block_nr(ns, blk_rq_pos(req))); + cmnd->rw.reftag = cpu_to_le32(t10_pi_ref_tag(req)); break; } } @@ -638,6 +640,22 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns, return 0; } +void nvme_cleanup_cmd(struct request *req) +{ + if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ && + nvme_req(req)->status == 0) { + struct nvme_ns *ns = req->rq_disk->private_data; + + t10_pi_complete(req, ns->pi_type, + blk_rq_bytes(req) >> ns->lba_shift); + } + if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { + kfree(page_address(req->special_vec.bv_page) + + req->special_vec.bv_offset); + } +} +EXPORT_SYMBOL_GPL(nvme_cleanup_cmd); + blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, struct nvme_command *cmd) { @@ -668,10 +686,7 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, } cmd->common.command_id = req->tag; - if (ns) - trace_nvme_setup_nvm_cmd(req->q->id, cmd); - else - trace_nvme_setup_admin_cmd(cmd); + trace_nvme_setup_cmd(req, cmd); return ret; } EXPORT_SYMBOL_GPL(nvme_setup_cmd); @@ -864,9 +879,6 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl) if (unlikely(ctrl->kato == 0)) return; - INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); - memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); - ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); } @@ -1056,7 +1068,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) EXPORT_SYMBOL_GPL(nvme_set_queue_count); #define NVME_AEN_SUPPORTED \ - (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT) + (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_FW_ACT | NVME_AEN_CFG_ANA_CHANGE) static void nvme_enable_aen(struct nvme_ctrl *ctrl) { @@ -1472,6 +1484,12 @@ static void nvme_update_disk_info(struct gendisk *disk, set_capacity(disk, capacity); nvme_config_discard(ns); + + if (id->nsattr & (1 << 0)) + set_disk_ro(disk, true); + else + set_disk_ro(disk, false); + blk_mq_unfreeze_queue(disk->queue); } @@ -2270,21 +2288,16 @@ out_unlock: return ret; } -int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, - u8 log_page, void *log, - size_t size, u64 offset) +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, + void *log, size_t size, u64 offset) { struct nvme_command c = { }; unsigned long dwlen = size / 4 - 1; c.get_log_page.opcode = nvme_admin_get_log_page; - - if (ns) - c.get_log_page.nsid = cpu_to_le32(ns->head->ns_id); - else - 
c.get_log_page.nsid = cpu_to_le32(NVME_NSID_ALL); - + c.get_log_page.nsid = cpu_to_le32(nsid); c.get_log_page.lid = log_page; + c.get_log_page.lsp = lsp; c.get_log_page.numdl = cpu_to_le16(dwlen & ((1 << 16) - 1)); c.get_log_page.numdu = cpu_to_le16(dwlen >> 16); c.get_log_page.lpol = cpu_to_le32(lower_32_bits(offset)); @@ -2293,12 +2306,6 @@ int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, return nvme_submit_sync_cmd(ctrl->admin_q, &c, log, size); } -static int nvme_get_log(struct nvme_ctrl *ctrl, u8 log_page, void *log, - size_t size) -{ - return nvme_get_log_ext(ctrl, NULL, log_page, log, size, 0); -} - static int nvme_get_effects_log(struct nvme_ctrl *ctrl) { int ret; @@ -2309,8 +2316,8 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl) if (!ctrl->effects) return 0; - ret = nvme_get_log(ctrl, NVME_LOG_CMD_EFFECTS, ctrl->effects, - sizeof(*ctrl->effects)); + ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, + ctrl->effects, sizeof(*ctrl->effects), 0); if (ret) { kfree(ctrl->effects); ctrl->effects = NULL; @@ -2401,6 +2408,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) nvme_set_queue_limits(ctrl, ctrl->admin_q); ctrl->sgls = le32_to_cpu(id->sgls); ctrl->kas = le16_to_cpu(id->kas); + ctrl->max_namespaces = le32_to_cpu(id->mnan); if (id->rtd3e) { /* us -> s */ @@ -2460,8 +2468,12 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) ctrl->hmmaxd = le16_to_cpu(id->hmmaxd); } + ret = nvme_mpath_init(ctrl, id); kfree(id); + if (ret < 0) + return ret; + if (ctrl->apst_enabled && !prev_apst_enabled) dev_pm_qos_expose_latency_tolerance(ctrl->device); else if (!ctrl->apst_enabled && prev_apst_enabled) @@ -2680,6 +2692,10 @@ static struct attribute *nvme_ns_id_attrs[] = { &dev_attr_nguid.attr, &dev_attr_eui.attr, &dev_attr_nsid.attr, +#ifdef CONFIG_NVME_MULTIPATH + &dev_attr_ana_grpid.attr, + &dev_attr_ana_state.attr, +#endif NULL, }; @@ -2702,6 +2718,14 @@ static umode_t nvme_ns_id_attrs_are_visible(struct kobject *kobj, if (!memchr_inv(ids->eui64, 0, sizeof(ids->eui64))) return 0; } +#ifdef CONFIG_NVME_MULTIPATH + if (a == &dev_attr_ana_grpid.attr || a == &dev_attr_ana_state.attr) { + if (dev_to_disk(dev)->fops != &nvme_fops) /* per-path attr */ + return 0; + if (!nvme_ctrl_use_ana(nvme_get_ns_from_dev(dev)->ctrl)) + return 0; + } +#endif return a->mode; } @@ -3075,8 +3099,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) nvme_get_ctrl(ctrl); - kfree(id); - device_add_disk(ctrl->device, ns->disk); if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj, &nvme_ns_id_attr_group)) @@ -3086,8 +3108,10 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) pr_warn("%s: failed to register lightnvm sysfs group for identification\n", ns->disk->disk_name); - nvme_mpath_add_disk(ns->head); + nvme_mpath_add_disk(ns, id); nvme_fault_inject_init(ns); + kfree(id); + return; out_unlink_ns: mutex_lock(&ctrl->subsys->lock); @@ -3229,7 +3253,8 @@ static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl) * raced with us in reading the log page, which could cause us to miss * updates. 
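
nvme_get_log() above also shows how a Get Log Page command encodes its size and offset: the 0-based dword count is split across numdl/numdu and the byte offset across lpol/lpou. A standalone sketch of that packing with a simplified struct and no endian conversion (field names follow the hunk, the struct layout is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the Get Log Page fields touched in the hunk. */
struct get_log_page_sketch {
	uint16_t numdl;		/* number of dwords, lower 16 bits (0-based) */
	uint16_t numdu;		/* number of dwords, upper 16 bits */
	uint32_t lpol;		/* log page offset, lower 32 bits */
	uint32_t lpou;		/* log page offset, upper 32 bits */
};

static void pack_get_log(struct get_log_page_sketch *c, size_t size,
			 uint64_t offset)
{
	uint32_t dwlen = size / 4 - 1;	/* 0-based dword count, as above */

	c->numdl = dwlen & 0xffff;
	c->numdu = dwlen >> 16;
	c->lpol = (uint32_t)offset;
	c->lpou = (uint32_t)(offset >> 32);
}

int main(void)
{
	struct get_log_page_sketch c;

	pack_get_log(&c, 4096, 0);
	printf("numdl=%u numdu=%u lpol=%u lpou=%u\n",
	       c.numdl, c.numdu, c.lpol, c.lpou);
	return 0;
}
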
*/ - error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size); + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CHANGED_NS, 0, log, + log_size, 0); if (error) dev_warn(ctrl->device, "reading changed ns log failed: %d\n", error); @@ -3346,9 +3371,9 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl) if (!log) return; - if (nvme_get_log(ctrl, NVME_LOG_FW_SLOT, log, sizeof(*log))) - dev_warn(ctrl->device, - "Get FW SLOT INFO log error\n"); + if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log, + sizeof(*log), 0)) + dev_warn(ctrl->device, "Get FW SLOT INFO log error\n"); kfree(log); } @@ -3394,6 +3419,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result) case NVME_AER_NOTICE_FW_ACT_STARTING: queue_work(nvme_wq, &ctrl->fw_act_work); break; +#ifdef CONFIG_NVME_MULTIPATH + case NVME_AER_NOTICE_ANA: + if (!ctrl->ana_log_buf) + break; + queue_work(nvme_wq, &ctrl->ana_work); + break; +#endif default: dev_warn(ctrl->device, "async event result %08x\n", result); } @@ -3426,6 +3458,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event); void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { + nvme_mpath_stop(ctrl); nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); flush_work(&ctrl->scan_work); @@ -3463,6 +3496,7 @@ static void nvme_free_ctrl(struct device *dev) ida_simple_remove(&nvme_instance_ida, ctrl->instance); kfree(ctrl->effects); + nvme_mpath_uninit(ctrl); if (subsys) { mutex_lock(&subsys->lock); @@ -3499,6 +3533,10 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work); INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work); + INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work); + memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd)); + ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive; + ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL); if (ret < 0) goto out; diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index f7efe5a58cc7..206d63cb1afc 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -474,7 +474,7 @@ EXPORT_SYMBOL_GPL(nvmf_connect_io_queue); bool nvmf_should_reconnect(struct nvme_ctrl *ctrl) { - if (ctrl->opts->max_reconnects != -1 && + if (ctrl->opts->max_reconnects == -1 || ctrl->nr_reconnects < ctrl->opts->max_reconnects) return true; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 9bac912173ba..611e70cae754 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1737,6 +1737,7 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq, int queue_idx = (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0; struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; + nvme_req(rq)->ctrl = &ctrl->ctrl; return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++); } diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 41279da799ed..6fe5923c95d4 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -414,12 +414,6 @@ static int nvme_nvm_setup_20(struct nvme_nvm_id20 *id, /* Set compacted version for upper layers */ geo->version = NVM_OCSSD_SPEC_20; - if (!(geo->major_ver_id == 2 && geo->minor_ver_id == 0)) { - pr_err("nvm: OCSSD version not supported (v%d.%d)\n", - geo->major_ver_id, geo->minor_ver_id); - return -EINVAL; - } - geo->num_ch = le16_to_cpu(id->num_grp); geo->num_lun = le16_to_cpu(id->num_pu); geo->all_luns = geo->num_ch * geo->num_lun; @@ -583,7 +577,13 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev, struct ppa_addr ppa; size_t left = nchks * sizeof(struct nvme_nvm_chk_meta); size_t log_pos, offset, len; - int ret, i; + int ret, i, max_len; + + /* + * limit requests to a maximum of 256K to avoid issuing arbitrarily large + * requests when the device does not specify a maximum transfer size. + */ + max_len = min_t(unsigned int, ctrl->max_hw_sectors << 9, 256 * 1024); /* Normalize lba address space to obtain log offset */ ppa.ppa = slba; @@ -596,10 +596,11 @@ static int nvme_nvm_get_chk_meta(struct nvm_dev *ndev, offset = log_pos * sizeof(struct nvme_nvm_chk_meta); while (left) { - len = min_t(unsigned int, left, ctrl->max_hw_sectors << 9); + len = min_t(unsigned int, left, max_len); - ret = nvme_get_log_ext(ctrl, ns, NVME_NVM_LOG_REPORT_CHUNK, - dev_meta, len, offset); + ret = nvme_get_log(ctrl, ns->head->ns_id, + NVME_NVM_LOG_REPORT_CHUNK, 0, dev_meta, len, + offset); if (ret) { dev_err(ctrl->device, "Get REPORT CHUNK log error\n"); break; @@ -662,12 +663,10 @@ static struct request *nvme_nvm_alloc_request(struct request_queue *q, rq->cmd_flags &= ~REQ_FAILFAST_DRIVER; - if (rqd->bio) { + if (rqd->bio) blk_init_request_from_bio(rq, rqd->bio); - } else { + else rq->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); - rq->__data_len = 0; - } return rq; } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 1ffd3e8b13a1..5a9562881d4e 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017 Christoph Hellwig. + * Copyright (c) 2017-2018 Christoph Hellwig.
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -20,6 +20,11 @@ module_param(multipath, bool, 0444); MODULE_PARM_DESC(multipath, "turn on native support for multiple controllers per subsystem"); +inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3)); +} + /* * If multipathing is enabled we need to always use the subsystem instance * number for numbering our devices to avoid conflicts between subsystems that @@ -45,6 +50,7 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, void nvme_failover_req(struct request *req) { struct nvme_ns *ns = req->q->queuedata; + u16 status = nvme_req(req)->status; unsigned long flags; spin_lock_irqsave(&ns->head->requeue_lock, flags); @@ -52,15 +58,35 @@ void nvme_failover_req(struct request *req) spin_unlock_irqrestore(&ns->head->requeue_lock, flags); blk_mq_end_request(req, 0); - nvme_reset_ctrl(ns->ctrl); - kblockd_schedule_work(&ns->head->requeue_work); -} + switch (status & 0x7ff) { + case NVME_SC_ANA_TRANSITION: + case NVME_SC_ANA_INACCESSIBLE: + case NVME_SC_ANA_PERSISTENT_LOSS: + /* + * If we got back an ANA error we know the controller is alive, + * but not ready to serve this namespace. The spec suggests + * we should update our general state here, but due to the fact + * that the admin and I/O queues are not serialized that is + * fundamentally racy. So instead just clear the current path, + * mark the path as pending and kick off a re-read of the ANA + * log page ASAP. + */ + nvme_mpath_clear_current_path(ns); + if (ns->ctrl->ana_log_buf) { + set_bit(NVME_NS_ANA_PENDING, &ns->flags); + queue_work(nvme_wq, &ns->ctrl->ana_work); + } + break; + default: + /* + * Reset the controller for any non-ANA error as we don't know + * what caused the error.
+ */ + nvme_reset_ctrl(ns->ctrl); + break; + } -bool nvme_req_needs_failover(struct request *req, blk_status_t error) -{ - if (!(req->cmd_flags & REQ_NVME_MPATH)) - return false; - return blk_path_error(error); + kblockd_schedule_work(&ns->head->requeue_work); } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) @@ -75,25 +101,51 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) up_read(&ctrl->namespaces_rwsem); } +static const char *nvme_ana_state_names[] = { + [0] = "invalid state", + [NVME_ANA_OPTIMIZED] = "optimized", + [NVME_ANA_NONOPTIMIZED] = "non-optimized", + [NVME_ANA_INACCESSIBLE] = "inaccessible", + [NVME_ANA_PERSISTENT_LOSS] = "persistent-loss", + [NVME_ANA_CHANGE] = "change", +}; + static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head) { - struct nvme_ns *ns; + struct nvme_ns *ns, *fallback = NULL; list_for_each_entry_rcu(ns, &head->list, siblings) { - if (ns->ctrl->state == NVME_CTRL_LIVE) { + if (ns->ctrl->state != NVME_CTRL_LIVE || + test_bit(NVME_NS_ANA_PENDING, &ns->flags)) + continue; + switch (ns->ana_state) { + case NVME_ANA_OPTIMIZED: rcu_assign_pointer(head->current_path, ns); return ns; + case NVME_ANA_NONOPTIMIZED: + fallback = ns; + break; + default: + break; } } - return NULL; + if (fallback) + rcu_assign_pointer(head->current_path, fallback); + return fallback; +} + +static inline bool nvme_path_is_optimized(struct nvme_ns *ns) +{ + return ns->ctrl->state == NVME_CTRL_LIVE && + ns->ana_state == NVME_ANA_OPTIMIZED; } inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) { struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu); - if (unlikely(!ns || ns->ctrl->state != NVME_CTRL_LIVE)) + if (unlikely(!ns || !nvme_path_is_optimized(ns))) ns = __nvme_find_path(head); return ns; } @@ -142,7 +194,7 @@ static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc) srcu_idx = srcu_read_lock(&head->srcu); ns = srcu_dereference(head->current_path, &head->srcu); - if (likely(ns && ns->ctrl->state == NVME_CTRL_LIVE)) + if (likely(ns && nvme_path_is_optimized(ns))) found = ns->queue->poll_fn(q, qc); srcu_read_unlock(&head->srcu, srcu_idx); return found; @@ -176,6 +228,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) struct request_queue *q; bool vwc = false; + mutex_init(&head->lock); bio_list_init(&head->requeue_list); spin_lock_init(&head->requeue_lock); INIT_WORK(&head->requeue_work, nvme_requeue_work); @@ -220,29 +273,232 @@ out: return -ENOMEM; } -void nvme_mpath_add_disk(struct nvme_ns_head *head) +static void nvme_mpath_set_live(struct nvme_ns *ns) { + struct nvme_ns_head *head = ns->head; + + lockdep_assert_held(&ns->head->lock); + if (!head->disk) return; - mutex_lock(&head->subsys->lock); if (!(head->disk->flags & GENHD_FL_UP)) { device_add_disk(&head->subsys->dev, head->disk); if (sysfs_create_group(&disk_to_dev(head->disk)->kobj, &nvme_ns_id_attr_group)) - pr_warn("%s: failed to create sysfs group for identification\n", - head->disk->disk_name); + dev_warn(&head->subsys->dev, + "failed to create id group.\n"); + } + + kblockd_schedule_work(&ns->head->requeue_work); +} + +static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data, + int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *, + void *)) +{ + void *base = ctrl->ana_log_buf; + size_t offset = sizeof(struct nvme_ana_rsp_hdr); + int error, i; + + lockdep_assert_held(&ctrl->ana_lock); + + for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) { + struct nvme_ana_group_desc *desc = base + offset; + u32 
nr_nsids = le32_to_cpu(desc->nnsids); + size_t nsid_buf_size = nr_nsids * sizeof(__le32); + + if (WARN_ON_ONCE(desc->grpid == 0)) + return -EINVAL; + if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax)) + return -EINVAL; + if (WARN_ON_ONCE(desc->state == 0)) + return -EINVAL; + if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE)) + return -EINVAL; + + offset += sizeof(*desc); + if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size)) + return -EINVAL; + + error = cb(ctrl, desc, data); + if (error) + return error; + + offset += nsid_buf_size; + if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc))) + return -EINVAL; + } + + return 0; +} + +static inline bool nvme_state_is_live(enum nvme_ana_state state) +{ + return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED; +} + +static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, + struct nvme_ns *ns) +{ + enum nvme_ana_state old; + + mutex_lock(&ns->head->lock); + old = ns->ana_state; + ns->ana_grpid = le32_to_cpu(desc->grpid); + ns->ana_state = desc->state; + clear_bit(NVME_NS_ANA_PENDING, &ns->flags); + + if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) + nvme_mpath_set_live(ns); + mutex_unlock(&ns->head->lock); +} + +static int nvme_update_ana_state(struct nvme_ctrl *ctrl, + struct nvme_ana_group_desc *desc, void *data) +{ + u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0; + unsigned *nr_change_groups = data; + struct nvme_ns *ns; + + dev_info(ctrl->device, "ANA group %d: %s.\n", + le32_to_cpu(desc->grpid), + nvme_ana_state_names[desc->state]); + + if (desc->state == NVME_ANA_CHANGE) + (*nr_change_groups)++; + + if (!nr_nsids) + return 0; + + down_write(&ctrl->namespaces_rwsem); + list_for_each_entry(ns, &ctrl->namespaces, list) { + if (ns->head->ns_id != le32_to_cpu(desc->nsids[n])) + continue; + nvme_update_ns_ana_state(desc, ns); + if (++n == nr_nsids) + break; + } + up_write(&ctrl->namespaces_rwsem); + WARN_ON_ONCE(n < nr_nsids); + return 0; +} + +static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only) +{ + u32 nr_change_groups = 0; + int error; + + mutex_lock(&ctrl->ana_lock); + error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, + groups_only ? NVME_ANA_LOG_RGO : 0, + ctrl->ana_log_buf, ctrl->ana_log_size, 0); + if (error) { + dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error); + goto out_unlock; + } + + error = nvme_parse_ana_log(ctrl, &nr_change_groups, + nvme_update_ana_state); + if (error) + goto out_unlock; + + /* + * In theory we should have an ANATT timer per group as they might enter + * the change state at different times. But that is a lot of overhead + * just to protect against a target that keeps entering new changes + * states while never finishing previous ones. But we'll still + * eventually time out once all groups are in change state, so this + * isn't a big deal. + * + * We also double the ANATT value to provide some slack for transports + * or AEN processing overhead. 
+ */ + if (nr_change_groups) + mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies); + else + del_timer_sync(&ctrl->anatt_timer); +out_unlock: + mutex_unlock(&ctrl->ana_lock); + return error; +} + +static void nvme_ana_work(struct work_struct *work) +{ + struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work); + + nvme_read_ana_log(ctrl, false); +} + +static void nvme_anatt_timeout(struct timer_list *t) +{ + struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer); + + dev_info(ctrl->device, "ANATT timeout, resetting controller.\n"); + nvme_reset_ctrl(ctrl); +} + +void nvme_mpath_stop(struct nvme_ctrl *ctrl) +{ + if (!nvme_ctrl_use_ana(ctrl)) + return; + del_timer_sync(&ctrl->anatt_timer); + cancel_work_sync(&ctrl->ana_work); +} + +static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid); +} +DEVICE_ATTR_RO(ana_grpid); + +static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); + + return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); +} +DEVICE_ATTR_RO(ana_state); + +static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl, + struct nvme_ana_group_desc *desc, void *data) +{ + struct nvme_ns *ns = data; + + if (ns->ana_grpid == le32_to_cpu(desc->grpid)) { + nvme_update_ns_ana_state(desc, ns); + return -ENXIO; /* just break out of the loop */ + } + + return 0; +} + +void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) +{ + if (nvme_ctrl_use_ana(ns->ctrl)) { + mutex_lock(&ns->ctrl->ana_lock); + ns->ana_grpid = le32_to_cpu(id->anagrpid); + nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state); + mutex_unlock(&ns->ctrl->ana_lock); + } else { + mutex_lock(&ns->head->lock); + ns->ana_state = NVME_ANA_OPTIMIZED; + nvme_mpath_set_live(ns); + mutex_unlock(&ns->head->lock); } - mutex_unlock(&head->subsys->lock); } void nvme_mpath_remove_disk(struct nvme_ns_head *head) { if (!head->disk) return; - sysfs_remove_group(&disk_to_dev(head->disk)->kobj, - &nvme_ns_id_attr_group); - del_gendisk(head->disk); + if (head->disk->flags & GENHD_FL_UP) { + sysfs_remove_group(&disk_to_dev(head->disk)->kobj, + &nvme_ns_id_attr_group); + del_gendisk(head->disk); + } blk_set_queue_dying(head->disk->queue); /* make sure all pending bios are cleaned up */ kblockd_schedule_work(&head->requeue_work); @@ -250,3 +506,52 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) blk_cleanup_queue(head->disk->queue); put_disk(head->disk); } + +int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id) +{ + int error; + + if (!nvme_ctrl_use_ana(ctrl)) + return 0; + + ctrl->anacap = id->anacap; + ctrl->anatt = id->anatt; + ctrl->nanagrpid = le32_to_cpu(id->nanagrpid); + ctrl->anagrpmax = le32_to_cpu(id->anagrpmax); + + mutex_init(&ctrl->ana_lock); + timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); + ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) + + ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc); + if (!(ctrl->anacap & (1 << 6))) + ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32); + + if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) { + dev_err(ctrl->device, + "ANA log page size (%zd) larger than MDTS (%d).\n", + ctrl->ana_log_size, + ctrl->max_hw_sectors << SECTOR_SHIFT); + dev_err(ctrl->device, "disabling ANA support.\n"); + return 0; + } + + INIT_WORK(&ctrl->ana_work, nvme_ana_work); + ctrl->ana_log_buf = 
kmalloc(ctrl->ana_log_size, GFP_KERNEL); + if (!ctrl->ana_log_buf) + goto out; + + error = nvme_read_ana_log(ctrl, true); + if (error) + goto out_free_ana_log_buf; + return 0; +out_free_ana_log_buf: + kfree(ctrl->ana_log_buf); +out: + return -ENOMEM; +} + +void nvme_mpath_uninit(struct nvme_ctrl *ctrl) +{ + kfree(ctrl->ana_log_buf); +} + diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 0c4a33df3b2f..bb4a2003c097 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -102,6 +102,7 @@ struct nvme_request { u8 retries; u8 flags; u16 status; + struct nvme_ctrl *ctrl; }; /* @@ -119,6 +120,13 @@ static inline struct nvme_request *nvme_req(struct request *req) return blk_mq_rq_to_pdu(req); } +static inline u16 nvme_req_qid(struct request *req) +{ + if (!req->rq_disk) + return 0; + return blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(req)) + 1; +} + /* The below value is the specific amount of delay needed before checking * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was @@ -175,6 +183,7 @@ struct nvme_ctrl { u16 oacs; u16 nssa; u16 nr_streams; + u32 max_namespaces; atomic_t abort_limit; u8 vwc; u32 vs; @@ -197,6 +206,19 @@ struct nvme_ctrl { struct work_struct fw_act_work; unsigned long events; +#ifdef CONFIG_NVME_MULTIPATH + /* asymmetric namespace access: */ + u8 anacap; + u8 anatt; + u32 anagrpmax; + u32 nanagrpid; + struct mutex ana_lock; + struct nvme_ana_rsp_hdr *ana_log_buf; + size_t ana_log_size; + struct timer_list anatt_timer; + struct work_struct ana_work; +#endif + /* Power saving configuration */ u64 ps_max_latency_us; bool apst_enabled; @@ -261,6 +283,7 @@ struct nvme_ns_head { struct bio_list requeue_list; spinlock_t requeue_lock; struct work_struct requeue_work; + struct mutex lock; #endif struct list_head list; struct srcu_struct srcu; @@ -287,6 +310,10 @@ struct nvme_ns { struct nvme_ctrl *ctrl; struct request_queue *queue; struct gendisk *disk; +#ifdef CONFIG_NVME_MULTIPATH + enum nvme_ana_state ana_state; + u32 ana_grpid; +#endif struct list_head siblings; struct nvm_dev *ndev; struct kref kref; @@ -299,8 +326,9 @@ struct nvme_ns { bool ext; u8 pi_type; unsigned long flags; -#define NVME_NS_REMOVING 0 -#define NVME_NS_DEAD 1 +#define NVME_NS_REMOVING 0 +#define NVME_NS_DEAD 1 +#define NVME_NS_ANA_PENDING 2 u16 noiob; #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS @@ -356,14 +384,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector) return (sector >> (ns->lba_shift - 9)); } -static inline void nvme_cleanup_cmd(struct request *req) -{ - if (req->rq_flags & RQF_SPECIAL_PAYLOAD) { - kfree(page_address(req->special_vec.bv_page) + - req->special_vec.bv_offset); - } -} - static inline void nvme_end_request(struct request *req, __le16 status, union nvme_result result) { @@ -420,6 +440,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl); #define NVME_QID_ANY -1 struct request *nvme_alloc_request(struct request_queue *q, struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid); +void nvme_cleanup_cmd(struct request *req); blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req, struct nvme_command *cmd); int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, @@ -435,21 +456,24 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl); int nvme_delete_ctrl(struct nvme_ctrl *ctrl); int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl); -int nvme_get_log_ext(struct nvme_ctrl *ctrl, struct nvme_ns *ns, - u8 log_page, void 
*log, size_t size, u64 offset); +int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, + void *log, size_t size, u64 offset); extern const struct attribute_group nvme_ns_id_attr_group; extern const struct block_device_operations nvme_ns_head_ops; #ifdef CONFIG_NVME_MULTIPATH +bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl); void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, struct nvme_ctrl *ctrl, int *flags); void nvme_failover_req(struct request *req); -bool nvme_req_needs_failover(struct request *req, blk_status_t error); void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl); int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head); -void nvme_mpath_add_disk(struct nvme_ns_head *head); +void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id); void nvme_mpath_remove_disk(struct nvme_ns_head *head); +int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id); +void nvme_mpath_uninit(struct nvme_ctrl *ctrl); +void nvme_mpath_stop(struct nvme_ctrl *ctrl); static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) { @@ -468,7 +492,14 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) kblockd_schedule_work(&head->requeue_work); } +extern struct device_attribute dev_attr_ana_grpid; +extern struct device_attribute dev_attr_ana_state; + #else +static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl) +{ + return false; +} /* * Without the multipath code enabled, multiple controller per subsystems are * visible as devices and thus we cannot use the subsystem instance. @@ -482,11 +513,6 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, static inline void nvme_failover_req(struct request *req) { } -static inline bool nvme_req_needs_failover(struct request *req, - blk_status_t error) -{ - return false; -} static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) { } @@ -495,7 +521,8 @@ static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, { return 0; } -static inline void nvme_mpath_add_disk(struct nvme_ns_head *head) +static inline void nvme_mpath_add_disk(struct nvme_ns *ns, + struct nvme_id_ns *id) { } static inline void nvme_mpath_remove_disk(struct nvme_ns_head *head) @@ -507,6 +534,17 @@ static inline void nvme_mpath_clear_current_path(struct nvme_ns *ns) static inline void nvme_mpath_check_last_path(struct nvme_ns *ns) { } +static inline int nvme_mpath_init(struct nvme_ctrl *ctrl, + struct nvme_id_ctrl *id) +{ + return 0; +} +static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl) +{ +} +static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl) +{ +} #endif /* CONFIG_NVME_MULTIPATH */ #ifdef CONFIG_NVM diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index ddd441b1516a..1b9951d2067e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -418,6 +418,8 @@ static int nvme_init_request(struct blk_mq_tag_set *set, struct request *req, BUG_ON(!nvmeq); iod->nvmeq = nvmeq; + + nvme_req(req)->ctrl = &dev->ctrl; return 0; } @@ -535,73 +537,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req) mempool_free(iod->sg, dev->iod_mempool); } -#ifdef CONFIG_BLK_DEV_INTEGRITY -static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) -{ - if (be32_to_cpu(pi->ref_tag) == v) - pi->ref_tag = cpu_to_be32(p); -} - -static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) -{ - if (be32_to_cpu(pi->ref_tag) == p) - pi->ref_tag = cpu_to_be32(v); -} - -/** - * nvme_dif_remap - remaps ref tags to bip seed 
and physical lba - * - * The virtual start sector is the one that was originally submitted by the - * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical - * start sector may be different. Remap protection information to match the - * physical LBA on writes, and back to the original seed on reads. - * - * Type 0 and 3 do not have a ref tag, so no remapping required. - */ -static void nvme_dif_remap(struct request *req, - void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) -{ - struct nvme_ns *ns = req->rq_disk->private_data; - struct bio_integrity_payload *bip; - struct t10_pi_tuple *pi; - void *p, *pmap; - u32 i, nlb, ts, phys, virt; - - if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3) - return; - - bip = bio_integrity(req->bio); - if (!bip) - return; - - pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; - - p = pmap; - virt = bip_get_seed(bip); - phys = nvme_block_nr(ns, blk_rq_pos(req)); - nlb = (blk_rq_bytes(req) >> ns->lba_shift); - ts = ns->disk->queue->integrity.tuple_size; - - for (i = 0; i < nlb; i++, virt++, phys++) { - pi = (struct t10_pi_tuple *)p; - dif_swap(phys, virt, pi); - p += ts; - } - kunmap_atomic(pmap); -} -#else /* CONFIG_BLK_DEV_INTEGRITY */ -static void nvme_dif_remap(struct request *req, - void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) -{ -} -static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) -{ -} -static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) -{ -} -#endif - static void nvme_print_sgl(struct scatterlist *sgl, int nents) { int i; @@ -827,9 +762,6 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, if (blk_rq_map_integrity_sg(q, req->bio, &iod->meta_sg) != 1) goto out_unmap; - if (req_op(req) == REQ_OP_WRITE) - nvme_dif_remap(req, nvme_dif_prep); - if (!dma_map_sg(dev->dev, &iod->meta_sg, 1, dma_dir)) goto out_unmap; } @@ -852,11 +784,8 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) if (iod->nents) { dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir); - if (blk_integrity_rq(req)) { - if (req_op(req) == REQ_OP_READ) - nvme_dif_remap(req, nvme_dif_complete); + if (blk_integrity_rq(req)) dma_unmap_sg(dev->dev, &iod->meta_sg, 1, dma_dir); - } } nvme_cleanup_cmd(req); diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 66ec5985c9f3..0805fa6215ee 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -40,13 +40,14 @@ #define NVME_RDMA_MAX_SEGMENTS 256 -#define NVME_RDMA_MAX_INLINE_SEGMENTS 1 +#define NVME_RDMA_MAX_INLINE_SEGMENTS 4 struct nvme_rdma_device { struct ib_device *dev; struct ib_pd *pd; struct kref ref; struct list_head entry; + unsigned int num_inline_segments; }; struct nvme_rdma_qe { @@ -117,6 +118,7 @@ struct nvme_rdma_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; + bool use_inline_data; }; static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) @@ -249,7 +251,7 @@ static int nvme_rdma_create_qp(struct nvme_rdma_queue *queue, const int factor) /* +1 for drain */ init_attr.cap.max_recv_wr = queue->queue_size + 1; init_attr.cap.max_recv_sge = 1; - init_attr.cap.max_send_sge = 1 + NVME_RDMA_MAX_INLINE_SEGMENTS; + init_attr.cap.max_send_sge = 1 + dev->num_inline_segments; init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; init_attr.qp_type = IB_QPT_RC; init_attr.send_cq = queue->ib_cq; @@ -286,6 +288,7 @@ static int nvme_rdma_init_request(struct blk_mq_tag_set *set, struct ib_device *ibdev = dev->dev; int ret; + nvme_req(rq)->ctrl = 
&ctrl->ctrl; ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), DMA_TO_DEVICE); if (ret) @@ -374,6 +377,8 @@ nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) goto out_free_pd; } + ndev->num_inline_segments = min(NVME_RDMA_MAX_INLINE_SEGMENTS, + ndev->dev->attrs.max_sge - 1); list_add(&ndev->entry, &device_list); out_unlock: mutex_unlock(&device_list_mutex); @@ -868,6 +873,31 @@ out_free_io_queues: return ret; } +static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, + bool remove) +{ + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_stop_queue(&ctrl->queues[0]); + blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, + &ctrl->ctrl); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_destroy_admin_queue(ctrl, remove); +} + +static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, + bool remove) +{ + if (ctrl->ctrl.queue_count > 1) { + nvme_stop_queues(&ctrl->ctrl); + nvme_rdma_stop_io_queues(ctrl); + blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, + &ctrl->ctrl); + if (remove) + nvme_start_queues(&ctrl->ctrl); + nvme_rdma_destroy_io_queues(ctrl, remove); + } +} + static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) { struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); @@ -912,21 +942,44 @@ static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) } } -static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) +static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) { - struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), - struct nvme_rdma_ctrl, reconnect_work); + int ret = -EINVAL; bool changed; - int ret; - ++ctrl->ctrl.nr_reconnects; - - ret = nvme_rdma_configure_admin_queue(ctrl, false); + ret = nvme_rdma_configure_admin_queue(ctrl, new); if (ret) - goto requeue; + return ret; + + if (ctrl->ctrl.icdoff) { + dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); + goto destroy_admin; + } + + if (!(ctrl->ctrl.sgls & (1 << 2))) { + dev_err(ctrl->ctrl.device, + "Mandatory keyed sgls are not supported!\n"); + goto destroy_admin; + } + + if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { + dev_warn(ctrl->ctrl.device, + "queue_size %zu > ctrl sqsize %u, clamping down\n", + ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); + } + + if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { + dev_warn(ctrl->ctrl.device, + "sqsize %u > ctrl maxcmd %u, clamping down\n", + ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); + ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; + } + + if (ctrl->ctrl.sgls & (1 << 20)) + ctrl->use_inline_data = true; if (ctrl->ctrl.queue_count > 1) { - ret = nvme_rdma_configure_io_queues(ctrl, false); + ret = nvme_rdma_configure_io_queues(ctrl, new); if (ret) goto destroy_admin; } @@ -935,10 +988,31 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) if (!changed) { /* state change failure is ok if we're in DELETING state */ WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); - return; + ret = -EINVAL; + goto destroy_io; } nvme_start_ctrl(&ctrl->ctrl); + return 0; + +destroy_io: + if (ctrl->ctrl.queue_count > 1) + nvme_rdma_destroy_io_queues(ctrl, new); +destroy_admin: + nvme_rdma_stop_queue(&ctrl->queues[0]); + nvme_rdma_destroy_admin_queue(ctrl, new); + return ret; +} + +static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) +{ + struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), + struct nvme_rdma_ctrl, reconnect_work); + + ++ctrl->ctrl.nr_reconnects; + + if (nvme_rdma_setup_ctrl(ctrl, 
false)) + goto requeue; dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", ctrl->ctrl.nr_reconnects); @@ -947,9 +1021,6 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) return; -destroy_admin: - nvme_rdma_stop_queue(&ctrl->queues[0]); - nvme_rdma_destroy_admin_queue(ctrl, false); requeue: dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", ctrl->ctrl.nr_reconnects); @@ -962,27 +1033,9 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); - - if (ctrl->ctrl.queue_count > 1) { - nvme_stop_queues(&ctrl->ctrl); - nvme_rdma_stop_io_queues(ctrl); - blk_mq_tagset_busy_iter(&ctrl->tag_set, - nvme_cancel_request, &ctrl->ctrl); - nvme_rdma_destroy_io_queues(ctrl, false); - } - - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); - nvme_rdma_stop_queue(&ctrl->queues[0]); - blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, - nvme_cancel_request, &ctrl->ctrl); - nvme_rdma_destroy_admin_queue(ctrl, false); - - /* - * queues are not a live anymore, so restart the queues to fail fast - * new IO - */ - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); + nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); + nvme_rdma_teardown_admin_queue(ctrl, false); if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { /* state change failure is ok if we're in DELETING state */ @@ -1090,19 +1143,27 @@ static int nvme_rdma_set_sg_null(struct nvme_command *c) } static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, - struct nvme_rdma_request *req, struct nvme_command *c) + struct nvme_rdma_request *req, struct nvme_command *c, + int count) { struct nvme_sgl_desc *sg = &c->common.dptr.sgl; + struct scatterlist *sgl = req->sg_table.sgl; + struct ib_sge *sge = &req->sge[1]; + u32 len = 0; + int i; - req->sge[1].addr = sg_dma_address(req->sg_table.sgl); - req->sge[1].length = sg_dma_len(req->sg_table.sgl); - req->sge[1].lkey = queue->device->pd->local_dma_lkey; + for (i = 0; i < count; i++, sgl++, sge++) { + sge->addr = sg_dma_address(sgl); + sge->length = sg_dma_len(sgl); + sge->lkey = queue->device->pd->local_dma_lkey; + len += sge->length; + } sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); - sg->length = cpu_to_le32(sg_dma_len(req->sg_table.sgl)); + sg->length = cpu_to_le32(len); sg->type = (NVME_SGL_FMT_DATA_DESC << 4) | NVME_SGL_FMT_OFFSET; - req->num_sge++; + req->num_sge += count; return 0; } @@ -1195,15 +1256,16 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue, goto out_free_table; } - if (count == 1) { + if (count <= dev->num_inline_segments) { if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) && + queue->ctrl->use_inline_data && blk_rq_payload_bytes(rq) <= nvme_rdma_inline_data_size(queue)) { - ret = nvme_rdma_map_sg_inline(queue, req, c); + ret = nvme_rdma_map_sg_inline(queue, req, c, count); goto out; } - if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { + if (count == 1 && dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY) { ret = nvme_rdma_map_sg_single(queue, req, c); goto out; } @@ -1574,6 +1636,7 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_CONNECT_ERROR: case RDMA_CM_EVENT_UNREACHABLE: nvme_rdma_destroy_queue_ib(queue); + /* fall through */ case RDMA_CM_EVENT_ADDR_ERROR: dev_dbg(queue->ctrl->ctrl.device, "CM error event %d\n", ev->event); @@ -1736,25 +1799,12 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) 
{ - if (ctrl->ctrl.queue_count > 1) { - nvme_stop_queues(&ctrl->ctrl); - nvme_rdma_stop_io_queues(ctrl); - blk_mq_tagset_busy_iter(&ctrl->tag_set, - nvme_cancel_request, &ctrl->ctrl); - nvme_rdma_destroy_io_queues(ctrl, shutdown); - } - + nvme_rdma_teardown_io_queues(ctrl, shutdown); if (shutdown) nvme_shutdown_ctrl(&ctrl->ctrl); else nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); - - blk_mq_quiesce_queue(ctrl->ctrl.admin_q); - nvme_rdma_stop_queue(&ctrl->queues[0]); - blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, - nvme_cancel_request, &ctrl->ctrl); - blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); - nvme_rdma_destroy_admin_queue(ctrl, shutdown); + nvme_rdma_teardown_admin_queue(ctrl, shutdown); } static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl) @@ -1766,8 +1816,6 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) { struct nvme_rdma_ctrl *ctrl = container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); - int ret; - bool changed; nvme_stop_ctrl(&ctrl->ctrl); nvme_rdma_shutdown_ctrl(ctrl, false); @@ -1778,25 +1826,9 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) return; } - ret = nvme_rdma_configure_admin_queue(ctrl, false); - if (ret) + if (nvme_rdma_setup_ctrl(ctrl, false)) goto out_fail; - if (ctrl->ctrl.queue_count > 1) { - ret = nvme_rdma_configure_io_queues(ctrl, false); - if (ret) - goto out_fail; - } - - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); - if (!changed) { - /* state change failure is ok if we're in DELETING state */ - WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING); - return; - } - - nvme_start_ctrl(&ctrl->ctrl); - return; out_fail: @@ -1959,49 +1991,10 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); WARN_ON_ONCE(!changed); - ret = nvme_rdma_configure_admin_queue(ctrl, true); + ret = nvme_rdma_setup_ctrl(ctrl, true); if (ret) goto out_uninit_ctrl; - /* sanity check icdoff */ - if (ctrl->ctrl.icdoff) { - dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); - ret = -EINVAL; - goto out_remove_admin_queue; - } - - /* sanity check keyed sgls */ - if (!(ctrl->ctrl.sgls & (1 << 2))) { - dev_err(ctrl->ctrl.device, - "Mandatory keyed sgls are not supported!\n"); - ret = -EINVAL; - goto out_remove_admin_queue; - } - - /* only warn if argument is too large here, will clamp later */ - if (opts->queue_size > ctrl->ctrl.sqsize + 1) { - dev_warn(ctrl->ctrl.device, - "queue_size %zu > ctrl sqsize %u, clamping down\n", - opts->queue_size, ctrl->ctrl.sqsize + 1); - } - - /* warn if maxcmd is lower than sqsize+1 */ - if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { - dev_warn(ctrl->ctrl.device, - "sqsize %u > ctrl maxcmd %u, clamping down\n", - ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); - ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; - } - - if (opts->nr_io_queues) { - ret = nvme_rdma_configure_io_queues(ctrl, true); - if (ret) - goto out_remove_admin_queue; - } - - changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); - WARN_ON_ONCE(!changed); - dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", ctrl->ctrl.opts->subsysnqn, &ctrl->addr); @@ -2011,13 +2004,8 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); mutex_unlock(&nvme_rdma_ctrl_mutex); - nvme_start_ctrl(&ctrl->ctrl); - return &ctrl->ctrl; -out_remove_admin_queue: - nvme_rdma_stop_queue(&ctrl->queues[0]); - nvme_rdma_destroy_admin_queue(ctrl, true); out_uninit_ctrl: 
nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c index 41944bbef835..25b0e310f4a8 100644 --- a/drivers/nvme/host/trace.c +++ b/drivers/nvme/host/trace.c @@ -128,3 +128,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, return nvme_trace_common(p, cdw10); } } + +const char *nvme_trace_disk_name(struct trace_seq *p, char *name) +{ + const char *ret = trace_seq_buffer_ptr(p); + + if (*name) + trace_seq_printf(p, "disk=%s, ", name); + trace_seq_putc(p, 0); + + return ret; +} diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h index 01390f0e1671..a490790d6691 100644 --- a/drivers/nvme/host/trace.h +++ b/drivers/nvme/host/trace.h @@ -50,13 +50,8 @@ nvme_admin_opcode_name(nvme_admin_security_recv), \ nvme_admin_opcode_name(nvme_admin_sanitize_nvm)) -const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode, - u8 *cdw10); -#define __parse_nvme_admin_cmd(opcode, cdw10) \ - nvme_trace_parse_admin_cmd(p, opcode, cdw10) - #define nvme_opcode_name(opcode) { opcode, #opcode } -#define show_opcode_name(val) \ +#define show_nvm_opcode_name(val) \ __print_symbolic(val, \ nvme_opcode_name(nvme_cmd_flush), \ nvme_opcode_name(nvme_cmd_write), \ @@ -70,85 +65,92 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode, nvme_opcode_name(nvme_cmd_resv_acquire), \ nvme_opcode_name(nvme_cmd_resv_release)) -const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode, - u8 *cdw10); -#define __parse_nvme_cmd(opcode, cdw10) \ - nvme_trace_parse_nvm_cmd(p, opcode, cdw10) - -TRACE_EVENT(nvme_setup_admin_cmd, - TP_PROTO(struct nvme_command *cmd), - TP_ARGS(cmd), - TP_STRUCT__entry( - __field(u8, opcode) - __field(u8, flags) - __field(u16, cid) - __field(u64, metadata) - __array(u8, cdw10, 24) - ), - TP_fast_assign( - __entry->opcode = cmd->common.opcode; - __entry->flags = cmd->common.flags; - __entry->cid = cmd->common.command_id; - __entry->metadata = le64_to_cpu(cmd->common.metadata); - memcpy(__entry->cdw10, cmd->common.cdw10, - sizeof(__entry->cdw10)); - ), - TP_printk(" cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)", - __entry->cid, __entry->flags, __entry->metadata, - show_admin_opcode_name(__entry->opcode), - __parse_nvme_admin_cmd(__entry->opcode, __entry->cdw10)) -); - +#define show_opcode_name(qid, opcode) \ + (qid ? show_nvm_opcode_name(opcode) : show_admin_opcode_name(opcode)) -TRACE_EVENT(nvme_setup_nvm_cmd, - TP_PROTO(int qid, struct nvme_command *cmd), - TP_ARGS(qid, cmd), +const char *nvme_trace_parse_admin_cmd(struct trace_seq *p, u8 opcode, + u8 *cdw10); +const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p, u8 opcode, + u8 *cdw10); + +#define parse_nvme_cmd(qid, opcode, cdw10) \ + (qid ? 
\ + nvme_trace_parse_nvm_cmd(p, opcode, cdw10) : \ + nvme_trace_parse_admin_cmd(p, opcode, cdw10)) + +const char *nvme_trace_disk_name(struct trace_seq *p, char *name); +#define __print_disk_name(name) \ + nvme_trace_disk_name(p, name) + +#ifndef TRACE_HEADER_MULTI_READ +static inline void __assign_disk_name(char *name, struct gendisk *disk) +{ + if (disk) + memcpy(name, disk->disk_name, DISK_NAME_LEN); + else + memset(name, 0, DISK_NAME_LEN); +} +#endif + +TRACE_EVENT(nvme_setup_cmd, + TP_PROTO(struct request *req, struct nvme_command *cmd), + TP_ARGS(req, cmd), TP_STRUCT__entry( - __field(int, qid) - __field(u8, opcode) - __field(u8, flags) - __field(u16, cid) - __field(u32, nsid) - __field(u64, metadata) - __array(u8, cdw10, 24) + __array(char, disk, DISK_NAME_LEN) + __field(int, ctrl_id) + __field(int, qid) + __field(u8, opcode) + __field(u8, flags) + __field(u16, cid) + __field(u32, nsid) + __field(u64, metadata) + __array(u8, cdw10, 24) ), TP_fast_assign( - __entry->qid = qid; - __entry->opcode = cmd->common.opcode; - __entry->flags = cmd->common.flags; - __entry->cid = cmd->common.command_id; - __entry->nsid = le32_to_cpu(cmd->common.nsid); - __entry->metadata = le64_to_cpu(cmd->common.metadata); - memcpy(__entry->cdw10, cmd->common.cdw10, - sizeof(__entry->cdw10)); + __entry->ctrl_id = nvme_req(req)->ctrl->instance; + __entry->qid = nvme_req_qid(req); + __entry->opcode = cmd->common.opcode; + __entry->flags = cmd->common.flags; + __entry->cid = cmd->common.command_id; + __entry->nsid = le32_to_cpu(cmd->common.nsid); + __entry->metadata = le64_to_cpu(cmd->common.metadata); + __assign_disk_name(__entry->disk, req->rq_disk); + memcpy(__entry->cdw10, cmd->common.cdw10, + sizeof(__entry->cdw10)); ), - TP_printk("qid=%d, nsid=%u, cmdid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)", - __entry->qid, __entry->nsid, __entry->cid, + TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)", + __entry->ctrl_id, __print_disk_name(__entry->disk), + __entry->qid, __entry->cid, __entry->nsid, __entry->flags, __entry->metadata, - show_opcode_name(__entry->opcode), - __parse_nvme_cmd(__entry->opcode, __entry->cdw10)) + show_opcode_name(__entry->qid, __entry->opcode), + parse_nvme_cmd(__entry->qid, __entry->opcode, __entry->cdw10)) ); TRACE_EVENT(nvme_complete_rq, TP_PROTO(struct request *req), TP_ARGS(req), TP_STRUCT__entry( - __field(int, qid) - __field(int, cid) - __field(u64, result) - __field(u8, retries) - __field(u8, flags) - __field(u16, status) + __array(char, disk, DISK_NAME_LEN) + __field(int, ctrl_id) + __field(int, qid) + __field(int, cid) + __field(u64, result) + __field(u8, retries) + __field(u8, flags) + __field(u16, status) ), TP_fast_assign( - __entry->qid = req->q->id; - __entry->cid = req->tag; - __entry->result = le64_to_cpu(nvme_req(req)->result.u64); - __entry->retries = nvme_req(req)->retries; - __entry->flags = nvme_req(req)->flags; - __entry->status = nvme_req(req)->status; + __entry->ctrl_id = nvme_req(req)->ctrl->instance; + __entry->qid = nvme_req_qid(req); + __entry->cid = req->tag; + __entry->result = le64_to_cpu(nvme_req(req)->result.u64); + __entry->retries = nvme_req(req)->retries; + __entry->flags = nvme_req(req)->flags; + __entry->status = nvme_req(req)->status; + __assign_disk_name(__entry->disk, req->rq_disk); ), - TP_printk("qid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u", + TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%llu, retries=%u, flags=0x%x, status=%u", + __entry->ctrl_id, __print_disk_name(__entry->disk), 
__entry->qid, __entry->cid, __entry->result, __entry->retries, __entry->flags, __entry->status) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 38803576d5e1..a21caea1e080 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -19,6 +19,19 @@ #include <asm/unaligned.h> #include "nvmet.h" +/* + * This helper allows us to clear the AEN based on the RAE bit, + * Please use this helper when processing the log pages which are + * associated with the AEN. + */ +static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit) +{ + int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15; + + if (!rae) + clear_bit(aen_bit, &req->sq->ctrl->aen_masked); +} + u32 nvmet_get_log_page_len(struct nvme_command *cmd) { u32 len = le16_to_cpu(cmd->get_log_page.numdu); @@ -128,6 +141,36 @@ out: nvmet_req_complete(req, status); } +static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) +{ + u16 status = NVME_SC_INTERNAL; + struct nvme_effects_log *log; + + log = kzalloc(sizeof(*log), GFP_KERNEL); + if (!log) + goto out; + + log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0); + log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0); + + log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0); + log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0); + + status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); + + kfree(log); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -146,12 +189,76 @@ static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) if (!status) status = nvmet_zero_sgl(req, len, req->data_len - len); ctrl->nr_changed_ns = 0; - clear_bit(NVME_AEN_CFG_NS_ATTR, &ctrl->aen_masked); + nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR); mutex_unlock(&ctrl->lock); out: nvmet_req_complete(req, status); } +static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, + struct nvme_ana_group_desc *desc) +{ + struct nvmet_ctrl *ctrl = req->sq->ctrl; + struct nvmet_ns *ns; + u32 count = 0; + + if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { + rcu_read_lock(); + list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) + if (ns->anagrpid == grpid) + desc->nsids[count++] = cpu_to_le32(ns->nsid); + rcu_read_unlock(); + } + + desc->grpid = cpu_to_le32(grpid); + desc->nnsids = cpu_to_le32(count); + desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt); + desc->state = req->port->ana_state[grpid]; + memset(desc->rsvd17, 0, sizeof(desc->rsvd17)); + return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32); +} + +static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) +{ + struct nvme_ana_rsp_hdr hdr = { 0, }; + struct nvme_ana_group_desc *desc; + size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */ + size_t len; + u32 grpid; + u16 ngrps = 0; + u16 status; + + status = NVME_SC_INTERNAL; + desc = kmalloc(sizeof(struct nvme_ana_group_desc) + + NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL); + if (!desc) + goto out; + + 
down_read(&nvmet_ana_sem); + for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) { + if (!nvmet_ana_group_enabled[grpid]) + continue; + len = nvmet_format_ana_group(req, grpid, desc); + status = nvmet_copy_to_sgl(req, offset, desc, len); + if (status) + break; + offset += len; + ngrps++; + } + + hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt); + hdr.ngrps = cpu_to_le16(ngrps); + nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE); + up_read(&nvmet_ana_sem); + + kfree(desc); + + /* copy the header last once we know the number of groups */ + status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr)); +out: + nvmet_req_complete(req, status); +} + static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; @@ -183,8 +290,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) * the safest is to leave it as zeroes. */ - /* we support multiple ports and multiples hosts: */ - id->cmic = (1 << 0) | (1 << 1); + /* we support multiple ports, multiples hosts and ANA: */ + id->cmic = (1 << 0) | (1 << 1) | (1 << 3); /* no limit on data transfer sizes for now */ id->mdts = 0; @@ -208,7 +315,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) /* first slot is read-only, only one slot supported */ id->frmw = (1 << 0) | (1 << 1); - id->lpa = (1 << 0) | (1 << 2); + id->lpa = (1 << 0) | (1 << 1) | (1 << 2); id->elpe = NVMET_ERROR_LOG_SLOTS - 1; id->npss = 0; @@ -222,6 +329,7 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->maxcmd = cpu_to_le16(NVMET_MAX_CMD); id->nn = cpu_to_le32(ctrl->subsys->max_nsid); + id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES); id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM | NVME_CTRL_ONCS_WRITE_ZEROES); @@ -238,19 +346,24 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->has_keyed_sgls) id->sgls |= cpu_to_le32(1 << 2); - if (ctrl->ops->sqe_inline_size) + if (req->port->inline_data_size) id->sgls |= cpu_to_le32(1 << 20); strcpy(id->subnqn, ctrl->subsys->subsysnqn); /* Max command capsule size is sqe + single page of in-capsule data */ id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) + - ctrl->ops->sqe_inline_size) / 16); + req->port->inline_data_size) / 16); /* Max response capsule size is cqe */ id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16); id->msdbd = ctrl->ops->msdbd; + id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4); + id->anatt = 10; /* random value */ + id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS); + id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS); + /* * Meh, we don't really support any power state. Fake up the same * values that qemu does. @@ -259,6 +372,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->psd[0].entry_lat = cpu_to_le32(0x10); id->psd[0].exit_lat = cpu_to_le32(0x4); + id->nwpc = 1 << 0; /* write protect and no write protect */ + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); kfree(id); @@ -292,8 +407,15 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) * nuse = ncap = nsze isn't always true, but we have no way to find * that out from the underlying device. 
*/ - id->ncap = id->nuse = id->nsze = - cpu_to_le64(ns->size >> ns->blksize_shift); + id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift); + switch (req->port->ana_state[ns->anagrpid]) { + case NVME_ANA_INACCESSIBLE: + case NVME_ANA_PERSISTENT_LOSS: + break; + default: + id->nuse = id->nsze; + break; + } /* * We just provide a single LBA format that matches what the @@ -307,11 +429,14 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) * controllers, but also with any other user of the block device. */ id->nmic = (1 << 0); + id->anagrpid = cpu_to_le32(ns->anagrpid); - memcpy(&id->nguid, &ns->nguid, sizeof(uuid_le)); + memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid)); id->lbaf[0].ds = ns->blksize_shift; + if (ns->readonly) + id->nsattr |= (1 << 0); nvmet_put_namespace(ns); done: status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); @@ -424,6 +549,52 @@ static void nvmet_execute_abort(struct nvmet_req *req) nvmet_req_complete(req, 0); } +static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req) +{ + u16 status; + + if (req->ns->file) + status = nvmet_file_flush(req); + else + status = nvmet_bdev_flush(req); + + if (status) + pr_err("write protect flush failed nsid: %u\n", req->ns->nsid); + return status; +} + +static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) +{ + u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]); + struct nvmet_subsys *subsys = req->sq->ctrl->subsys; + u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE; + + req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid); + if (unlikely(!req->ns)) + return status; + + mutex_lock(&subsys->lock); + switch (write_protect) { + case NVME_NS_WRITE_PROTECT: + req->ns->readonly = true; + status = nvmet_write_protect_flush_sync(req); + if (status) + req->ns->readonly = false; + break; + case NVME_NS_NO_WRITE_PROTECT: + req->ns->readonly = false; + status = 0; + break; + default: + break; + } + + if (!status) + nvmet_ns_changed(subsys, req->ns->nsid); + mutex_unlock(&subsys->lock); + return status; +} + static void nvmet_execute_set_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = req->sq->ctrl->subsys; @@ -454,6 +625,9 @@ static void nvmet_execute_set_features(struct nvmet_req *req) case NVME_FEAT_HOST_ID: status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR; break; + case NVME_FEAT_WRITE_PROTECT: + status = nvmet_set_feat_write_protect(req); + break; default: status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; @@ -462,6 +636,26 @@ static void nvmet_execute_set_features(struct nvmet_req *req) nvmet_req_complete(req, status); } +static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) +{ + struct nvmet_subsys *subsys = req->sq->ctrl->subsys; + u32 result; + + req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid); + if (!req->ns) + return NVME_SC_INVALID_NS | NVME_SC_DNR; + + mutex_lock(&subsys->lock); + if (req->ns->readonly == true) + result = NVME_NS_WRITE_PROTECT; + else + result = NVME_NS_NO_WRITE_PROTECT; + nvmet_set_result(req, result); + mutex_unlock(&subsys->lock); + + return 0; +} + static void nvmet_execute_get_features(struct nvmet_req *req) { struct nvmet_subsys *subsys = req->sq->ctrl->subsys; @@ -513,6 +707,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req) status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, sizeof(req->sq->ctrl->hostid)); break; + case NVME_FEAT_WRITE_PROTECT: + status = nvmet_get_feat_write_protect(req); + break; default: status = NVME_SC_INVALID_FIELD | NVME_SC_DNR; break; @@ -586,6 +783,12 @@ u16 
nvmet_parse_admin_cmd(struct nvmet_req *req) case NVME_LOG_CHANGED_NS: req->execute = nvmet_execute_get_log_changed_ns; return 0; + case NVME_LOG_CMD_EFFECTS: + req->execute = nvmet_execute_get_log_cmd_effects_ns; + return 0; + case NVME_LOG_ANA: + req->execute = nvmet_execute_get_log_page_ana; + return 0; } break; case nvme_admin_identify: diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index ebea1373d1b7..b37a8e3e3f80 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -218,6 +218,35 @@ static ssize_t nvmet_addr_trsvcid_store(struct config_item *item, CONFIGFS_ATTR(nvmet_, addr_trsvcid); +static ssize_t nvmet_param_inline_data_size_show(struct config_item *item, + char *page) +{ + struct nvmet_port *port = to_nvmet_port(item); + + return snprintf(page, PAGE_SIZE, "%d\n", port->inline_data_size); +} + +static ssize_t nvmet_param_inline_data_size_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_port *port = to_nvmet_port(item); + int ret; + + if (port->enabled) { + pr_err("Cannot modify inline_data_size while port enabled\n"); + pr_err("Disable the port before modifying\n"); + return -EACCES; + } + ret = kstrtoint(page, 0, &port->inline_data_size); + if (ret) { + pr_err("Invalid value '%s' for inline_data_size\n", page); + return -EINVAL; + } + return count; +} + +CONFIGFS_ATTR(nvmet_, param_inline_data_size); + static ssize_t nvmet_addr_trtype_show(struct config_item *item, char *page) { @@ -387,6 +416,39 @@ out_unlock: CONFIGFS_ATTR(nvmet_ns_, device_nguid); +static ssize_t nvmet_ns_ana_grpid_show(struct config_item *item, char *page) +{ + return sprintf(page, "%u\n", to_nvmet_ns(item)->anagrpid); +} + +static ssize_t nvmet_ns_ana_grpid_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_ns *ns = to_nvmet_ns(item); + u32 oldgrpid, newgrpid; + int ret; + + ret = kstrtou32(page, 0, &newgrpid); + if (ret) + return ret; + + if (newgrpid < 1 || newgrpid > NVMET_MAX_ANAGRPS) + return -EINVAL; + + down_write(&nvmet_ana_sem); + oldgrpid = ns->anagrpid; + nvmet_ana_group_enabled[newgrpid]++; + ns->anagrpid = newgrpid; + nvmet_ana_group_enabled[oldgrpid]--; + nvmet_ana_chgcnt++; + up_write(&nvmet_ana_sem); + + nvmet_send_ana_event(ns->subsys, NULL); + return count; +} + +CONFIGFS_ATTR(nvmet_ns_, ana_grpid); + static ssize_t nvmet_ns_enable_show(struct config_item *item, char *page) { return sprintf(page, "%d\n", to_nvmet_ns(item)->enabled); @@ -412,11 +474,41 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item, CONFIGFS_ATTR(nvmet_ns_, enable); +static ssize_t nvmet_ns_buffered_io_show(struct config_item *item, char *page) +{ + return sprintf(page, "%d\n", to_nvmet_ns(item)->buffered_io); +} + +static ssize_t nvmet_ns_buffered_io_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_ns *ns = to_nvmet_ns(item); + bool val; + + if (strtobool(page, &val)) + return -EINVAL; + + mutex_lock(&ns->subsys->lock); + if (ns->enabled) { + pr_err("disable ns before setting buffered_io value.\n"); + mutex_unlock(&ns->subsys->lock); + return -EINVAL; + } + + ns->buffered_io = val; + mutex_unlock(&ns->subsys->lock); + return count; +} + +CONFIGFS_ATTR(nvmet_ns_, buffered_io); + static struct configfs_attribute *nvmet_ns_attrs[] = { &nvmet_ns_attr_device_path, &nvmet_ns_attr_device_nguid, &nvmet_ns_attr_device_uuid, + &nvmet_ns_attr_ana_grpid, &nvmet_ns_attr_enable, + &nvmet_ns_attr_buffered_io, NULL, }; @@ -863,6 +955,134 @@ static const struct 
config_item_type nvmet_referrals_type = { .ct_group_ops = &nvmet_referral_group_ops, }; +static struct { + enum nvme_ana_state state; + const char *name; +} nvmet_ana_state_names[] = { + { NVME_ANA_OPTIMIZED, "optimized" }, + { NVME_ANA_NONOPTIMIZED, "non-optimized" }, + { NVME_ANA_INACCESSIBLE, "inaccessible" }, + { NVME_ANA_PERSISTENT_LOSS, "persistent-loss" }, + { NVME_ANA_CHANGE, "change" }, +}; + +static ssize_t nvmet_ana_group_ana_state_show(struct config_item *item, + char *page) +{ + struct nvmet_ana_group *grp = to_ana_group(item); + enum nvme_ana_state state = grp->port->ana_state[grp->grpid]; + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) { + if (state != nvmet_ana_state_names[i].state) + continue; + return sprintf(page, "%s\n", nvmet_ana_state_names[i].name); + } + + return sprintf(page, "\n"); +} + +static ssize_t nvmet_ana_group_ana_state_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_ana_group *grp = to_ana_group(item); + int i; + + for (i = 0; i < ARRAY_SIZE(nvmet_ana_state_names); i++) { + if (sysfs_streq(page, nvmet_ana_state_names[i].name)) + goto found; + } + + pr_err("Invalid value '%s' for ana_state\n", page); + return -EINVAL; + +found: + down_write(&nvmet_ana_sem); + grp->port->ana_state[grp->grpid] = nvmet_ana_state_names[i].state; + nvmet_ana_chgcnt++; + up_write(&nvmet_ana_sem); + + nvmet_port_send_ana_event(grp->port); + return count; +} + +CONFIGFS_ATTR(nvmet_ana_group_, ana_state); + +static struct configfs_attribute *nvmet_ana_group_attrs[] = { + &nvmet_ana_group_attr_ana_state, + NULL, +}; + +static void nvmet_ana_group_release(struct config_item *item) +{ + struct nvmet_ana_group *grp = to_ana_group(item); + + if (grp == &grp->port->ana_default_group) + return; + + down_write(&nvmet_ana_sem); + grp->port->ana_state[grp->grpid] = NVME_ANA_INACCESSIBLE; + nvmet_ana_group_enabled[grp->grpid]--; + up_write(&nvmet_ana_sem); + + nvmet_port_send_ana_event(grp->port); + kfree(grp); +} + +static struct configfs_item_operations nvmet_ana_group_item_ops = { + .release = nvmet_ana_group_release, +}; + +static const struct config_item_type nvmet_ana_group_type = { + .ct_item_ops = &nvmet_ana_group_item_ops, + .ct_attrs = nvmet_ana_group_attrs, + .ct_owner = THIS_MODULE, +}; + +static struct config_group *nvmet_ana_groups_make_group( + struct config_group *group, const char *name) +{ + struct nvmet_port *port = ana_groups_to_port(&group->cg_item); + struct nvmet_ana_group *grp; + u32 grpid; + int ret; + + ret = kstrtou32(name, 0, &grpid); + if (ret) + goto out; + + ret = -EINVAL; + if (grpid <= 1 || grpid > NVMET_MAX_ANAGRPS) + goto out; + + ret = -ENOMEM; + grp = kzalloc(sizeof(*grp), GFP_KERNEL); + if (!grp) + goto out; + grp->port = port; + grp->grpid = grpid; + + down_write(&nvmet_ana_sem); + nvmet_ana_group_enabled[grpid]++; + up_write(&nvmet_ana_sem); + + nvmet_port_send_ana_event(grp->port); + + config_group_init_type_name(&grp->group, name, &nvmet_ana_group_type); + return &grp->group; +out: + return ERR_PTR(ret); +} + +static struct configfs_group_operations nvmet_ana_groups_group_ops = { + .make_group = nvmet_ana_groups_make_group, +}; + +static const struct config_item_type nvmet_ana_groups_type = { + .ct_group_ops = &nvmet_ana_groups_group_ops, + .ct_owner = THIS_MODULE, +}; + /* * Ports definitions. 
*/ @@ -870,6 +1090,7 @@ static void nvmet_port_release(struct config_item *item) { struct nvmet_port *port = to_nvmet_port(item); + kfree(port->ana_state); kfree(port); } @@ -879,6 +1100,7 @@ static struct configfs_attribute *nvmet_port_attrs[] = { &nvmet_attr_addr_traddr, &nvmet_attr_addr_trsvcid, &nvmet_attr_addr_trtype, + &nvmet_attr_param_inline_data_size, NULL, }; @@ -897,6 +1119,7 @@ static struct config_group *nvmet_ports_make(struct config_group *group, { struct nvmet_port *port; u16 portid; + u32 i; if (kstrtou16(name, 0, &portid)) return ERR_PTR(-EINVAL); @@ -905,9 +1128,24 @@ static struct config_group *nvmet_ports_make(struct config_group *group, if (!port) return ERR_PTR(-ENOMEM); + port->ana_state = kcalloc(NVMET_MAX_ANAGRPS + 1, + sizeof(*port->ana_state), GFP_KERNEL); + if (!port->ana_state) { + kfree(port); + return ERR_PTR(-ENOMEM); + } + + for (i = 1; i <= NVMET_MAX_ANAGRPS; i++) { + if (i == NVMET_DEFAULT_ANA_GRPID) + port->ana_state[1] = NVME_ANA_OPTIMIZED; + else + port->ana_state[i] = NVME_ANA_INACCESSIBLE; + } + INIT_LIST_HEAD(&port->entry); INIT_LIST_HEAD(&port->subsystems); INIT_LIST_HEAD(&port->referrals); + port->inline_data_size = -1; /* < 0 == let the transport choose */ port->disc_addr.portid = cpu_to_le16(portid); config_group_init_type_name(&port->group, name, &nvmet_port_type); @@ -920,6 +1158,18 @@ static struct config_group *nvmet_ports_make(struct config_group *group, "referrals", &nvmet_referrals_type); configfs_add_default_group(&port->referrals_group, &port->group); + config_group_init_type_name(&port->ana_groups_group, + "ana_groups", &nvmet_ana_groups_type); + configfs_add_default_group(&port->ana_groups_group, &port->group); + + port->ana_default_group.port = port; + port->ana_default_group.grpid = NVMET_DEFAULT_ANA_GRPID; + config_group_init_type_name(&port->ana_default_group.group, + __stringify(NVMET_DEFAULT_ANA_GRPID), + &nvmet_ana_group_type); + configfs_add_default_group(&port->ana_default_group.group, + &port->ana_groups_group); + return &port->group; } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 9838103f2d62..ebf3e7a6c49e 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -18,6 +18,7 @@ #include "nvmet.h" +struct workqueue_struct *buffered_io_wq; static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX]; static DEFINE_IDA(cntlid_ida); @@ -39,6 +40,10 @@ static DEFINE_IDA(cntlid_ida); */ DECLARE_RWSEM(nvmet_config_sem); +u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1]; +u64 nvmet_ana_chgcnt; +DECLARE_RWSEM(nvmet_ana_sem); + static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port, const char *subsysnqn); @@ -175,7 +180,7 @@ out_unlock: mutex_unlock(&ctrl->lock); } -static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) +void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) { struct nvmet_ctrl *ctrl; @@ -189,6 +194,33 @@ static void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid) } } +void nvmet_send_ana_event(struct nvmet_subsys *subsys, + struct nvmet_port *port) +{ + struct nvmet_ctrl *ctrl; + + mutex_lock(&subsys->lock); + list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { + if (port && ctrl->port != port) + continue; + if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE)) + continue; + nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, + NVME_AER_NOTICE_ANA, NVME_LOG_ANA); + } + mutex_unlock(&subsys->lock); +} + +void nvmet_port_send_ana_event(struct nvmet_port *port) +{ + struct nvmet_subsys_link *p; + + 
down_read(&nvmet_config_sem); + list_for_each_entry(p, &port->subsystems, entry) + nvmet_send_ana_event(p->subsys, port); + up_read(&nvmet_config_sem); +} + int nvmet_register_transport(const struct nvmet_fabrics_ops *ops) { int ret = 0; @@ -241,6 +273,10 @@ int nvmet_enable_port(struct nvmet_port *port) return ret; } + /* If the transport didn't set inline_data_size, then disable it. */ + if (port->inline_data_size < 0) + port->inline_data_size = 0; + port->enabled = true; return 0; } @@ -332,9 +368,13 @@ static void nvmet_ns_dev_disable(struct nvmet_ns *ns) int nvmet_ns_enable(struct nvmet_ns *ns) { struct nvmet_subsys *subsys = ns->subsys; - int ret = 0; + int ret; mutex_lock(&subsys->lock); + ret = -EMFILE; + if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES) + goto out_unlock; + ret = 0; if (ns->enabled) goto out_unlock; @@ -369,6 +409,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns) list_add_tail_rcu(&ns->dev_link, &old->dev_link); } + subsys->nr_namespaces++; nvmet_ns_changed(subsys, ns->nsid); ns->enabled = true; @@ -409,6 +450,7 @@ void nvmet_ns_disable(struct nvmet_ns *ns) percpu_ref_exit(&ns->ref); mutex_lock(&subsys->lock); + subsys->nr_namespaces--; nvmet_ns_changed(subsys, ns->nsid); nvmet_ns_dev_disable(ns); out_unlock: @@ -419,6 +461,10 @@ void nvmet_ns_free(struct nvmet_ns *ns) { nvmet_ns_disable(ns); + down_write(&nvmet_ana_sem); + nvmet_ana_group_enabled[ns->anagrpid]--; + up_write(&nvmet_ana_sem); + kfree(ns->device_path); kfree(ns); } @@ -436,7 +482,14 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) ns->nsid = nsid; ns->subsys = subsys; + + down_write(&nvmet_ana_sem); + ns->anagrpid = NVMET_DEFAULT_ANA_GRPID; + nvmet_ana_group_enabled[ns->anagrpid]++; + up_write(&nvmet_ana_sem); + uuid_gen(&ns->uuid); + ns->buffered_io = false; return ns; } @@ -542,6 +595,35 @@ int nvmet_sq_init(struct nvmet_sq *sq) } EXPORT_SYMBOL_GPL(nvmet_sq_init); +static inline u16 nvmet_check_ana_state(struct nvmet_port *port, + struct nvmet_ns *ns) +{ + enum nvme_ana_state state = port->ana_state[ns->anagrpid]; + + if (unlikely(state == NVME_ANA_INACCESSIBLE)) + return NVME_SC_ANA_INACCESSIBLE; + if (unlikely(state == NVME_ANA_PERSISTENT_LOSS)) + return NVME_SC_ANA_PERSISTENT_LOSS; + if (unlikely(state == NVME_ANA_CHANGE)) + return NVME_SC_ANA_TRANSITION; + return 0; +} + +static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req) +{ + if (unlikely(req->ns->readonly)) { + switch (req->cmd->common.opcode) { + case nvme_cmd_read: + case nvme_cmd_flush: + break; + default: + return NVME_SC_NS_WRITE_PROTECTED; + } + } + + return 0; +} + static u16 nvmet_parse_io_cmd(struct nvmet_req *req) { struct nvme_command *cmd = req->cmd; @@ -554,6 +636,12 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req) req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid); if (unlikely(!req->ns)) return NVME_SC_INVALID_NS | NVME_SC_DNR; + ret = nvmet_check_ana_state(req->port, req->ns); + if (unlikely(ret)) + return ret; + ret = nvmet_io_cmd_check_access(req); + if (unlikely(ret)) + return ret; if (req->ns->file) return nvmet_file_parse_io_cmd(req); @@ -870,6 +958,8 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, nvmet_init_cap(ctrl); + ctrl->port = req->port; + INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); INIT_LIST_HEAD(&ctrl->async_events); @@ -1109,6 +1199,15 @@ static int __init nvmet_init(void) { int error; + nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1; + + buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq", + WQ_MEM_RECLAIM, 0); 
+ if (!buffered_io_wq) { + error = -ENOMEM; + goto out; + } + error = nvmet_init_discovery(); if (error) goto out; @@ -1129,6 +1228,7 @@ static void __exit nvmet_exit(void) nvmet_exit_configfs(); nvmet_exit_discovery(); ida_destroy(&cntlid_ida); + destroy_workqueue(buffered_io_wq); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024); BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024); diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c index 08656b849bd6..eae29f493a07 100644 --- a/drivers/nvme/target/discovery.c +++ b/drivers/nvme/target/discovery.c @@ -171,7 +171,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req) id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */ if (ctrl->ops->has_keyed_sgls) id->sgls |= cpu_to_le32(1 << 2); - if (ctrl->ops->sqe_inline_size) + if (req->port->inline_data_size) id->sgls |= cpu_to_le32(1 << 20); strcpy(id->subnqn, ctrl->subsys->subsysnqn); diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index e0b0f7df70c2..7bc9f6240432 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -124,6 +124,13 @@ static void nvmet_bdev_execute_flush(struct nvmet_req *req) submit_bio(bio); } +u16 nvmet_bdev_flush(struct nvmet_req *req) +{ + if (blkdev_issue_flush(req->ns->bdev, GFP_KERNEL, NULL)) + return NVME_SC_INTERNAL | NVME_SC_DNR; + return 0; +} + static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns, struct nvme_dsm_range *range, struct bio **bio) { diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c index 8c42b3a8c420..81a9dc5290a8 100644 --- a/drivers/nvme/target/io-cmd-file.c +++ b/drivers/nvme/target/io-cmd-file.c @@ -16,6 +16,8 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns) { if (ns->file) { + if (ns->buffered_io) + flush_workqueue(buffered_io_wq); mempool_destroy(ns->bvec_pool); ns->bvec_pool = NULL; kmem_cache_destroy(ns->bvec_cache); @@ -27,11 +29,14 @@ void nvmet_file_ns_disable(struct nvmet_ns *ns) int nvmet_file_ns_enable(struct nvmet_ns *ns) { - int ret; + int flags = O_RDWR | O_LARGEFILE; struct kstat stat; + int ret; + + if (!ns->buffered_io) + flags |= O_DIRECT; - ns->file = filp_open(ns->device_path, - O_RDWR | O_LARGEFILE | O_DIRECT, 0); + ns->file = filp_open(ns->device_path, flags, 0); if (IS_ERR(ns->file)) { pr_err("failed to open file %s: (%ld)\n", ns->device_path, PTR_ERR(ns->file)); @@ -100,7 +105,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, iocb->ki_pos = pos; iocb->ki_filp = req->ns->file; - iocb->ki_flags = IOCB_DIRECT | ki_flags; + iocb->ki_flags = ki_flags | iocb_flags(req->ns->file); ret = call_iter(iocb, &iter); @@ -140,6 +145,12 @@ static void nvmet_file_execute_rw(struct nvmet_req *req) return; } + pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift; + if (unlikely(pos + req->data_len > req->ns->size)) { + nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR); + return; + } + if (nr_bvec > NVMET_MAX_INLINE_BIOVEC) req->f.bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec), GFP_KERNEL); @@ -155,8 +166,6 @@ static void nvmet_file_execute_rw(struct nvmet_req *req) is_sync = true; } - pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift; - memset(&req->f.iocb, 0, sizeof(struct kiocb)); for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); @@ -189,14 +198,31 @@ out: nvmet_file_submit_bvec(req, pos, bv_cnt, total_len); } -static void 
nvmet_file_flush_work(struct work_struct *w) +static void nvmet_file_buffered_io_work(struct work_struct *w) { struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); - int ret; - ret = vfs_fsync(req->ns->file, 1); + nvmet_file_execute_rw(req); +} - nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); +static void nvmet_file_execute_rw_buffered_io(struct nvmet_req *req) +{ + INIT_WORK(&req->f.work, nvmet_file_buffered_io_work); + queue_work(buffered_io_wq, &req->f.work); +} + +u16 nvmet_file_flush(struct nvmet_req *req) +{ + if (vfs_fsync(req->ns->file, 1) < 0) + return NVME_SC_INTERNAL | NVME_SC_DNR; + return 0; +} + +static void nvmet_file_flush_work(struct work_struct *w) +{ + struct nvmet_req *req = container_of(w, struct nvmet_req, f.work); + + nvmet_req_complete(req, nvmet_file_flush(req)); } static void nvmet_file_execute_flush(struct nvmet_req *req) @@ -209,22 +235,30 @@ static void nvmet_file_execute_discard(struct nvmet_req *req) { int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE; struct nvme_dsm_range range; - loff_t offset; - loff_t len; - int i, ret; + loff_t offset, len; + u16 ret; + int i; for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) { - if (nvmet_copy_from_sgl(req, i * sizeof(range), &range, - sizeof(range))) + ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range, + sizeof(range)); + if (ret) break; + offset = le64_to_cpu(range.slba) << req->ns->blksize_shift; len = le32_to_cpu(range.nlb) << req->ns->blksize_shift; - ret = vfs_fallocate(req->ns->file, mode, offset, len); - if (ret) + if (offset + len > req->ns->size) { + ret = NVME_SC_LBA_RANGE | NVME_SC_DNR; break; + } + + if (vfs_fallocate(req->ns->file, mode, offset, len)) { + ret = NVME_SC_INTERNAL | NVME_SC_DNR; + break; + } } - nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); + nvmet_req_complete(req, ret); } static void nvmet_file_dsm_work(struct work_struct *w) @@ -263,6 +297,11 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w) len = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) << req->ns->blksize_shift); + if (unlikely(offset + len > req->ns->size)) { + nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR); + return; + } + ret = vfs_fallocate(req->ns->file, mode, offset, len); nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0); } @@ -280,7 +319,10 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req) switch (cmd->common.opcode) { case nvme_cmd_read: case nvme_cmd_write: - req->execute = nvmet_file_execute_rw; + if (req->ns->buffered_io) + req->execute = nvmet_file_execute_rw_buffered_io; + else + req->execute = nvmet_file_execute_rw; req->data_len = nvmet_rw_len(req); return 0; case nvme_cmd_flush: diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index ae7586b8be07..9908082b32c4 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -227,6 +227,7 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set, { struct nvme_loop_ctrl *ctrl = set->driver_data; + nvme_req(req)->ctrl = &ctrl->ctrl; return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req), (set == &ctrl->tag_set) ? 
hctx_idx + 1 : 0); } diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 480dfe10fad9..ec9af4ee03b6 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -30,12 +30,11 @@ #define NVMET_ASYNC_EVENTS 4 #define NVMET_ERROR_LOG_SLOTS 128 - /* * Supported optional AENs: */ #define NVMET_AEN_CFG_OPTIONAL \ - NVME_AEN_CFG_NS_ATTR + (NVME_AEN_CFG_NS_ATTR | NVME_AEN_CFG_ANA_CHANGE) /* * Plus mandatory SMART AENs (we'll never send them, but allow enabling them): @@ -59,12 +58,15 @@ struct nvmet_ns { struct percpu_ref ref; struct block_device *bdev; struct file *file; + bool readonly; u32 nsid; u32 blksize_shift; loff_t size; u8 nguid[16]; uuid_t uuid; + u32 anagrpid; + bool buffered_io; bool enabled; struct nvmet_subsys *subsys; const char *device_path; @@ -97,6 +99,18 @@ struct nvmet_sq { struct completion confirm_done; }; +struct nvmet_ana_group { + struct config_group group; + struct nvmet_port *port; + u32 grpid; +}; + +static inline struct nvmet_ana_group *to_ana_group(struct config_item *item) +{ + return container_of(to_config_group(item), struct nvmet_ana_group, + group); +} + /** * struct nvmet_port - Common structure to keep port * information for the target. @@ -114,8 +128,12 @@ struct nvmet_port { struct list_head subsystems; struct config_group referrals_group; struct list_head referrals; + struct config_group ana_groups_group; + struct nvmet_ana_group ana_default_group; + enum nvme_ana_state *ana_state; void *priv; bool enabled; + int inline_data_size; }; static inline struct nvmet_port *to_nvmet_port(struct config_item *item) @@ -124,6 +142,13 @@ static inline struct nvmet_port *to_nvmet_port(struct config_item *item) group); } +static inline struct nvmet_port *ana_groups_to_port( + struct config_item *item) +{ + return container_of(to_config_group(item), struct nvmet_port, + ana_groups_group); +} + struct nvmet_ctrl { struct nvmet_subsys *subsys; struct nvmet_cq **cqs; @@ -138,6 +163,8 @@ struct nvmet_ctrl { u16 cntlid; u32 kato; + struct nvmet_port *port; + u32 aen_enabled; unsigned long aen_masked; struct nvmet_req *async_event_cmds[NVMET_ASYNC_EVENTS]; @@ -166,6 +193,7 @@ struct nvmet_subsys { struct kref ref; struct list_head namespaces; + unsigned int nr_namespaces; unsigned int max_nsid; struct list_head ctrls; @@ -225,7 +253,6 @@ struct nvmet_req; struct nvmet_fabrics_ops { struct module *owner; unsigned int type; - unsigned int sqe_inline_size; unsigned int msdbd; bool has_keyed_sgls : 1; void (*queue_response)(struct nvmet_req *req); @@ -269,6 +296,8 @@ struct nvmet_req { const struct nvmet_fabrics_ops *ops; }; +extern struct workqueue_struct *buffered_io_wq; + static inline void nvmet_set_status(struct nvmet_req *req, u16 status) { req->rsp->status = cpu_to_le16(status << 1); @@ -337,6 +366,10 @@ void nvmet_ns_disable(struct nvmet_ns *ns); struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid); void nvmet_ns_free(struct nvmet_ns *ns); +void nvmet_send_ana_event(struct nvmet_subsys *subsys, + struct nvmet_port *port); +void nvmet_port_send_ana_event(struct nvmet_port *port); + int nvmet_register_transport(const struct nvmet_fabrics_ops *ops); void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops); @@ -357,6 +390,22 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd); #define NVMET_QUEUE_SIZE 1024 #define NVMET_NR_QUEUES 128 #define NVMET_MAX_CMD NVMET_QUEUE_SIZE + +/* + * Nice round number that makes a list of nsids fit into a page. + * Should become tunable at some point in the future. 
+ */ +#define NVMET_MAX_NAMESPACES 1024 + +/* + * 0 is not a valid ANA group ID, so we start numbering at 1. + * + * ANA Group 1 exists without manual intervention, has namespaces assigned to it + * by default, and is available in an optimized state through all ports. + */ +#define NVMET_MAX_ANAGRPS 128 +#define NVMET_DEFAULT_ANA_GRPID 1 + #define NVMET_KAS 10 #define NVMET_DISC_KATO 120 @@ -370,6 +419,10 @@ extern struct nvmet_subsys *nvmet_disc_subsys; extern u64 nvmet_genctr; extern struct rw_semaphore nvmet_config_sem; +extern u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1]; +extern u64 nvmet_ana_chgcnt; +extern struct rw_semaphore nvmet_ana_sem; + bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys, const char *hostnqn); @@ -377,6 +430,9 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns); int nvmet_file_ns_enable(struct nvmet_ns *ns); void nvmet_bdev_ns_disable(struct nvmet_ns *ns); void nvmet_file_ns_disable(struct nvmet_ns *ns); +u16 nvmet_bdev_flush(struct nvmet_req *req); +u16 nvmet_file_flush(struct nvmet_req *req); +void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid); static inline u32 nvmet_rw_len(struct nvmet_req *req) { diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 52e0c5d579a7..e7f43d1e1779 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c @@ -33,16 +33,17 @@ #include "nvmet.h" /* - * We allow up to a page of inline data to go with the SQE + * We allow at least 1 page, up to 4 SGEs, and up to 16KB of inline data */ -#define NVMET_RDMA_INLINE_DATA_SIZE PAGE_SIZE +#define NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE PAGE_SIZE +#define NVMET_RDMA_MAX_INLINE_SGE 4 +#define NVMET_RDMA_MAX_INLINE_DATA_SIZE max_t(int, SZ_16K, PAGE_SIZE) struct nvmet_rdma_cmd { - struct ib_sge sge[2]; + struct ib_sge sge[NVMET_RDMA_MAX_INLINE_SGE + 1]; struct ib_cqe cqe; struct ib_recv_wr wr; - struct scatterlist inline_sg; - struct page *inline_page; + struct scatterlist inline_sg[NVMET_RDMA_MAX_INLINE_SGE]; struct nvme_command *nvme_cmd; struct nvmet_rdma_queue *queue; }; @@ -116,6 +117,8 @@ struct nvmet_rdma_device { size_t srq_size; struct kref ref; struct list_head entry; + int inline_data_size; + int inline_page_count; }; static bool nvmet_rdma_use_srq; @@ -138,6 +141,11 @@ static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue); static const struct nvmet_fabrics_ops nvmet_rdma_ops; +static int num_pages(int len) +{ + return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT); +} + /* XXX: really should move to a generic header sooner or later.. 
*/ static inline u32 get_unaligned_le24(const u8 *p) { @@ -184,6 +192,71 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp) spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); } +static void nvmet_rdma_free_inline_pages(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_cmd *c) +{ + struct scatterlist *sg; + struct ib_sge *sge; + int i; + + if (!ndev->inline_data_size) + return; + + sg = c->inline_sg; + sge = &c->sge[1]; + + for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { + if (sge->length) + ib_dma_unmap_page(ndev->device, sge->addr, + sge->length, DMA_FROM_DEVICE); + if (sg_page(sg)) + __free_page(sg_page(sg)); + } +} + +static int nvmet_rdma_alloc_inline_pages(struct nvmet_rdma_device *ndev, + struct nvmet_rdma_cmd *c) +{ + struct scatterlist *sg; + struct ib_sge *sge; + struct page *pg; + int len; + int i; + + if (!ndev->inline_data_size) + return 0; + + sg = c->inline_sg; + sg_init_table(sg, ndev->inline_page_count); + sge = &c->sge[1]; + len = ndev->inline_data_size; + + for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { + pg = alloc_page(GFP_KERNEL); + if (!pg) + goto out_err; + sg_assign_page(sg, pg); + sge->addr = ib_dma_map_page(ndev->device, + pg, 0, PAGE_SIZE, DMA_FROM_DEVICE); + if (ib_dma_mapping_error(ndev->device, sge->addr)) + goto out_err; + sge->length = min_t(int, len, PAGE_SIZE); + sge->lkey = ndev->pd->local_dma_lkey; + len -= sge->length; + } + + return 0; +out_err: + for (; i >= 0; i--, sg--, sge--) { + if (sge->length) + ib_dma_unmap_page(ndev->device, sge->addr, + sge->length, DMA_FROM_DEVICE); + if (sg_page(sg)) + __free_page(sg_page(sg)); + } + return -ENOMEM; +} + static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c, bool admin) { @@ -200,33 +273,17 @@ static int nvmet_rdma_alloc_cmd(struct nvmet_rdma_device *ndev, c->sge[0].length = sizeof(*c->nvme_cmd); c->sge[0].lkey = ndev->pd->local_dma_lkey; - if (!admin) { - c->inline_page = alloc_pages(GFP_KERNEL, - get_order(NVMET_RDMA_INLINE_DATA_SIZE)); - if (!c->inline_page) - goto out_unmap_cmd; - c->sge[1].addr = ib_dma_map_page(ndev->device, - c->inline_page, 0, NVMET_RDMA_INLINE_DATA_SIZE, - DMA_FROM_DEVICE); - if (ib_dma_mapping_error(ndev->device, c->sge[1].addr)) - goto out_free_inline_page; - c->sge[1].length = NVMET_RDMA_INLINE_DATA_SIZE; - c->sge[1].lkey = ndev->pd->local_dma_lkey; - } + if (!admin && nvmet_rdma_alloc_inline_pages(ndev, c)) + goto out_unmap_cmd; c->cqe.done = nvmet_rdma_recv_done; c->wr.wr_cqe = &c->cqe; c->wr.sg_list = c->sge; - c->wr.num_sge = admin ? 1 : 2; + c->wr.num_sge = admin ? 
1 : ndev->inline_page_count + 1; return 0; -out_free_inline_page: - if (!admin) { - __free_pages(c->inline_page, - get_order(NVMET_RDMA_INLINE_DATA_SIZE)); - } out_unmap_cmd: ib_dma_unmap_single(ndev->device, c->sge[0].addr, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); @@ -240,12 +297,8 @@ out: static void nvmet_rdma_free_cmd(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *c, bool admin) { - if (!admin) { - ib_dma_unmap_page(ndev->device, c->sge[1].addr, - NVMET_RDMA_INLINE_DATA_SIZE, DMA_FROM_DEVICE); - __free_pages(c->inline_page, - get_order(NVMET_RDMA_INLINE_DATA_SIZE)); - } + if (!admin) + nvmet_rdma_free_inline_pages(ndev, c); ib_dma_unmap_single(ndev->device, c->sge[0].addr, sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); kfree(c->nvme_cmd); @@ -383,14 +436,21 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev, struct nvmet_rdma_cmd *cmd) { struct ib_recv_wr *bad_wr; + int ret; ib_dma_sync_single_for_device(ndev->device, cmd->sge[0].addr, cmd->sge[0].length, DMA_FROM_DEVICE); if (ndev->srq) - return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); - return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); + ret = ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr); + else + ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr); + + if (unlikely(ret)) + pr_err("post_recv cmd failed\n"); + + return ret; } static void nvmet_rdma_process_wr_wait_list(struct nvmet_rdma_queue *queue) @@ -429,7 +489,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp) rsp->req.sg_cnt, nvmet_data_dir(&rsp->req)); } - if (rsp->req.sg != &rsp->cmd->inline_sg) + if (rsp->req.sg != rsp->cmd->inline_sg) sgl_free(rsp->req.sg); if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) @@ -493,7 +553,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req) rsp->send_sge.addr, rsp->send_sge.length, DMA_TO_DEVICE); - if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) { + if (unlikely(ib_post_send(cm_id->qp, first_wr, &bad_wr))) { pr_err("sending cmd response failed\n"); nvmet_rdma_release_rsp(rsp); } @@ -529,10 +589,25 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) static void nvmet_rdma_use_inline_sg(struct nvmet_rdma_rsp *rsp, u32 len, u64 off) { - sg_init_table(&rsp->cmd->inline_sg, 1); - sg_set_page(&rsp->cmd->inline_sg, rsp->cmd->inline_page, len, off); - rsp->req.sg = &rsp->cmd->inline_sg; - rsp->req.sg_cnt = 1; + int sg_count = num_pages(len); + struct scatterlist *sg; + int i; + + sg = rsp->cmd->inline_sg; + for (i = 0; i < sg_count; i++, sg++) { + if (i < sg_count - 1) + sg_unmark_end(sg); + else + sg_mark_end(sg); + sg->offset = off; + sg->length = min_t(int, len, PAGE_SIZE - off); + len -= sg->length; + if (!i) + off = 0; + } + + rsp->req.sg = rsp->cmd->inline_sg; + rsp->req.sg_cnt = sg_count; } static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) @@ -544,7 +619,7 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp) if (!nvme_is_write(rsp->req.cmd)) return NVME_SC_INVALID_FIELD | NVME_SC_DNR; - if (off + len > NVMET_RDMA_INLINE_DATA_SIZE) { + if (off + len > rsp->queue->dev->inline_data_size) { pr_err("invalid inline data offset!\n"); return NVME_SC_SGL_INVALID_OFFSET | NVME_SC_DNR; } @@ -743,7 +818,7 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev) srq_size = 4095; /* XXX: tune */ srq_attr.attr.max_wr = srq_size; - srq_attr.attr.max_sge = 2; + srq_attr.attr.max_sge = 1 + ndev->inline_page_count; srq_attr.attr.srq_limit = 0; srq_attr.srq_type = IB_SRQT_BASIC; srq = ib_create_srq(ndev->pd, 
&srq_attr); @@ -765,11 +840,16 @@ static int nvmet_rdma_init_srq(struct nvmet_rdma_device *ndev) ndev->srq = srq; ndev->srq_size = srq_size; - for (i = 0; i < srq_size; i++) - nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]); + for (i = 0; i < srq_size; i++) { + ret = nvmet_rdma_post_recv(ndev, &ndev->srq_cmds[i]); + if (ret) + goto out_free_cmds; + } return 0; +out_free_cmds: + nvmet_rdma_free_cmds(ndev, ndev->srq_cmds, ndev->srq_size, false); out_destroy_srq: ib_destroy_srq(srq); return ret; @@ -793,7 +873,10 @@ static void nvmet_rdma_free_dev(struct kref *ref) static struct nvmet_rdma_device * nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id) { + struct nvmet_port *port = cm_id->context; struct nvmet_rdma_device *ndev; + int inline_page_count; + int inline_sge_count; int ret; mutex_lock(&device_list_mutex); @@ -807,6 +890,18 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id) if (!ndev) goto out_err; + inline_page_count = num_pages(port->inline_data_size); + inline_sge_count = max(cm_id->device->attrs.max_sge_rd, + cm_id->device->attrs.max_sge) - 1; + if (inline_page_count > inline_sge_count) { + pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n", + port->inline_data_size, cm_id->device->name, + inline_sge_count * PAGE_SIZE); + port->inline_data_size = inline_sge_count * PAGE_SIZE; + inline_page_count = inline_sge_count; + } + ndev->inline_data_size = port->inline_data_size; + ndev->inline_page_count = inline_page_count; ndev->device = cm_id->device; kref_init(&ndev->ref); @@ -881,7 +976,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) } else { /* +1 for drain */ qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; - qp_attr.cap.max_recv_sge = 2; + qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count; } ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); @@ -899,13 +994,17 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue) if (!ndev->srq) { for (i = 0; i < queue->recv_queue_size; i++) { queue->cmds[i].queue = queue; - nvmet_rdma_post_recv(ndev, &queue->cmds[i]); + ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); + if (ret) + goto err_destroy_qp; } } out: return ret; +err_destroy_qp: + rdma_destroy_qp(queue->cm_id); err_destroy_cq: ib_free_cq(queue->cq); goto out; @@ -1379,6 +1478,15 @@ static int nvmet_rdma_add_port(struct nvmet_port *port) return -EINVAL; } + if (port->inline_data_size < 0) { + port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; + } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { + pr_warn("inline_data_size %u is too large, reducing to %u\n", + port->inline_data_size, + NVMET_RDMA_MAX_INLINE_DATA_SIZE); + port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; + } + ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr, port->disc_addr.trsvcid, &addr); if (ret) { @@ -1456,7 +1564,6 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req, static const struct nvmet_fabrics_ops nvmet_rdma_ops = { .owner = THIS_MODULE, .type = NVMF_TRTYPE_RDMA, - .sqe_inline_size = NVMET_RDMA_INLINE_DATA_SIZE, .msdbd = 1, .has_keyed_sgls = 1, .add_port = nvmet_rdma_add_port, diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 80aca2456353..768953881c9e 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -21,6 +21,7 @@ CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS obj-$(CONFIG_PCMCIA) += pcmcia/ obj-$(CONFIG_SCSI) += scsi_mod.o +obj-$(CONFIG_BLK_SCSI_REQUEST) += scsi_common.o 
obj-$(CONFIG_RAID_ATTRS) += raid_class.o @@ -156,7 +157,6 @@ obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas/ obj-$(CONFIG_SCSI_DEBUG) += scsi_debug.o scsi_mod-y += scsi.o hosts.o scsi_ioctl.o \ scsicam.o scsi_error.o scsi_lib.o -scsi_mod-y += scsi_common.o scsi_mod-$(CONFIG_SCSI_CONSTANTS) += constants.o scsi_mod-$(CONFIG_SCSI_DMA) += scsi_lib_dma.o scsi_mod-y += scsi_scan.o scsi_sysfs.o scsi_devinfo.o diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index e489d89cbb45..379890c4500b 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -339,7 +339,6 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) struct scsi_sense_hdr sshdr; u8 *cmd_buf = NULL; u8 *scsi_cmd = NULL; - u8 *sense_buf = NULL; int rc = 0; int result = 0; int retry_cnt = 0; @@ -348,8 +347,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) retry: cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); - sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); - if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) { + if (unlikely(!cmd_buf || !scsi_cmd)) { rc = -ENOMEM; goto out; } @@ -364,7 +362,7 @@ retry: /* Drop the ioctl read semahpore across lengthy call */ up_read(&cfg->ioctl_rwsem); result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf, - CMD_BUFSIZE, sense_buf, &sshdr, to, CMD_RETRIES, + CMD_BUFSIZE, NULL, &sshdr, to, CMD_RETRIES, 0, 0, NULL); down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); @@ -395,7 +393,6 @@ retry: if (retry_cnt++ < 1) { kfree(cmd_buf); kfree(scsi_cmd); - kfree(sense_buf); goto retry; } } @@ -426,7 +423,6 @@ retry: out: kfree(cmd_buf); kfree(scsi_cmd); - kfree(sense_buf); dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n", __func__, gli->max_lba, gli->blk_len, rc); diff --git a/drivers/scsi/cxlflash/vlun.c b/drivers/scsi/cxlflash/vlun.c index 66e445a17d6c..2c904bf16b65 100644 --- a/drivers/scsi/cxlflash/vlun.c +++ b/drivers/scsi/cxlflash/vlun.c @@ -426,7 +426,6 @@ static int write_same16(struct scsi_device *sdev, { u8 *cmd_buf = NULL; u8 *scsi_cmd = NULL; - u8 *sense_buf = NULL; int rc = 0; int result = 0; u64 offset = lba; @@ -440,8 +439,7 @@ static int write_same16(struct scsi_device *sdev, cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); - sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); - if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) { + if (unlikely(!cmd_buf || !scsi_cmd)) { rc = -ENOMEM; goto out; } @@ -457,7 +455,7 @@ static int write_same16(struct scsi_device *sdev, /* Drop the ioctl read semahpore across lengthy call */ up_read(&cfg->ioctl_rwsem); result = scsi_execute(sdev, scsi_cmd, DMA_TO_DEVICE, cmd_buf, - CMD_BUFSIZE, sense_buf, NULL, to, + CMD_BUFSIZE, NULL, NULL, to, CMD_RETRIES, 0, 0, NULL); down_read(&cfg->ioctl_rwsem); rc = check_state(cfg); @@ -482,7 +480,6 @@ static int write_same16(struct scsi_device *sdev, out: kfree(cmd_buf); kfree(scsi_cmd); - kfree(sense_buf); dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); return rc; } diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index b8d131a455d0..dd738ae5c75b 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -4568,7 +4568,7 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; mpi_request->CDB.EEDP32.PrimaryReferenceTag = - 
cpu_to_be32(scsi_prot_ref_tag(scmd)); + cpu_to_be32(t10_pi_ref_tag(scmd->request)); break; case SCSI_PROT_DIF_TYPE3: diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 41e9ac9fc138..9cb9a166fa0c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -238,7 +238,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) /** - * scsi_execute - insert request and wait for the result + * __scsi_execute - insert request and wait for the result * @sdev: scsi device * @cmd: scsi command * @data_direction: data direction @@ -255,7 +255,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) * Returns the scsi_cmnd result field if a command was executed, or a negative * Linux error code if we didn't get that far. */ -int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, +int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, unsigned char *sense, struct scsi_sense_hdr *sshdr, int timeout, int retries, u64 flags, req_flags_t rq_flags, @@ -309,7 +309,7 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, return ret; } -EXPORT_SYMBOL(scsi_execute); +EXPORT_SYMBOL(__scsi_execute); /* * Function: scsi_init_cmd_errh() diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 9421d9877730..bbebdc3769b0 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1119,7 +1119,7 @@ static int sd_setup_read_write_cmnd(struct scsi_cmnd *SCpnt) SCpnt->cmnd[0] = WRITE_6; if (blk_integrity_rq(rq)) - sd_dif_prepare(SCpnt); + t10_pi_prepare(SCpnt->request, sdkp->protection_type); } else if (rq_data_dir(rq) == READ) { SCpnt->cmnd[0] = READ_6; @@ -2047,8 +2047,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) "sd_done: completed %d of %d bytes\n", good_bytes, scsi_bufflen(SCpnt))); - if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) - sd_dif_complete(SCpnt, good_bytes); + if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) && + good_bytes) + t10_pi_complete(SCpnt->request, sdkp->protection_type, + good_bytes / scsi_prot_interval(SCpnt)); return good_bytes; } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 392c7d078ae3..a7d4f50b67d4 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -254,21 +254,12 @@ static inline unsigned int sd_prot_flag_mask(unsigned int prot_op) #ifdef CONFIG_BLK_DEV_INTEGRITY extern void sd_dif_config_host(struct scsi_disk *); -extern void sd_dif_prepare(struct scsi_cmnd *scmd); -extern void sd_dif_complete(struct scsi_cmnd *, unsigned int); #else /* CONFIG_BLK_DEV_INTEGRITY */ static inline void sd_dif_config_host(struct scsi_disk *disk) { } -static inline int sd_dif_prepare(struct scsi_cmnd *scmd) -{ - return 0; -} -static inline void sd_dif_complete(struct scsi_cmnd *cmd, unsigned int a) -{ -} #endif /* CONFIG_BLK_DEV_INTEGRITY */ diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c index 9035380c0dda..db72c82486e3 100644 --- a/drivers/scsi/sd_dif.c +++ b/drivers/scsi/sd_dif.c @@ -95,116 +95,3 @@ out: blk_integrity_register(disk, &bi); } -/* - * The virtual start sector is the one that was originally submitted - * by the block layer. Due to partitioning, MD/DM cloning, etc. the - * actual physical start sector is likely to be different. Remap - * protection information to match the physical LBA. - * - * From a protocol perspective there's a slight difference between - * Type 1 and 2. The latter uses 32-byte CDBs exclusively, and the - * reference tag is seeded in the CDB. 
This gives us the potential to - * avoid virt->phys remapping during write. However, at read time we - * don't know whether the virt sector is the same as when we wrote it - * (we could be reading from real disk as opposed to MD/DM device. So - * we always remap Type 2 making it identical to Type 1. - * - * Type 3 does not have a reference tag so no remapping is required. - */ -void sd_dif_prepare(struct scsi_cmnd *scmd) -{ - const int tuple_sz = sizeof(struct t10_pi_tuple); - struct bio *bio; - struct scsi_disk *sdkp; - struct t10_pi_tuple *pi; - u32 phys, virt; - - sdkp = scsi_disk(scmd->request->rq_disk); - - if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION) - return; - - phys = scsi_prot_ref_tag(scmd); - - __rq_for_each_bio(bio, scmd->request) { - struct bio_integrity_payload *bip = bio_integrity(bio); - struct bio_vec iv; - struct bvec_iter iter; - unsigned int j; - - /* Already remapped? */ - if (bip->bip_flags & BIP_MAPPED_INTEGRITY) - break; - - virt = bip_get_seed(bip) & 0xffffffff; - - bip_for_each_vec(iv, bip, iter) { - pi = kmap_atomic(iv.bv_page) + iv.bv_offset; - - for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) { - - if (be32_to_cpu(pi->ref_tag) == virt) - pi->ref_tag = cpu_to_be32(phys); - - virt++; - phys++; - } - - kunmap_atomic(pi); - } - - bip->bip_flags |= BIP_MAPPED_INTEGRITY; - } -} - -/* - * Remap physical sector values in the reference tag to the virtual - * values expected by the block layer. - */ -void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes) -{ - const int tuple_sz = sizeof(struct t10_pi_tuple); - struct scsi_disk *sdkp; - struct bio *bio; - struct t10_pi_tuple *pi; - unsigned int j, intervals; - u32 phys, virt; - - sdkp = scsi_disk(scmd->request->rq_disk); - - if (sdkp->protection_type == T10_PI_TYPE3_PROTECTION || good_bytes == 0) - return; - - intervals = good_bytes / scsi_prot_interval(scmd); - phys = scsi_prot_ref_tag(scmd); - - __rq_for_each_bio(bio, scmd->request) { - struct bio_integrity_payload *bip = bio_integrity(bio); - struct bio_vec iv; - struct bvec_iter iter; - - virt = bip_get_seed(bip) & 0xffffffff; - - bip_for_each_vec(iv, bip, iter) { - pi = kmap_atomic(iv.bv_page) + iv.bv_offset; - - for (j = 0; j < iv.bv_len; j += tuple_sz, pi++) { - - if (intervals == 0) { - kunmap_atomic(pi); - return; - } - - if (be32_to_cpu(pi->ref_tag) == phys) - pi->ref_tag = cpu_to_be32(virt); - - virt++; - phys++; - intervals--; - } - - kunmap_atomic(pi); - } - } -} - diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c index 35fab1e18adc..ffcf902da390 100644 --- a/drivers/scsi/sr_ioctl.c +++ b/drivers/scsi/sr_ioctl.c @@ -186,14 +186,13 @@ static int sr_play_trkind(struct cdrom_device_info *cdi, int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) { struct scsi_device *SDev; - struct scsi_sense_hdr sshdr; + struct scsi_sense_hdr local_sshdr, *sshdr = &local_sshdr; int result, err = 0, retries = 0; - unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE], *senseptr = NULL; SDev = cd->device; - if (cgc->sense) - senseptr = sense_buffer; + if (cgc->sshdr) + sshdr = cgc->sshdr; retry: if (!scsi_block_when_processing_errors(SDev)) { @@ -202,15 +201,12 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) } result = scsi_execute(SDev, cgc->cmd, cgc->data_direction, - cgc->buffer, cgc->buflen, senseptr, &sshdr, + cgc->buffer, cgc->buflen, NULL, sshdr, cgc->timeout, IOCTL_RETRIES, 0, 0, NULL); - if (cgc->sense) - memcpy(cgc->sense, sense_buffer, sizeof(*cgc->sense)); - /* Minimal error checking. 
Ignore cases we know about, and report the rest. */ if (driver_byte(result) != 0) { - switch (sshdr.sense_key) { + switch (sshdr->sense_key) { case UNIT_ATTENTION: SDev->changed = 1; if (!cgc->quiet) @@ -221,8 +217,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) err = -ENOMEDIUM; break; case NOT_READY: /* This happens if there is no disc in drive */ - if (sshdr.asc == 0x04 && - sshdr.ascq == 0x01) { + if (sshdr->asc == 0x04 && + sshdr->ascq == 0x01) { /* sense: Logical unit is in process of becoming ready */ if (!cgc->quiet) sr_printk(KERN_INFO, cd, @@ -245,8 +241,8 @@ int sr_do_ioctl(Scsi_CD *cd, struct packet_command *cgc) break; case ILLEGAL_REQUEST: err = -EIO; - if (sshdr.asc == 0x20 && - sshdr.ascq == 0x00) + if (sshdr->asc == 0x20 && + sshdr->ascq == 0x00) /* sense: Invalid command operation code */ err = -EDRIVE_CANT_DO_THIS; break; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 6dc8891ccb74..1c72db94270e 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -513,12 +513,12 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, if (sc->sc_data_direction == DMA_TO_DEVICE) cmd_pi->pi_bytesout = cpu_to_virtio32(vdev, - blk_rq_sectors(rq) * - bi->tuple_size); + bio_integrity_bytes(bi, + blk_rq_sectors(rq))); else if (sc->sc_data_direction == DMA_FROM_DEVICE) cmd_pi->pi_bytesin = cpu_to_virtio32(vdev, - blk_rq_sectors(rq) * - bi->tuple_size); + bio_integrity_bytes(bi, + blk_rq_sectors(rq))); } #endif diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig index 4c44d7bed01a..cb6f32ce7de8 100644 --- a/drivers/target/Kconfig +++ b/drivers/target/Kconfig @@ -1,10 +1,10 @@ menuconfig TARGET_CORE tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure" - depends on SCSI && BLOCK + depends on BLOCK select CONFIGFS_FS select CRC_T10DIF - select BLK_SCSI_REQUEST # only for scsi_command_size_tbl.. + select BLK_SCSI_REQUEST select SGL_ALLOC default n help @@ -29,6 +29,7 @@ config TCM_FILEIO config TCM_PSCSI tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI" + depends on SCSI help Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered passthrough access to Linux/SCSI device diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig index abe8ecbcdf06..158ee9d522f7 100644 --- a/drivers/target/loopback/Kconfig +++ b/drivers/target/loopback/Kconfig @@ -1,5 +1,6 @@ config LOOPBACK_TARGET tristate "TCM Virtual SAS target and Linux/SCSI LDD fabric loopback module" + depends on SCSI help Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD fabric loopback module.
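
The ANA plumbing above is surfaced entirely through nvmet's configfs tree: the patch adds an ana_groups directory and a per-group ana_state file under each port, ana_grpid and buffered_io attributes under each namespace, and a param_inline_data_size attribute on the port itself. The short userspace sketch below is not part of the commit; it only illustrates how those attributes would be exercised. The directory and attribute names come from the CONFIGFS_ATTR() and config_group_init_type_name() calls in the diff, while the configfs mount point (/sys/kernel/config) and the port, subsystem and namespace names ("1", "nqn.2018-08.io.example:test", "1") are placeholder assumptions for an already-created target.

/*
 * Hypothetical userspace sketch, not part of the commit: drive the new
 * nvmet ANA configfs attributes.  Attribute names come from the patch;
 * the object names below are illustrative placeholders.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

#define NVMET_CFS "/sys/kernel/config/nvmet"

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -errno;
	fputs(val, f);
	return fclose(f) ? -errno : 0;
}

int main(void)
{
	/* User-created ANA group IDs must fall in 2..NVMET_MAX_ANAGRPS (128);
	 * group 1 is the built-in default group every port starts with. */
	if (mkdir(NVMET_CFS "/ports/1/ana_groups/2", 0755) && errno != EEXIST)
		perror("create ANA group 2");

	/* Valid states, per nvmet_ana_state_names[]: optimized,
	 * non-optimized, inaccessible, persistent-loss, change. */
	write_attr(NVMET_CFS "/ports/1/ana_groups/2/ana_state",
		   "inaccessible\n");

	/* Re-homing a namespace bumps the ANA change count and triggers an
	 * ANA change AEN on controllers that enabled it. */
	write_attr(NVMET_CFS "/subsystems/nqn.2018-08.io.example:test"
		   "/namespaces/1/ana_grpid", "2\n");

	/* Rejected with EACCES while the port is enabled; the RDMA transport
	 * defaults this to PAGE_SIZE and caps it at max(16KB, PAGE_SIZE). */
	write_attr(NVMET_CFS "/ports/1/param_inline_data_size", "8192\n");

	return 0;
}

Much like param_inline_data_size on the port, buffered_io can only be changed while the namespace is disabled; when set, the file-backed path opens the backing file without O_DIRECT and defers reads and writes to the new nvmet-buffered-io-wq workqueue instead of issuing them inline.

Separately, the RDMA transport now sizes its receive SGE list from the per-port inline_data_size rather than a fixed single page, using the num_pages() helper added in the diff. The snippet below is only a standalone re-check of that rounding arithmetic, assuming 4KB pages; PAGE_SHIFT, PAGE_SIZE and PAGE_MASK are hard-coded here for illustration, whereas the kernel takes them from the architecture.

/* Standalone re-check of the num_pages() rounding used for inline data. */
#include <assert.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1 << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static int num_pages(int len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

int main(void)
{
	assert(num_pages(1) == 1);
	assert(num_pages(PAGE_SIZE) == 1);
	assert(num_pages(PAGE_SIZE + 1) == 2);
	assert(num_pages(16384) == 4);	/* 16KB max inline -> 4 data SGEs */
	return 0;
}

On a 4KB-page system the 16KB ceiling therefore yields at most four inline data SGEs, consistent with NVMET_RDMA_MAX_INLINE_SGE being 4 and the receive WR being posted with inline_page_count + 1 SGEs.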