author    | Jason Gunthorpe <jgg@nvidia.com>             | 2021-10-26 14:57:31 -0300
committer | Alex Williamson <alex.williamson@redhat.com> | 2021-10-28 11:06:31 -0600
commit    | 0972c7dddf716a781dbe9abf4d042264b679ab53
tree      | 94e4564bb3c225b9b166982c6156a3b00c97069f /drivers/s390
parent    | d0a9329d460cbc2f8150da520b1b75e397bbef9f
vfio/ccw: Use functions for alloc/free of the vfio_ccw_private
Make it easier to see which parts of the code manage the memory lifecycle of
the vfio_ccw_private and which parts handle other setup.
Reviewed-by: Eric Farman <farman@linux.ibm.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v4-cea4f5bd2c00+b52-ccw_mdev_jgg@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/cio/vfio_ccw_drv.c | 113 |
1 file changed, 66 insertions(+), 47 deletions(-)
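The patch below splits setup and teardown of the vfio_ccw_private into a constructor, vfio_ccw_alloc_private(), which returns an error pointer on failure, and a destructor, vfio_ccw_free_private(), which the probe error path and the remove path can share. As a rough, self-contained sketch of that pattern, assuming a kernel-module context (struct foo, foo_alloc(), foo_free() and foo_probe() are hypothetical stand-ins, not part of the driver):

```c
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical private data; stands in for vfio_ccw_private. */
struct foo {
	void *region;
};

/* Constructor: do every allocation here, encode the errno on failure. */
static struct foo *foo_alloc(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);

	f->region = kzalloc(64, GFP_KERNEL);
	if (!f->region) {
		kfree(f);
		return ERR_PTR(-ENOMEM);
	}
	return f;
}

/* Destructor: one teardown helper shared by every exit path. */
static void foo_free(struct foo *f)
{
	kfree(f->region);
	kfree(f);
}

static int foo_probe(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);	/* propagate the encoded errno */

	/* ... further setup; on any later failure, just call foo_free(f) ... */
	foo_free(f);
	return 0;
}
```

The call-site handling mirrors what the new vfio_ccw_sch_probe() does: IS_ERR()/PTR_ERR() simply forward whatever errno the constructor encoded with ERR_PTR().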
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 371558ec9204..e32678a71644 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -137,77 +137,107 @@ static void vfio_ccw_sch_irq(struct subchannel *sch)
 	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
 }
 
-static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
+static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
 {
-	if (private->crw_region)
-		kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
-	if (private->schib_region)
-		kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
-	if (private->cmd_region)
-		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
-	if (private->io_region)
-		kmem_cache_free(vfio_ccw_io_region, private->io_region);
-}
-
-static int vfio_ccw_sch_probe(struct subchannel *sch)
-{
-	struct pmcw *pmcw = &sch->schib.pmcw;
 	struct vfio_ccw_private *private;
-	int ret = -ENOMEM;
-
-	if (pmcw->qf) {
-		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
-			 dev_name(&sch->dev));
-		return -ENODEV;
-	}
 
 	private = kzalloc(sizeof(*private), GFP_KERNEL);
 	if (!private)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
+
+	private->sch = sch;
+	mutex_init(&private->io_mutex);
+	private->state = VFIO_CCW_STATE_NOT_OPER;
+	INIT_LIST_HEAD(&private->crw);
+	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
+	atomic_set(&private->avail, 1);
 
 	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
 				       GFP_KERNEL);
 	if (!private->cp.guest_cp)
-		goto out_free;
+		goto out_free_private;
 
 	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
 					       GFP_KERNEL | GFP_DMA);
 	if (!private->io_region)
-		goto out_free;
+		goto out_free_cp;
 
 	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
 						GFP_KERNEL | GFP_DMA);
 	if (!private->cmd_region)
-		goto out_free;
+		goto out_free_io;
 
 	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
 						  GFP_KERNEL | GFP_DMA);
 	if (!private->schib_region)
-		goto out_free;
+		goto out_free_cmd;
 
 	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
 						GFP_KERNEL | GFP_DMA);
 	if (!private->crw_region)
-		goto out_free;
+		goto out_free_schib;
+	return private;
+
+out_free_schib:
+	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+out_free_cmd:
+	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+out_free_io:
+	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+out_free_cp:
+	kfree(private->cp.guest_cp);
+out_free_private:
+	mutex_destroy(&private->io_mutex);
+	kfree(private);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void vfio_ccw_free_private(struct vfio_ccw_private *private)
+{
+	struct vfio_ccw_crw *crw, *temp;
+
+	list_for_each_entry_safe(crw, temp, &private->crw, next) {
+		list_del(&crw->next);
+		kfree(crw);
+	}
+
+	kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
+	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+	kmem_cache_free(vfio_ccw_io_region, private->io_region);
+	kfree(private->cp.guest_cp);
+	mutex_destroy(&private->io_mutex);
+	kfree(private);
+}
+
+static int vfio_ccw_sch_probe(struct subchannel *sch)
+{
+	struct pmcw *pmcw = &sch->schib.pmcw;
+	struct vfio_ccw_private *private;
+	int ret = -ENOMEM;
+
+	if (pmcw->qf) {
+		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
+			 dev_name(&sch->dev));
+		return -ENODEV;
+	}
+
+	private = vfio_ccw_alloc_private(sch);
+	if (IS_ERR(private))
+		return PTR_ERR(private);
 
-	private->sch = sch;
 	dev_set_drvdata(&sch->dev, private);
-	mutex_init(&private->io_mutex);
 
 	spin_lock_irq(sch->lock);
-	private->state = VFIO_CCW_STATE_NOT_OPER;
 	sch->isc = VFIO_CCW_ISC;
 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
 	spin_unlock_irq(sch->lock);
 	if (ret)
 		goto out_free;
 
-	INIT_LIST_HEAD(&private->crw);
-	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
-	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
-	atomic_set(&private->avail, 1);
 	private->state = VFIO_CCW_STATE_STANDBY;
 
 	ret = vfio_ccw_mdev_reg(sch);
@@ -228,31 +258,20 @@ out_disable:
 	cio_disable_subchannel(sch);
 out_free:
 	dev_set_drvdata(&sch->dev, NULL);
-	vfio_ccw_free_regions(private);
-	kfree(private->cp.guest_cp);
-	kfree(private);
+	vfio_ccw_free_private(private);
 	return ret;
 }
 
 static void vfio_ccw_sch_remove(struct subchannel *sch)
 {
 	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
-	struct vfio_ccw_crw *crw, *temp;
 
 	vfio_ccw_sch_quiesce(sch);
-
-	list_for_each_entry_safe(crw, temp, &private->crw, next) {
-		list_del(&crw->next);
-		kfree(crw);
-	}
-
 	vfio_ccw_mdev_unreg(sch);
 
 	dev_set_drvdata(&sch->dev, NULL);
 
-	vfio_ccw_free_regions(private);
-	kfree(private->cp.guest_cp);
-	kfree(private);
+	vfio_ccw_free_private(private);
 
 	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
 			   sch->schid.cssid,
 			   sch->schid.ssid,
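A detail worth noting in the new vfio_ccw_alloc_private() above is the unwind ladder: each failed allocation jumps to a label that frees only what already succeeded, and the labels fall through in reverse allocation order (out_free_schib → out_free_cmd → out_free_io → out_free_cp → out_free_private). A minimal sketch of that idiom, using hypothetical names (struct bar, bar_alloc()) rather than the driver's:

```c
#include <linux/err.h>
#include <linux/slab.h>

struct bar {
	void *first;
	void *second;
};

static struct bar *bar_alloc(void)
{
	struct bar *b = kzalloc(sizeof(*b), GFP_KERNEL);

	if (!b)
		return ERR_PTR(-ENOMEM);

	b->first = kzalloc(32, GFP_KERNEL);
	if (!b->first)
		goto out_free_b;	/* nothing else to undo yet */

	b->second = kzalloc(32, GFP_KERNEL);
	if (!b->second)
		goto out_free_first;	/* 'first' succeeded, undo it too */

	return b;

out_free_first:				/* labels run in reverse allocation order */
	kfree(b->first);
out_free_b:
	kfree(b);
	return ERR_PTR(-ENOMEM);
}
```

Because each label falls through into the next, adding another allocation later only means adding one more label at the top of the ladder, which is what keeps the driver's five-step chain easy to read and extend.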