Diffstat (limited to 'drivers/s390')
26 files changed, 2478 insertions, 585 deletions
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 4a3b62326183..0acb8c2f9475 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -14,6 +14,7 @@ config BLK_DEV_XPRAM config DCSSBLK def_tristate m + select DAX prompt "DCSSBLK support" depends on S390 && BLOCK help diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index 774da20ceb58..107cd3361e29 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -1052,8 +1052,9 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense) } else { /* fatal error - set status to FAILED internal error 09 - Command Reject */ - dev_err(&device->cdev->dev, "An error occurred in the DASD " - "device driver, reason=%s\n", "09"); + if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags)) + dev_err(&device->cdev->dev, + "An error occurred in the DASD device driver, reason=09\n"); erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED); } diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 0b38217f8147..122456e4db89 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -4927,10 +4927,14 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, dasd_eckd_dump_sense_tcw(device, req, irb); } else { /* - * In some cases the 'No Record Found' error might be expected - * and log messages shouldn't be written then. Check if the - * according suppress bit is set. + * In some cases the 'Command Reject' or 'No Record Found' + * error might be expected and log messages shouldn't be + * written then. Check if the corresponding suppress bit is set. */ + if (sense && sense[0] & SNS0_CMD_REJECT && + test_bit(DASD_CQR_SUPPRESS_CR, &req->flags)) + return; + if (sense && sense[1] & SNS1_NO_REC_FOUND && test_bit(DASD_CQR_SUPPRESS_NRF, &req->flags)) return; @@ -5172,6 +5176,10 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, if (!device->block && private->lcu->pav == HYPER_PAV) return -EOPNOTSUPP; + /* may not be supported by the storage server */ + if (!(private->features.feature[14] & 0x80)) + return -EOPNOTSUPP; + cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */, sizeof(struct dasd_psf_prssd_data) + 1, device); @@ -5219,6 +5227,8 @@ static int dasd_eckd_query_host_access(struct dasd_device *device, cqr->buildclk = get_tod_clock(); cqr->status = DASD_CQR_FILLED; + /* the command might not be supported, suppress error message */ + __set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags); rc = dasd_sleep_on_interruptible(cqr); if (rc == 0) { *data = *host_access; diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 518dba2732d5..dca7cb1e6f65 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -239,11 +239,11 @@ struct dasd_ccw_req { */ /* * The following flags are used to suppress output of certain errors. - * These flags should only be used for format checks! */ #define DASD_CQR_SUPPRESS_NRF 4 /* Suppress 'No Record Found' error */ #define DASD_CQR_SUPPRESS_FP 5 /* Suppress 'File Protected' error*/ #define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ +#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ /* Signature for error recovery functions. */ typedef struct dasd_ccw_req *(*dasd_erp_fn_t) (struct dasd_ccw_req *);
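The new DASD_CQR_SUPPRESS_CR bit is set per request, before the request is started. A minimal sketch of that usage, modeled on the dasd_eckd_query_host_access() hunk above (example_issue() is a hypothetical helper; the cqr is assumed to be fully built already):

	/*
	 * Sketch: issue a request whose command may be rejected by the
	 * storage server without flooding the kernel log.
	 */
	static int example_issue(struct dasd_ccw_req *cqr)
	{
		cqr->buildclk = get_tod_clock();
		cqr->status = DASD_CQR_FILLED;
		/* the command might not be supported, suppress error message */
		__set_bit(DASD_CQR_SUPPRESS_CR, &cqr->flags);

		return dasd_sleep_on_interruptible(cqr);
	}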
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 415d10a67b7a..36e5280af3e4 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -18,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/pfn_t.h> +#include <linux/dax.h> #include <asm/extmem.h> #include <asm/io.h> @@ -30,8 +31,8 @@ static int dcssblk_open(struct block_device *bdev, fmode_t mode); static void dcssblk_release(struct gendisk *disk, fmode_t mode); static blk_qc_t dcssblk_make_request(struct request_queue *q, struct bio *bio); -static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum, - void **kaddr, pfn_t *pfn, long size); +static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, + long nr_pages, void **kaddr, pfn_t *pfn); static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0"; @@ -40,7 +41,10 @@ static const struct block_device_operations dcssblk_devops = { .owner = THIS_MODULE, .open = dcssblk_open, .release = dcssblk_release, - .direct_access = dcssblk_direct_access, +}; + +static const struct dax_operations dcssblk_dax_ops = { + .direct_access = dcssblk_dax_direct_access, }; struct dcssblk_dev_info { @@ -57,6 +61,7 @@ struct dcssblk_dev_info { struct request_queue *dcssblk_queue; int num_of_segments; struct list_head seg_list; + struct dax_device *dax_dev; }; struct segment_info { @@ -389,6 +394,8 @@ removeseg: } list_del(&dev_info->lh); + kill_dax(dev_info->dax_dev); + put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; @@ -654,6 +661,13 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char if (rc) goto put_dev; + dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name, + &dcssblk_dax_ops); + if (!dev_info->dax_dev) { + rc = -ENOMEM; + goto put_dev; + } + get_device(&dev_info->dev); device_add_disk(&dev_info->dev, dev_info->gd); @@ -752,6 +766,8 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch } list_del(&dev_info->lh); + kill_dax(dev_info->dax_dev); + put_dax(dev_info->dax_dev); del_gendisk(dev_info->gd); blk_cleanup_queue(dev_info->dcssblk_queue); dev_info->gd->queue = NULL; @@ -883,21 +899,26 @@ fail: } static long -dcssblk_direct_access (struct block_device *bdev, sector_t secnum, - void **kaddr, pfn_t *pfn, long size) +__dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff, + long nr_pages, void **kaddr, pfn_t *pfn) { - struct dcssblk_dev_info *dev_info; - unsigned long offset, dev_sz; + resource_size_t offset = pgoff * PAGE_SIZE; + unsigned long dev_sz; - dev_info = bdev->bd_disk->private_data; - if (!dev_info) - return -ENODEV; dev_sz = dev_info->end - dev_info->start + 1; - offset = secnum * 512; *kaddr = (void *) dev_info->start + offset; *pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV); - return dev_sz - offset; + return (dev_sz - offset) / PAGE_SIZE; +} + +static long +dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, + long nr_pages, void **kaddr, pfn_t *pfn) +{ + struct dcssblk_dev_info *dev_info = dax_get_private(dax_dev); + + return __dcssblk_direct_access(dev_info, pgoff, nr_pages, kaddr, pfn); } static void diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile index 3ab9aedeb84a..bdf47526038a 100644 --- a/drivers/s390/cio/Makefile +++ b/drivers/s390/cio/Makefile @@ -17,3 +17,6 @@
obj-$(CONFIG_CCWGROUP) += ccwgroup.o qdio-objs := qdio_main.o qdio_thinint.o qdio_debug.o qdio_setup.o obj-$(CONFIG_QDIO) += qdio.o + +vfio_ccw-objs += vfio_ccw_drv.o vfio_ccw_cp.o vfio_ccw_ops.o vfio_ccw_fsm.o +obj-$(CONFIG_VFIO_CCW) += vfio_ccw.o diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 1b350665c823..89216174fcbb 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -170,12 +170,14 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ return ccode; } } +EXPORT_SYMBOL_GPL(cio_start_key); int cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) { return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); } +EXPORT_SYMBOL_GPL(cio_start); /* * resume suspended I/O operation @@ -208,6 +210,7 @@ cio_resume (struct subchannel *sch) return -ENODEV; } } +EXPORT_SYMBOL_GPL(cio_resume); /* * halt I/O operation @@ -241,6 +244,7 @@ cio_halt(struct subchannel *sch) return -ENODEV; } } +EXPORT_SYMBOL_GPL(cio_halt); /* * Clear I/O operation @@ -271,6 +275,7 @@ cio_clear(struct subchannel *sch) return -ENODEV; } } +EXPORT_SYMBOL_GPL(cio_clear); /* * Function: cio_cancel @@ -308,7 +313,68 @@ cio_cancel (struct subchannel *sch) return -ENODEV; } } +EXPORT_SYMBOL_GPL(cio_cancel); +/** + * cio_cancel_halt_clear - Cancel running I/O by performing cancel, halt + * and clear in that order if the subchannel is valid. + * @sch: subchannel on which to perform the cancel_halt_clear operation + * @iretry: number of retries remaining for the next operation + * + * This should be called repeatedly since halt/clear are asynchronous + * operations. We do one try with cio_cancel, three tries with cio_halt, + * 255 tries with cio_clear. The caller should initialize @iretry with + * the value 255 for its first call to this, and keep using the same + * @iretry in the subsequent calls until it gets a non -EBUSY return. + * + * Returns 0 if device now idle, -ENODEV for device not operational, + * -EBUSY if an interrupt is expected (either from halt/clear or from a + * status pending), and -EIO if out of retries. + */ +int cio_cancel_halt_clear(struct subchannel *sch, int *iretry) +{ + int ret; + + if (cio_update_schib(sch)) + return -ENODEV; + if (!sch->schib.pmcw.ena) + /* Not operational -> done. */ + return 0; + /* Stage 1: cancel io. */ + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && + !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { + if (!scsw_is_tm(&sch->schib.scsw)) { + ret = cio_cancel(sch); + if (ret != -EINVAL) + return ret; + } + /* + * Cancel io unsuccessful or not applicable (transport mode). + * Continue with asynchronous instructions. + */ + *iretry = 3; /* 3 halt retries. */ + } + /* Stage 2: halt io. */ + if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { + if (*iretry) { + *iretry -= 1; + ret = cio_halt(sch); + if (ret != -EBUSY) + return (ret == 0) ? -EBUSY : ret; + } + /* Halt io unsuccessful. */ + *iretry = 255; /* 255 clear retries. */ + } + /* Stage 3: clear io. */ + if (*iretry) { + *iretry -= 1; + ret = cio_clear(sch); + return (ret == 0) ? -EBUSY : ret; + } + /* Function was unsuccessful */ + return -EIO; +} +EXPORT_SYMBOL_GPL(cio_cancel_halt_clear); static void cio_apply_config(struct subchannel *sch, struct schib *schib) { @@ -382,6 +448,7 @@ int cio_commit_config(struct subchannel *sch) } return ret; } +EXPORT_SYMBOL_GPL(cio_commit_config); /** * cio_update_schib - Perform stsch and update schib if subchannel is valid.
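cio_cancel_halt_clear() is meant to be driven in a loop, because halt and clear only signal completion through a later interrupt. A minimal sketch of the documented protocol (example_stop_io() is hypothetical; the wait for the expected interrupt is elided):

	static int example_stop_io(struct subchannel *sch)
	{
		int iretry = 255;	/* initialized once, as the kerneldoc requires */
		int ret;

		do {
			ret = cio_cancel_halt_clear(sch, &iretry);
			/* on -EBUSY: wait for the halt/clear interrupt, then retry */
		} while (ret == -EBUSY);

		return ret;	/* 0, -ENODEV or -EIO */
	}

vfio_ccw_sch_quiesce() in vfio_ccw_drv.c below follows exactly this pattern, using a completion for the wait step.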
@@ -987,6 +1054,7 @@ int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key) return cio_start_handle_notoper(sch, lpm); } } +EXPORT_SYMBOL_GPL(cio_tm_start_key); /** * cio_tm_intrg - perform interrogate function @@ -1012,3 +1080,4 @@ int cio_tm_intrg(struct subchannel *sch) return -ENODEV; } } +EXPORT_SYMBOL_GPL(cio_tm_intrg); diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index f0e57aefb5f2..939596d81b73 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h @@ -123,6 +123,7 @@ extern int cio_enable_subchannel(struct subchannel *, u32); extern int cio_disable_subchannel (struct subchannel *); extern int cio_cancel (struct subchannel *); extern int cio_clear (struct subchannel *); +extern int cio_cancel_halt_clear(struct subchannel *, int *); extern int cio_resume (struct subchannel *); extern int cio_halt (struct subchannel *); extern int cio_start (struct subchannel *, struct ccw1 *, __u8); diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 9afb5ce13007..12016e32e519 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c @@ -124,14 +124,6 @@ ccw_device_set_timeout(struct ccw_device *cdev, int expires) add_timer(&cdev->private->timer); } -/* - * Cancel running i/o. This is called repeatedly since halt/clear are - * asynchronous operations. We do one try with cio_cancel, two tries - * with cio_halt, 255 tries with cio_clear. If everythings fails panic. - * Returns 0 if device now idle, -ENODEV for device not operational and - * -EBUSY if an interrupt is expected (either from halt/clear or from a - * status pending). - */ int ccw_device_cancel_halt_clear(struct ccw_device *cdev) { @@ -139,44 +131,14 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) int ret; sch = to_subchannel(cdev->dev.parent); - if (cio_update_schib(sch)) - return -ENODEV; - if (!sch->schib.pmcw.ena) - /* Not operational -> done. */ - return 0; - /* Stage 1: cancel io. */ - if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_HALT_PEND) && - !(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { - if (!scsw_is_tm(&sch->schib.scsw)) { - ret = cio_cancel(sch); - if (ret != -EINVAL) - return ret; - } - /* cancel io unsuccessful or not applicable (transport mode). - * Continue with asynchronous instructions. */ - cdev->private->iretry = 3; /* 3 halt retries. */ - } - if (!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_CLEAR_PEND)) { - /* Stage 2: halt io. */ - if (cdev->private->iretry) { - cdev->private->iretry--; - ret = cio_halt(sch); - if (ret != -EBUSY) - return (ret == 0) ? -EBUSY : ret; - } - /* halt io unsuccessful. */ - cdev->private->iretry = 255; /* 255 clear retries. */ - } - /* Stage 3: clear io. */ - if (cdev->private->iretry) { - cdev->private->iretry--; - ret = cio_clear (sch); - return (ret == 0) ? -EBUSY : ret; - } - /* Function was unsuccessful */ - CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n", - cdev->private->dev_id.ssid, cdev->private->dev_id.devno); - return -EIO; + ret = cio_cancel_halt_clear(sch, &cdev->private->iretry); + + if (ret == -EIO) + CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n", + cdev->private->dev_id.ssid, + cdev->private->dev_id.devno); + + return ret; } void ccw_device_update_sense_data(struct ccw_device *cdev) diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c new file mode 100644 index 000000000000..ba6ac83a6c25 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -0,0 +1,842 @@ +/* + * channel program interfaces + * + * Copyright IBM Corp. 
2017 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + */ + +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/iommu.h> +#include <linux/vfio.h> +#include <asm/idals.h> + +#include "vfio_ccw_cp.h" + +/* + * Max length for ccw chain. + * XXX: Limit to 256, need to check more? + */ +#define CCWCHAIN_LEN_MAX 256 + +struct pfn_array { + unsigned long pa_iova; + unsigned long *pa_iova_pfn; + unsigned long *pa_pfn; + int pa_nr; +}; + +struct pfn_array_table { + struct pfn_array *pat_pa; + int pat_nr; +}; + +struct ccwchain { + struct list_head next; + struct ccw1 *ch_ccw; + /* Guest physical address of the current chain. */ + u64 ch_iova; + /* Count of the valid ccws in chain. */ + int ch_len; + /* Pinned PAGEs for the original data. */ + struct pfn_array_table *ch_pat; +}; + +/* + * pfn_array_pin() - pin user pages in memory + * @pa: pfn_array on which to perform the operation + * @mdev: the mediated device to perform pin/unpin operations + * + * Attempt to pin user pages in memory. + * + * Usage of pfn_array: + * @pa->pa_iova starting guest physical I/O address. Assigned by caller. + * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated + * by caller. + * @pa->pa_pfn array that receives PFNs of the pages pinned. Allocated by + * caller. + * @pa->pa_nr number of pages from @pa->pa_iova to pin. Assigned by + * caller. + * number of pages pinned. Assigned by callee. + * + * Returns: + * Number of pages pinned on success. + * If @pa->pa_nr is 0 or negative, returns 0. + * If no pages were pinned, returns -errno. + */ +static int pfn_array_pin(struct pfn_array *pa, struct device *mdev) +{ + int i, ret; + + if (pa->pa_nr <= 0) { + pa->pa_nr = 0; + return 0; + } + + pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT; + for (i = 1; i < pa->pa_nr; i++) + pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1; + + ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr, + IOMMU_READ | IOMMU_WRITE, pa->pa_pfn); + + if (ret > 0 && ret != pa->pa_nr) { + vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret); + pa->pa_nr = 0; + return 0; + } + + return ret; +} + +/* Unpin the pages before releasing the memory. */ +static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev) +{ + vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr); + pa->pa_nr = 0; + kfree(pa->pa_iova_pfn); +} + +/* Alloc memory for PFNs, then pin pages with them. 
*/ +static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, + u64 iova, unsigned int len) +{ + int ret = 0; + + if (!len || pa->pa_nr) + return -EINVAL; + + pa->pa_iova = iova; + + pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT; + if (!pa->pa_nr) + return -EINVAL; + + pa->pa_iova_pfn = kcalloc(pa->pa_nr, + sizeof(*pa->pa_iova_pfn) + + sizeof(*pa->pa_pfn), + GFP_KERNEL); + if (unlikely(!pa->pa_iova_pfn)) + return -ENOMEM; + pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr; + + ret = pfn_array_pin(pa, mdev); + + if (ret > 0) + return ret; + else if (!ret) + ret = -EINVAL; + + kfree(pa->pa_iova_pfn); + + return ret; +} + +static int pfn_array_table_init(struct pfn_array_table *pat, int nr) +{ + pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL); + if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) { + pat->pat_nr = 0; + return -ENOMEM; + } + + pat->pat_nr = nr; + + return 0; +} + +static void pfn_array_table_unpin_free(struct pfn_array_table *pat, + struct device *mdev) +{ + int i; + + for (i = 0; i < pat->pat_nr; i++) + pfn_array_unpin_free(pat->pat_pa + i, mdev); + + if (pat->pat_nr) { + kfree(pat->pat_pa); + pat->pat_pa = NULL; + pat->pat_nr = 0; + } +} + +static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat, + unsigned long iova) +{ + struct pfn_array *pa = pat->pat_pa; + unsigned long iova_pfn = iova >> PAGE_SHIFT; + int i, j; + + for (i = 0; i < pat->pat_nr; i++, pa++) + for (j = 0; j < pa->pa_nr; j++) + if (pa->pa_iova_pfn[j] == iova_pfn) + return true; + + return false; +} +/* Create the list idal words for a pfn_array_table. */ +static inline void pfn_array_table_idal_create_words( + struct pfn_array_table *pat, + unsigned long *idaws) +{ + struct pfn_array *pa; + int i, j, k; + + /* + * Idal words (except the first one) rely on the memory being 4k + * aligned. If a user virtual address is 4K aligned, then its + * corresponding kernel physical address will also be 4K aligned. Thus + * there will be no problem here to simply use the phys to create an + * idaw. + */ + k = 0; + for (i = 0; i < pat->pat_nr; i++) { + pa = pat->pat_pa + i; + for (j = 0; j < pa->pa_nr; j++) { + idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT; + if (k == 0) + idaws[k] += pa->pa_iova & (PAGE_SIZE - 1); + k++; + } + } +} + + +/* + * Within the domain (@mdev), copy @n bytes from a guest physical + * address (@iova) to a host physical address (@to).
+ */ +static long copy_from_iova(struct device *mdev, + void *to, u64 iova, + unsigned long n) +{ + struct pfn_array pa = {0}; + u64 from; + int i, ret; + unsigned long l, m; + + ret = pfn_array_alloc_pin(&pa, mdev, iova, n); + if (ret <= 0) + return ret; + + l = n; + for (i = 0; i < pa.pa_nr; i++) { + from = pa.pa_pfn[i] << PAGE_SHIFT; + m = PAGE_SIZE; + if (i == 0) { + from += iova & (PAGE_SIZE - 1); + m -= iova & (PAGE_SIZE - 1); + } + + m = min(l, m); + memcpy(to + (n - l), (void *)from, m); + + l -= m; + if (l == 0) + break; + } + + pfn_array_unpin_free(&pa, mdev); + + return l; +} + +static long copy_ccw_from_iova(struct channel_program *cp, + struct ccw1 *to, u64 iova, + unsigned long len) +{ + struct ccw0 ccw0; + struct ccw1 *pccw1; + int ret; + int i; + + ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1)); + if (ret) + return ret; + + if (!cp->orb.cmd.fmt) { + pccw1 = to; + for (i = 0; i < len; i++) { + ccw0 = *(struct ccw0 *)pccw1; + if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) { + pccw1->cmd_code = CCW_CMD_TIC; + pccw1->flags = 0; + pccw1->count = 0; + } else { + pccw1->cmd_code = ccw0.cmd_code; + pccw1->flags = ccw0.flags; + pccw1->count = ccw0.count; + } + pccw1->cda = ccw0.cda; + pccw1++; + } + } + + return ret; +} + +/* + * Helpers to operate ccwchain. + */ +#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0) + +#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP) + +#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC) + +#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA) + + +#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC)) + +static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len) +{ + struct ccwchain *chain; + void *data; + size_t size; + + /* Make ccw address aligned to 8. */ + size = ((sizeof(*chain) + 7L) & -8L) + + sizeof(*chain->ch_ccw) * len + + sizeof(*chain->ch_pat) * len; + chain = kzalloc(size, GFP_DMA | GFP_KERNEL); + if (!chain) + return NULL; + + data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L); + chain->ch_ccw = (struct ccw1 *)data; + + data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len; + chain->ch_pat = (struct pfn_array_table *)data; + + chain->ch_len = len; + + list_add_tail(&chain->next, &cp->ccwchain_list); + + return chain; +} + +static void ccwchain_free(struct ccwchain *chain) +{ + list_del(&chain->next); + kfree(chain); +} + +/* Free resource for a ccw that allocated memory for its cda. */ +static void ccwchain_cda_free(struct ccwchain *chain, int idx) +{ + struct ccw1 *ccw = chain->ch_ccw + idx; + + if (!ccw->count) + return; + + kfree((void *)(u64)ccw->cda); +} + +/* Unpin the pages then free the memory resources. */ +static void cp_unpin_free(struct channel_program *cp) +{ + struct ccwchain *chain, *temp; + int i; + + list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) { + for (i = 0; i < chain->ch_len; i++) { + pfn_array_table_unpin_free(chain->ch_pat + i, + cp->mdev); + ccwchain_cda_free(chain, i); + } + ccwchain_free(chain); + } +} + +/** + * ccwchain_calc_length - calculate the length of the ccw chain. + * @iova: guest physical address of the target ccw chain + * @cp: channel_program on which to perform the operation + * + * This is the chain length not considering any TICs. + * You need to do a new round for each TIC target. + * + * Returns: the length of the ccw chain or -errno. 
+ */ +static int ccwchain_calc_length(u64 iova, struct channel_program *cp) +{ + struct ccw1 *ccw, *p; + int cnt; + + /* + * Copy current chain from guest to host kernel. + * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256). + * So copying 2K is enough (safe). + */ + p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL); + if (!ccw) + return -ENOMEM; + + cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX); + if (cnt) { + kfree(ccw); + return cnt; + } + + cnt = 0; + do { + cnt++; + + if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw))) + break; + + ccw++; + } while (cnt < CCWCHAIN_LEN_MAX + 1); + + if (cnt == CCWCHAIN_LEN_MAX + 1) + cnt = -EINVAL; + + kfree(p); + return cnt; +} + +static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp) +{ + struct ccwchain *chain; + u32 ccw_head, ccw_tail; + + list_for_each_entry(chain, &cp->ccwchain_list, next) { + ccw_head = chain->ch_iova; + ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1); + + if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail)) + return 1; + } + + return 0; +} + +static int ccwchain_loop_tic(struct ccwchain *chain, + struct channel_program *cp); + +static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp) +{ + struct ccwchain *chain; + int len, ret; + + /* May transfer to an existing chain. */ + if (tic_target_chain_exists(tic, cp)) + return 0; + + /* Get chain length. */ + len = ccwchain_calc_length(tic->cda, cp); + if (len < 0) + return len; + + /* Need to allocate a new chain for this one. */ + chain = ccwchain_alloc(cp, len); + if (!chain) + return -ENOMEM; + chain->ch_iova = tic->cda; + + /* Copy the new chain from user. */ + ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len); + if (ret) { + ccwchain_free(chain); + return ret; + } + + /* Loop for tics on this new chain. */ + return ccwchain_loop_tic(chain, cp); +} + +/* Loop for TICs. */ +static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp) +{ + struct ccw1 *tic; + int i, ret; + + for (i = 0; i < chain->ch_len; i++) { + tic = chain->ch_ccw + i; + + if (!ccw_is_tic(tic)) + continue; + + ret = ccwchain_handle_tic(tic, cp); + if (ret) + return ret; + } + + return 0; +} + +static int ccwchain_fetch_tic(struct ccwchain *chain, + int idx, + struct channel_program *cp) +{ + struct ccw1 *ccw = chain->ch_ccw + idx; + struct ccwchain *iter; + u32 ccw_head, ccw_tail; + + list_for_each_entry(iter, &cp->ccwchain_list, next) { + ccw_head = iter->ch_iova; + ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1); + + if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) { + ccw->cda = (__u32) (addr_t) (iter->ch_ccw + + (ccw->cda - ccw_head)); + return 0; + } + } + + return -EFAULT; +}
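ccwchain_fetch_direct() below converts a direct-addressed CCW into an IDAL CCW, so the number of IDAWs it allocates equals the number of 4K pages the data area spans; this is the same arithmetic pfn_array_alloc_pin() uses for pa_nr. A small illustration (example_idaw_nr() is not part of the patch):

	/* Number of 4K idaws needed for @len bytes at guest address @iova. */
	static inline int example_idaw_nr(u64 iova, unsigned int len)
	{
		/* e.g. iova = 0x10ffc, len = 8: crosses a page boundary -> 2 idaws */
		return (int)(((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT);
	}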
+static int ccwchain_fetch_direct(struct ccwchain *chain, + int idx, + struct channel_program *cp) +{ + struct ccw1 *ccw; + struct pfn_array_table *pat; + unsigned long *idaws; + int idaw_nr; + + ccw = chain->ch_ccw + idx; + + /* + * Pin data page(s) in memory. + * The number of pages actually is the count of the idaws which will be + * needed when translating a direct ccw to an idal ccw. + */ + pat = chain->ch_pat + idx; + if (pfn_array_table_init(pat, 1)) + return -ENOMEM; + idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, + ccw->cda, ccw->count); + if (idaw_nr < 0) + return idaw_nr; + + /* Translate this direct ccw to an idal ccw. */ + idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL); + if (!idaws) { + pfn_array_table_unpin_free(pat, cp->mdev); + return -ENOMEM; + } + ccw->cda = (__u32) virt_to_phys(idaws); + ccw->flags |= CCW_FLAG_IDA; + + pfn_array_table_idal_create_words(pat, idaws); + + return 0; +} + +static int ccwchain_fetch_idal(struct ccwchain *chain, + int idx, + struct channel_program *cp) +{ + struct ccw1 *ccw; + struct pfn_array_table *pat; + unsigned long *idaws; + u64 idaw_iova; + unsigned int idaw_nr, idaw_len; + int i, ret; + + ccw = chain->ch_ccw + idx; + + /* Calculate size of idaws. */ + ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova)); + if (ret) + return ret; + idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count); + idaw_len = idaw_nr * sizeof(*idaws); + + /* Pin data page(s) in memory. */ + pat = chain->ch_pat + idx; + ret = pfn_array_table_init(pat, idaw_nr); + if (ret) + return ret; + + /* Translate idal ccw to use newly allocated idaws. */ + idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL); + if (!idaws) { + ret = -ENOMEM; + goto out_unpin; + } + + ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len); + if (ret) + goto out_free_idaws; + + ccw->cda = virt_to_phys(idaws); + + for (i = 0; i < idaw_nr; i++) { + idaw_iova = *(idaws + i); + if (IS_ERR_VALUE(idaw_iova)) { + ret = -EFAULT; + goto out_free_idaws; + } + + ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev, + idaw_iova, 1); + if (ret < 0) + goto out_free_idaws; + } + + pfn_array_table_idal_create_words(pat, idaws); + + return 0; + +out_free_idaws: + kfree(idaws); +out_unpin: + pfn_array_table_unpin_free(pat, cp->mdev); + return ret; +} + +/* + * Fetch one ccw. + * To reduce memory copy, we'll pin the cda page in memory, + * and to get rid of the cda 2G limitation of ccw1, we'll translate + * direct ccws to idal ccws. + */ +static int ccwchain_fetch_one(struct ccwchain *chain, + int idx, + struct channel_program *cp) +{ + struct ccw1 *ccw = chain->ch_ccw + idx; + + if (ccw_is_test(ccw) || ccw_is_noop(ccw)) + return 0; + + if (ccw_is_tic(ccw)) + return ccwchain_fetch_tic(chain, idx, cp); + + if (ccw_is_idal(ccw)) + return ccwchain_fetch_idal(chain, idx, cp); + + return ccwchain_fetch_direct(chain, idx, cp); +} + +/** + * cp_init() - allocate ccwchains for a channel program. + * @cp: channel_program on which to perform the operation + * @mdev: the mediated device to perform pin/unpin operations + * @orb: control block for the channel program from the guest + * + * This creates one or more ccwchain(s), and copies the raw data of + * the target channel program from @orb->cmd.cpa to the new ccwchain(s). + * + * Limitations: + * 1. Supports only prefetch enabled mode. + * 2. Supports idal(c64) ccw chaining. + * 3. Supports 4k idaw. + * + * Returns: + * %0 on success and a negative error value on failure. + */ +int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb) +{ + u64 iova = orb->cmd.cpa; + struct ccwchain *chain; + int len, ret; + + /* + * XXX: + * Only support prefetch enable mode now. + * Only support 64bit addressing idal. + * Only support 4k IDAW. + */ + if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k) + return -EOPNOTSUPP; + + INIT_LIST_HEAD(&cp->ccwchain_list); + memcpy(&cp->orb, orb, sizeof(*orb)); + cp->mdev = mdev; + + /* Get chain length. */ + len = ccwchain_calc_length(iova, cp); + if (len < 0) + return len; + + /* Alloc mem for the head chain.
*/ + chain = ccwchain_alloc(cp, len); + if (!chain) + return -ENOMEM; + chain->ch_iova = iova; + + /* Copy the head chain from guest. */ + ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len); + if (ret) { + ccwchain_free(chain); + return ret; + } + + /* Now loop for its TICs. */ + ret = ccwchain_loop_tic(chain, cp); + if (ret) + cp_unpin_free(cp); + + return ret; +} + + +/** + * cp_free() - free resources for channel program. + * @cp: channel_program on which to perform the operation + * + * This unpins the memory pages and frees the memory space occupied by + * @cp, which must have been initialized by a previous call to cp_init(). + * Otherwise, undefined behavior occurs. + */ +void cp_free(struct channel_program *cp) +{ + cp_unpin_free(cp); +} + +/** + * cp_prefetch() - translate a guest physical address channel program to + * a real-device runnable channel program. + * @cp: channel_program on which to perform the operation + * + * This function translates the guest-physical-address channel program + * and stores the result in the ccwchain list. @cp must have been + * initialized by a previous call to cp_init(). Otherwise, undefined + * behavior occurs. + * + * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced + * as helpers to do ccw chain translation inside the kernel. Basically + * they accept a channel program issued by a virtual machine, and + * translate the channel program to a real-device runnable channel + * program. + * + * These APIs will copy the ccws into kernel-space buffers, and update + * the guest physical addresses with their corresponding host physical + * addresses. Then channel I/O device drivers could issue the + * translated channel program to real devices to perform an I/O + * operation. + * + * These interfaces are designed to support translation only for + * channel programs, which are generated and formatted by a + * guest. Thus this will make it possible for things like VFIO to + * leverage the interfaces to passthrough a channel I/O mediated + * device in QEMU. + * + * We support direct ccw chaining by translating them to idal ccws. + * + * Returns: + * %0 on success and a negative error value on failure. + */ +int cp_prefetch(struct channel_program *cp) +{ + struct ccwchain *chain; + int len, idx, ret; + + list_for_each_entry(chain, &cp->ccwchain_list, next) { + len = chain->ch_len; + for (idx = 0; idx < len; idx++) { + ret = ccwchain_fetch_one(chain, idx, cp); + if (ret) + return ret; + } + } + + return 0; +} + +/** + * cp_get_orb() - get the orb of the channel program + * @cp: channel_program on which to perform the operation + * @intparm: new intparm for the returned orb + * @lpm: candidate value of the logical-path mask for the returned orb + * + * This function returns the address of the updated orb of the channel + * program. Channel I/O device drivers could use this orb to issue a + * ssch. + */ +union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm) +{ + union orb *orb; + struct ccwchain *chain; + struct ccw1 *cpa; + + orb = &cp->orb; + + orb->cmd.intparm = intparm; + orb->cmd.fmt = 1; + orb->cmd.key = PAGE_DEFAULT_KEY >> 4; + + if (orb->cmd.lpm == 0) + orb->cmd.lpm = lpm; + + chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next); + cpa = chain->ch_ccw; + orb->cmd.cpa = (__u32) __pa(cpa); + + return orb; +}
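Taken together, the cp_* helpers give a channel I/O driver a complete translate-and-run cycle. A condensed sketch of one request (example_run_cp() is hypothetical and error handling is reduced to early exits; fsm_io_helper() in vfio_ccw_fsm.c below is the real consumer):

	static int example_run_cp(struct vfio_ccw_private *private, union orb *guest_orb)
	{
		struct subchannel *sch = private->sch;
		union orb *orb;
		int ret;

		ret = cp_init(&private->cp, mdev_dev(private->mdev), guest_orb);
		if (ret)
			return ret;
		ret = cp_prefetch(&private->cp);	/* pin pages, translate cdas */
		if (ret) {
			cp_free(&private->cp);
			return ret;
		}
		orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
		/* on the later I/O interrupt: cp_update_scsw(), then cp_free() */
		return ssch(sch->schid, orb) ? -EBUSY : 0;	/* cc handling simplified */
	}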
+/** + * cp_update_scsw() - update scsw for a channel program. + * @cp: channel_program on which to perform the operation + * @scsw: I/O results of the channel program and also the target to be + * updated + * + * @scsw contains the I/O results of the channel program pointed + * to by @cp. However what @scsw->cpa stores is a host physical + * address, which is meaningless for the guest that is waiting for + * the I/O results. + * + * This function updates @scsw->cpa to its corresponding guest physical + * address. + */ +void cp_update_scsw(struct channel_program *cp, union scsw *scsw) +{ + struct ccwchain *chain; + u32 cpa = scsw->cmd.cpa; + u32 ccw_head, ccw_tail; + + /* + * LATER: + * For now, only update the cmd.cpa part. We may need to deal with + * other portions of the schib as well, even if we don't return them + * in the ioctl directly. Path status changes etc. + */ + list_for_each_entry(chain, &cp->ccwchain_list, next) { + ccw_head = (u32)(u64)chain->ch_ccw; + ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1); + + if ((ccw_head <= cpa) && (cpa <= ccw_tail)) { + /* + * (cpa - ccw_head) is the offset value of the host + * physical ccw to its chain head. + * Adding this value to the guest physical ccw chain + * head gets us the guest cpa. + */ + cpa = chain->ch_iova + (cpa - ccw_head); + break; + } + } + + scsw->cmd.cpa = cpa; +} + +/** + * cp_iova_pinned() - check if an iova is pinned for a ccw chain. + * @cp: channel_program on which to perform the operation + * @iova: the iova to check + * + * If the @iova is currently pinned for the ccw chain, return true; + * else return false. + */ +bool cp_iova_pinned(struct channel_program *cp, u64 iova) +{ + struct ccwchain *chain; + int i; + + list_for_each_entry(chain, &cp->ccwchain_list, next) { + for (i = 0; i < chain->ch_len; i++) + if (pfn_array_table_iova_pinned(chain->ch_pat + i, + iova)) + return true; + } + + return false; +} diff --git a/drivers/s390/cio/vfio_ccw_cp.h b/drivers/s390/cio/vfio_ccw_cp.h new file mode 100644 index 000000000000..7a1996b3b36d --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_cp.h @@ -0,0 +1,42 @@ +/* + * channel program interfaces + * + * Copyright IBM Corp. 2017 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + */ + +#ifndef _VFIO_CCW_CP_H_ +#define _VFIO_CCW_CP_H_ + +#include <asm/cio.h> +#include <asm/scsw.h> + +#include "orb.h" + +/** + * struct channel_program - manage information for channel program + * @ccwchain_list: list head of ccwchains + * @orb: orb for the currently processed ssch request + * @mdev: the mediated device to perform page pinning/unpinning + * + * @ccwchain_list is the head of a ccwchain list that contains the + * translated result of the guest channel program pointed to by + * the iova parameter when calling cp_init.
+ */ +struct channel_program { + struct list_head ccwchain_list; + union orb orb; + struct device *mdev; +}; + +extern int cp_init(struct channel_program *cp, struct device *mdev, + union orb *orb); +extern void cp_free(struct channel_program *cp); +extern int cp_prefetch(struct channel_program *cp); +extern union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm); +extern void cp_update_scsw(struct channel_program *cp, union scsw *scsw); +extern bool cp_iova_pinned(struct channel_program *cp, u64 iova); + +#endif diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c new file mode 100644 index 000000000000..e90dd43d2a55 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_drv.c @@ -0,0 +1,308 @@ +/* + * VFIO based Physical Subchannel device driver + * + * Copyright IBM Corp. 2017 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/uuid.h> +#include <linux/mdev.h> + +#include <asm/isc.h> + +#include "ioasm.h" +#include "css.h" +#include "vfio_ccw_private.h" + +struct workqueue_struct *vfio_ccw_work_q; + +/* + * Helpers + */ +int vfio_ccw_sch_quiesce(struct subchannel *sch) +{ + struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + DECLARE_COMPLETION_ONSTACK(completion); + int iretry, ret = 0; + + spin_lock_irq(sch->lock); + if (!sch->schib.pmcw.ena) + goto out_unlock; + ret = cio_disable_subchannel(sch); + if (ret != -EBUSY) + goto out_unlock; + + do { + iretry = 255; + + ret = cio_cancel_halt_clear(sch, &iretry); + while (ret == -EBUSY) { + /* + * Flush all I/O and wait for + * cancel/halt/clear completion. + */ + private->completion = &completion; + spin_unlock_irq(sch->lock); + + wait_for_completion_timeout(&completion, 3*HZ); + + spin_lock_irq(sch->lock); + private->completion = NULL; + flush_workqueue(vfio_ccw_work_q); + ret = cio_cancel_halt_clear(sch, &iretry); + }; + + ret = cio_disable_subchannel(sch); + } while (ret == -EBUSY); +out_unlock: + private->state = VFIO_CCW_STATE_NOT_OPER; + spin_unlock_irq(sch->lock); + return ret; +} + +static void vfio_ccw_sch_io_todo(struct work_struct *work) +{ + struct vfio_ccw_private *private; + struct subchannel *sch; + struct irb *irb; + + private = container_of(work, struct vfio_ccw_private, io_work); + irb = &private->irb; + sch = private->sch; + + if (scsw_is_solicited(&irb->scsw)) { + cp_update_scsw(&private->cp, &irb->scsw); + cp_free(&private->cp); + } + memcpy(private->io_region.irb_area, irb, sizeof(*irb)); + + if (private->io_trigger) + eventfd_signal(private->io_trigger, 1); + + if (private->mdev) + private->state = VFIO_CCW_STATE_IDLE; +} + +/* + * Sysfs interfaces + */ +static ssize_t chpids_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + struct chsc_ssd_info *ssd = &sch->ssd_info; + ssize_t ret = 0; + int chp; + int mask; + + for (chp = 0; chp < 8; chp++) { + mask = 0x80 >> chp; + if (ssd->path_mask & mask) + ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); + else + ret += sprintf(buf + ret, "00 "); + } + ret += sprintf(buf+ret, "\n"); + return ret; +} + +static ssize_t pimpampom_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct subchannel *sch = to_subchannel(dev); + struct pmcw *pmcw = &sch->schib.pmcw; + + return sprintf(buf, "%02x %02x %02x\n", + pmcw->pim, pmcw->pam, pmcw->pom); +} + 
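The two attributes just defined let an administrator inspect the path state of a subchannel from userspace. A hypothetical reader (the sysfs path and the 0.0.0313 bus ID are made-up examples; subchannels appear on the css bus):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64];
		ssize_t n;
		int fd = open("/sys/bus/css/devices/0.0.0313/chpids", O_RDONLY);

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("chpids: %s", buf);	/* e.g. "10 18 00 00 00 00 00 00" */
		}
		close(fd);
		return 0;
	}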
+static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); +static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); + +static struct attribute *vfio_subchannel_attrs[] = { + &dev_attr_chpids.attr, + &dev_attr_pimpampom.attr, + NULL, +}; + +static struct attribute_group vfio_subchannel_attr_group = { + .attrs = vfio_subchannel_attrs, +}; + +/* + * Css driver callbacks + */ +static void vfio_ccw_sch_irq(struct subchannel *sch) +{ + struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + + inc_irq_stat(IRQIO_CIO); + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT); +} + +static int vfio_ccw_sch_probe(struct subchannel *sch) +{ + struct pmcw *pmcw = &sch->schib.pmcw; + struct vfio_ccw_private *private; + int ret; + + if (pmcw->qf) { + dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n", + dev_name(&sch->dev)); + return -ENODEV; + } + + private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA); + if (!private) + return -ENOMEM; + private->sch = sch; + dev_set_drvdata(&sch->dev, private); + + spin_lock_irq(sch->lock); + private->state = VFIO_CCW_STATE_NOT_OPER; + sch->isc = VFIO_CCW_ISC; + ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + spin_unlock_irq(sch->lock); + if (ret) + goto out_free; + + ret = sysfs_create_group(&sch->dev.kobj, &vfio_subchannel_attr_group); + if (ret) + goto out_disable; + + ret = vfio_ccw_mdev_reg(sch); + if (ret) + goto out_rm_group; + + INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); + atomic_set(&private->avail, 1); + private->state = VFIO_CCW_STATE_STANDBY; + + return 0; + +out_rm_group: + sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group); +out_disable: + cio_disable_subchannel(sch); +out_free: + dev_set_drvdata(&sch->dev, NULL); + kfree(private); + return ret; +} + +static int vfio_ccw_sch_remove(struct subchannel *sch) +{ + struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + + vfio_ccw_sch_quiesce(sch); + + vfio_ccw_mdev_unreg(sch); + + sysfs_remove_group(&sch->dev.kobj, &vfio_subchannel_attr_group); + + dev_set_drvdata(&sch->dev, NULL); + + kfree(private); + + return 0; +} + +static void vfio_ccw_sch_shutdown(struct subchannel *sch) +{ + vfio_ccw_sch_quiesce(sch); +} + +/** + * vfio_ccw_sch_event - process subchannel event + * @sch: subchannel + * @process: non-zero if function is called in process context + * + * An unspecified event occurred for this subchannel. Adjust data according + * to the current operational state of the subchannel. Return zero when the + * event has been handled sufficiently or -EAGAIN when this function should + * be called again in process context. + */ +static int vfio_ccw_sch_event(struct subchannel *sch, int process) +{ + struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); + unsigned long flags; + + spin_lock_irqsave(sch->lock, flags); + if (!device_is_registered(&sch->dev)) + goto out_unlock; + + if (work_pending(&sch->todo_work)) + goto out_unlock; + + if (cio_update_schib(sch)) { + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); + goto out_unlock; + } + + private = dev_get_drvdata(&sch->dev); + if (private->state == VFIO_CCW_STATE_NOT_OPER) { + private->state = private->mdev ? 
VFIO_CCW_STATE_IDLE : + VFIO_CCW_STATE_STANDBY; + } + +out_unlock: + spin_unlock_irqrestore(sch->lock, flags); + + return 0; +} + +static struct css_device_id vfio_ccw_sch_ids[] = { + { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids); + +static struct css_driver vfio_ccw_sch_driver = { + .drv = { + .name = "vfio_ccw", + .owner = THIS_MODULE, + }, + .subchannel_type = vfio_ccw_sch_ids, + .irq = vfio_ccw_sch_irq, + .probe = vfio_ccw_sch_probe, + .remove = vfio_ccw_sch_remove, + .shutdown = vfio_ccw_sch_shutdown, + .sch_event = vfio_ccw_sch_event, +}; + +static int __init vfio_ccw_sch_init(void) +{ + int ret; + + vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); + if (!vfio_ccw_work_q) + return -ENOMEM; + + isc_register(VFIO_CCW_ISC); + ret = css_driver_register(&vfio_ccw_sch_driver); + if (ret) { + isc_unregister(VFIO_CCW_ISC); + destroy_workqueue(vfio_ccw_work_q); + } + + return ret; +} + +static void __exit vfio_ccw_sch_exit(void) +{ + css_driver_unregister(&vfio_ccw_sch_driver); + isc_unregister(VFIO_CCW_ISC); + destroy_workqueue(vfio_ccw_work_q); +} +module_init(vfio_ccw_sch_init); +module_exit(vfio_ccw_sch_exit); + +MODULE_LICENSE("GPL v2"); diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c new file mode 100644 index 000000000000..80a0559cd7ce --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_fsm.c @@ -0,0 +1,203 @@ +/* + * Finite state machine for vfio-ccw device handling + * + * Copyright IBM Corp. 2017 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + */ + +#include <linux/vfio.h> +#include <linux/mdev.h> + +#include "ioasm.h" +#include "vfio_ccw_private.h" + +static int fsm_io_helper(struct vfio_ccw_private *private) +{ + struct subchannel *sch; + union orb *orb; + int ccode; + __u8 lpm; + unsigned long flags; + + sch = private->sch; + + spin_lock_irqsave(sch->lock, flags); + private->state = VFIO_CCW_STATE_BUSY; + spin_unlock_irqrestore(sch->lock, flags); + + orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm); + + /* Issue "Start Subchannel" */ + ccode = ssch(sch->schid, orb); + + switch (ccode) { + case 0: + /* + * Initialize device status information + */ + sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND; + return 0; + case 1: /* Status pending */ + case 2: /* Busy */ + return -EBUSY; + case 3: /* Device/path not operational */ + { + lpm = orb->cmd.lpm; + if (lpm != 0) + sch->lpm &= ~lpm; + else + sch->lpm = 0; + + if (cio_update_schib(sch)) + return -ENODEV; + + return sch->lpm ? -EACCES : -ENODEV; + } + default: + return ccode; + } +} + +static void fsm_notoper(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + struct subchannel *sch = private->sch; + + /* + * TODO: + * Probably we should send the machine check to the guest. + */ + css_sched_sch_todo(sch, SCH_TODO_UNREG); + private->state = VFIO_CCW_STATE_NOT_OPER; +} + +/* + * No operation action. 
+ */ +static void fsm_nop(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ +} + +static void fsm_io_error(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + pr_err("vfio-ccw: FSM: I/O request from state:%d\n", private->state); + private->io_region.ret_code = -EIO; +} + +static void fsm_io_busy(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + private->io_region.ret_code = -EBUSY; +} + +static void fsm_disabled_irq(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + struct subchannel *sch = private->sch; + + /* + * An interrupt in a disabled state means a previous disable was not + * successful - should not happen, but we try to disable again. + */ + cio_disable_subchannel(sch); +} + +/* + * Deal with the ccw command request from userspace. + */ +static void fsm_io_request(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + union orb *orb; + union scsw *scsw = &private->scsw; + struct ccw_io_region *io_region = &private->io_region; + struct mdev_device *mdev = private->mdev; + + private->state = VFIO_CCW_STATE_BOXED; + + memcpy(scsw, io_region->scsw_area, sizeof(*scsw)); + + if (scsw->cmd.fctl & SCSW_FCTL_START_FUNC) { + orb = (union orb *)io_region->orb_area; + + io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev), + orb); + if (io_region->ret_code) + goto err_out; + + io_region->ret_code = cp_prefetch(&private->cp); + if (io_region->ret_code) { + cp_free(&private->cp); + goto err_out; + } + + /* Start channel program and wait for I/O interrupt. */ + io_region->ret_code = fsm_io_helper(private); + if (io_region->ret_code) { + cp_free(&private->cp); + goto err_out; + } + return; + } else if (scsw->cmd.fctl & SCSW_FCTL_HALT_FUNC) { + /* XXX: Handle halt. */ + io_region->ret_code = -EOPNOTSUPP; + goto err_out; + } else if (scsw->cmd.fctl & SCSW_FCTL_CLEAR_FUNC) { + /* XXX: Handle clear. */ + io_region->ret_code = -EOPNOTSUPP; + goto err_out; + } + +err_out: + private->state = VFIO_CCW_STATE_IDLE; +}
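fsm_io_request() is reached when userspace writes the I/O region: vfio_ccw_mdev_write() in vfio_ccw_ops.c below raises VFIO_CCW_EVENT_IO_REQ. A rough userspace-side sketch of submitting a request (it assumes an opened vfio device fd, region offset 0 per VFIO_CCW_CONFIG_REGION_INDEX, and pre-built ORB/SCSW images; struct ccw_io_region comes from the linux/vfio_ccw.h UAPI header):

	#include <string.h>
	#include <unistd.h>
	#include <linux/vfio_ccw.h>

	static int example_submit_io(int device_fd, const void *orb, const void *scsw)
	{
		struct ccw_io_region region;

		memset(&region, 0, sizeof(region));
		memcpy(region.orb_area, orb, sizeof(region.orb_area));
		memcpy(region.scsw_area, scsw, sizeof(region.scsw_area));

		if (pwrite(device_fd, &region, sizeof(region), 0) != sizeof(region))
			return -1;
		/* wait on the I/O eventfd, then pread() the region to get the IRB */
		return 0;
	}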
+/* + * Got an interrupt for a normal io (state busy). + */ +static void fsm_irq(struct vfio_ccw_private *private, + enum vfio_ccw_event event) +{ + struct irb *irb = this_cpu_ptr(&cio_irb); + + memcpy(&private->irb, irb, sizeof(*irb)); + + queue_work(vfio_ccw_work_q, &private->io_work); + + if (private->completion) + complete(private->completion); +} + +/* + * Device statemachine + */ +fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS] = { + [VFIO_CCW_STATE_NOT_OPER] = { + [VFIO_CCW_EVENT_NOT_OPER] = fsm_nop, + [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error, + [VFIO_CCW_EVENT_INTERRUPT] = fsm_disabled_irq, + }, + [VFIO_CCW_STATE_STANDBY] = { + [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, + [VFIO_CCW_EVENT_IO_REQ] = fsm_io_error, + [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, + }, + [VFIO_CCW_STATE_IDLE] = { + [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, + [VFIO_CCW_EVENT_IO_REQ] = fsm_io_request, + [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, + }, + [VFIO_CCW_STATE_BOXED] = { + [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, + [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy, + [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, + }, + [VFIO_CCW_STATE_BUSY] = { + [VFIO_CCW_EVENT_NOT_OPER] = fsm_notoper, + [VFIO_CCW_EVENT_IO_REQ] = fsm_io_busy, + [VFIO_CCW_EVENT_INTERRUPT] = fsm_irq, + }, +}; diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c new file mode 100644 index 000000000000..e72abbc18ee3 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_ops.c @@ -0,0 +1,425 @@ +/* + * Physical device callbacks for vfio_ccw + * + * Copyright IBM Corp. 2017 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + */ + +#include <linux/vfio.h> +#include <linux/mdev.h> + +#include "vfio_ccw_private.h" + +static int vfio_ccw_mdev_reset(struct mdev_device *mdev) +{ + struct vfio_ccw_private *private; + struct subchannel *sch; + int ret; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); + sch = private->sch; + /* + * TODO: + * In the current stage, some things like "no I/O running" and "no + * interrupt pending" are clear, but we are not sure what other state + * we need to care about. + * There are still a lot more instructions that need to be handled. We + * should come back here later. + */ + ret = vfio_ccw_sch_quiesce(sch); + if (ret) + return ret; + + ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch); + if (!ret) + private->state = VFIO_CCW_STATE_IDLE; + + return ret; +} + +static int vfio_ccw_mdev_notifier(struct notifier_block *nb, + unsigned long action, + void *data) +{ + struct vfio_ccw_private *private = + container_of(nb, struct vfio_ccw_private, nb); + + /* + * Vendor drivers MUST unpin pages in response to an + * invalidation.
+ */ + if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) { + struct vfio_iommu_type1_dma_unmap *unmap = data; + + if (!cp_iova_pinned(&private->cp, unmap->iova)) + return NOTIFY_OK; + + if (vfio_ccw_mdev_reset(private->mdev)) + return NOTIFY_BAD; + + cp_free(&private->cp); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static ssize_t name_show(struct kobject *kobj, struct device *dev, char *buf) +{ + return sprintf(buf, "I/O subchannel (Non-QDIO)\n"); +} +MDEV_TYPE_ATTR_RO(name); + +static ssize_t device_api_show(struct kobject *kobj, struct device *dev, + char *buf) +{ + return sprintf(buf, "%s\n", VFIO_DEVICE_API_CCW_STRING); +} +MDEV_TYPE_ATTR_RO(device_api); + +static ssize_t available_instances_show(struct kobject *kobj, + struct device *dev, char *buf) +{ + struct vfio_ccw_private *private = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", atomic_read(&private->avail)); +} +MDEV_TYPE_ATTR_RO(available_instances); + +static struct attribute *mdev_types_attrs[] = { + &mdev_type_attr_name.attr, + &mdev_type_attr_device_api.attr, + &mdev_type_attr_available_instances.attr, + NULL, +}; + +static struct attribute_group mdev_type_group = { + .name = "io", + .attrs = mdev_types_attrs, +}; + +struct attribute_group *mdev_type_groups[] = { + &mdev_type_group, + NULL, +}; + +static int vfio_ccw_mdev_create(struct kobject *kobj, struct mdev_device *mdev) +{ + struct vfio_ccw_private *private = + dev_get_drvdata(mdev_parent_dev(mdev)); + + if (private->state == VFIO_CCW_STATE_NOT_OPER) + return -ENODEV; + + if (atomic_dec_if_positive(&private->avail) < 0) + return -EPERM; + + private->mdev = mdev; + private->state = VFIO_CCW_STATE_IDLE; + + return 0; +} + +static int vfio_ccw_mdev_remove(struct mdev_device *mdev) +{ + struct vfio_ccw_private *private = + dev_get_drvdata(mdev_parent_dev(mdev)); + + if ((private->state != VFIO_CCW_STATE_NOT_OPER) && + (private->state != VFIO_CCW_STATE_STANDBY)) { + if (!vfio_ccw_mdev_reset(mdev)) + private->state = VFIO_CCW_STATE_STANDBY; + /* The state will be NOT_OPER on error. 
*/ + } + + private->mdev = NULL; + atomic_inc(&private->avail); + + return 0; +} + +static int vfio_ccw_mdev_open(struct mdev_device *mdev) +{ + struct vfio_ccw_private *private = + dev_get_drvdata(mdev_parent_dev(mdev)); + unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP; + + private->nb.notifier_call = vfio_ccw_mdev_notifier; + + return vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &events, &private->nb); +} + +void vfio_ccw_mdev_release(struct mdev_device *mdev) +{ + struct vfio_ccw_private *private = + dev_get_drvdata(mdev_parent_dev(mdev)); + + vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, + &private->nb); +} + +static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev, + char __user *buf, + size_t count, + loff_t *ppos) +{ + struct vfio_ccw_private *private; + struct ccw_io_region *region; + + if (*ppos + count > sizeof(*region)) + return -EINVAL; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); + region = &private->io_region; + if (copy_to_user(buf, (void *)region + *ppos, count)) + return -EFAULT; + + return count; +} + +static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + struct vfio_ccw_private *private; + struct ccw_io_region *region; + + if (*ppos + count > sizeof(*region)) + return -EINVAL; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); + if (private->state != VFIO_CCW_STATE_IDLE) + return -EACCES; + + region = &private->io_region; + if (copy_from_user((void *)region + *ppos, buf, count)) + return -EFAULT; + + vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_IO_REQ); + if (region->ret_code != 0) { + private->state = VFIO_CCW_STATE_IDLE; + return region->ret_code; + } + + return count; +} + +static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info) +{ + info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET; + info->num_regions = VFIO_CCW_NUM_REGIONS; + info->num_irqs = VFIO_CCW_NUM_IRQS; + + return 0; +} + +static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info, + u16 *cap_type_id, + void **cap_type) +{ + switch (info->index) { + case VFIO_CCW_CONFIG_REGION_INDEX: + info->offset = 0; + info->size = sizeof(struct ccw_io_region); + info->flags = VFIO_REGION_INFO_FLAG_READ + | VFIO_REGION_INFO_FLAG_WRITE; + return 0; + default: + return -EINVAL; + } +} + +int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info) +{ + if (info->index != VFIO_CCW_IO_IRQ_INDEX) + return -EINVAL; + + info->count = 1; + info->flags = VFIO_IRQ_INFO_EVENTFD; + + return 0; +} + +static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev, + uint32_t flags, + void __user *data) +{ + struct vfio_ccw_private *private; + struct eventfd_ctx **ctx; + + if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER)) + return -EINVAL; + + private = dev_get_drvdata(mdev_parent_dev(mdev)); + ctx = &private->io_trigger; + + switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { + case VFIO_IRQ_SET_DATA_NONE: + { + if (*ctx) + eventfd_signal(*ctx, 1); + return 0; + } + case VFIO_IRQ_SET_DATA_BOOL: + { + uint8_t trigger; + + if (get_user(trigger, (uint8_t __user *)data)) + return -EFAULT; + + if (trigger && *ctx) + eventfd_signal(*ctx, 1); + return 0; + } + case VFIO_IRQ_SET_DATA_EVENTFD: + { + int32_t fd; + + if (get_user(fd, (int32_t __user *)data)) + return -EFAULT; + + if (fd == -1) { + if (*ctx) + eventfd_ctx_put(*ctx); + *ctx = NULL; + } else if (fd >= 0) { + struct eventfd_ctx *efdctx; + + efdctx = eventfd_ctx_fdget(fd); + if (IS_ERR(efdctx)) + return PTR_ERR(efdctx); + + if (*ctx) + 
eventfd_ctx_put(*ctx); + + *ctx = efdctx; + } else + return -EINVAL; + + return 0; + } + default: + return -EINVAL; + } +} + +static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev, + unsigned int cmd, + unsigned long arg) +{ + int ret = 0; + unsigned long minsz; + + switch (cmd) { + case VFIO_DEVICE_GET_INFO: + { + struct vfio_device_info info; + + minsz = offsetofend(struct vfio_device_info, num_irqs); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = vfio_ccw_mdev_get_device_info(&info); + if (ret) + return ret; + + return copy_to_user((void __user *)arg, &info, minsz); + } + case VFIO_DEVICE_GET_REGION_INFO: + { + struct vfio_region_info info; + u16 cap_type_id = 0; + void *cap_type = NULL; + + minsz = offsetofend(struct vfio_region_info, offset); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz) + return -EINVAL; + + ret = vfio_ccw_mdev_get_region_info(&info, &cap_type_id, + &cap_type); + if (ret) + return ret; + + return copy_to_user((void __user *)arg, &info, minsz); + } + case VFIO_DEVICE_GET_IRQ_INFO: + { + struct vfio_irq_info info; + + minsz = offsetofend(struct vfio_irq_info, count); + + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; + + if (info.argsz < minsz || info.index >= VFIO_CCW_NUM_IRQS) + return -EINVAL; + + ret = vfio_ccw_mdev_get_irq_info(&info); + if (ret) + return ret; + + if (info.count == -1) + return -EINVAL; + + return copy_to_user((void __user *)arg, &info, minsz); + } + case VFIO_DEVICE_SET_IRQS: + { + struct vfio_irq_set hdr; + size_t data_size; + void __user *data; + + minsz = offsetofend(struct vfio_irq_set, count); + + if (copy_from_user(&hdr, (void __user *)arg, minsz)) + return -EFAULT; + + ret = vfio_set_irqs_validate_and_prepare(&hdr, 1, + VFIO_CCW_NUM_IRQS, + &data_size); + if (ret) + return ret; + + data = (void __user *)(arg + minsz); + return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, data); + } + case VFIO_DEVICE_RESET: + return vfio_ccw_mdev_reset(mdev); + default: + return -ENOTTY; + } +} + +static const struct mdev_parent_ops vfio_ccw_mdev_ops = { + .owner = THIS_MODULE, + .supported_type_groups = mdev_type_groups, + .create = vfio_ccw_mdev_create, + .remove = vfio_ccw_mdev_remove, + .open = vfio_ccw_mdev_open, + .release = vfio_ccw_mdev_release, + .read = vfio_ccw_mdev_read, + .write = vfio_ccw_mdev_write, + .ioctl = vfio_ccw_mdev_ioctl, +}; + +int vfio_ccw_mdev_reg(struct subchannel *sch) +{ + return mdev_register_device(&sch->dev, &vfio_ccw_mdev_ops); +} + +void vfio_ccw_mdev_unreg(struct subchannel *sch) +{ + mdev_unregister_device(&sch->dev); +} diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h new file mode 100644 index 000000000000..fc0f01c16ef9 --- /dev/null +++ b/drivers/s390/cio/vfio_ccw_private.h @@ -0,0 +1,96 @@ +/* + * Private stuff for vfio_ccw driver + * + * Copyright IBM Corp. 
2017 + * + * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> + * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> + */ + +#ifndef _VFIO_CCW_PRIVATE_H_ +#define _VFIO_CCW_PRIVATE_H_ + +#include <linux/completion.h> +#include <linux/eventfd.h> +#include <linux/workqueue.h> +#include <linux/vfio_ccw.h> + +#include "css.h" +#include "vfio_ccw_cp.h" + +/** + * struct vfio_ccw_private + * @sch: pointer to the subchannel + * @state: internal state of the device + * @completion: synchronization helper for I/O completion + * @avail: available for creating a mediated device + * @mdev: pointer to the mediated device + * @nb: notifier for vfio events + * @io_region: MMIO region to input/output I/O arguments/results + * @cp: channel program for the current I/O operation + * @irb: irb info received from interrupt + * @scsw: scsw info + * @io_trigger: eventfd ctx for signaling userspace I/O results + * @io_work: work for deferred processing of I/O + */ +struct vfio_ccw_private { + struct subchannel *sch; + int state; + struct completion *completion; + atomic_t avail; + struct mdev_device *mdev; + struct notifier_block nb; + struct ccw_io_region io_region; + + struct channel_program cp; + struct irb irb; + union scsw scsw; + + struct eventfd_ctx *io_trigger; + struct work_struct io_work; +} __aligned(8); + +extern int vfio_ccw_mdev_reg(struct subchannel *sch); +extern void vfio_ccw_mdev_unreg(struct subchannel *sch); + +extern int vfio_ccw_sch_quiesce(struct subchannel *sch); + +/* + * States of the device state machine. + */ +enum vfio_ccw_state { + VFIO_CCW_STATE_NOT_OPER, + VFIO_CCW_STATE_STANDBY, + VFIO_CCW_STATE_IDLE, + VFIO_CCW_STATE_BOXED, + VFIO_CCW_STATE_BUSY, + /* last element! */ + NR_VFIO_CCW_STATES +}; + +/* + * Asynchronous events of the device state machine. + */ +enum vfio_ccw_event { + VFIO_CCW_EVENT_NOT_OPER, + VFIO_CCW_EVENT_IO_REQ, + VFIO_CCW_EVENT_INTERRUPT, + /* last element! */ + NR_VFIO_CCW_EVENTS +}; + +/* + * Actions called through the jumptable. + */ +typedef void (fsm_func_t)(struct vfio_ccw_private *, enum vfio_ccw_event); +extern fsm_func_t *vfio_ccw_jumptable[NR_VFIO_CCW_STATES][NR_VFIO_CCW_EVENTS]; + +static inline void vfio_ccw_fsm_event(struct vfio_ccw_private *private, + int event) +{ + vfio_ccw_jumptable[private->state][event](private, event); +} + +extern struct workqueue_struct *vfio_ccw_work_q; + +#endif diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 40f1136f5568..ea86da8c75f9 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -80,7 +80,7 @@ struct secaeskeytoken { * token. If keybitsize is given, the bitsize of the key is * also checked. Returns 0 on success or errno value on failure. */ -static int check_secaeskeytoken(u8 *token, int keybitsize) +static int check_secaeskeytoken(const u8 *token, int keybitsize) { struct secaeskeytoken *t = (struct secaeskeytoken *) token; @@ -572,6 +572,12 @@ int pkey_sec2protkey(u16 cardnr, u16 domain, rc = -EIO; goto out; } + if (prepcblk->ccp_rscode != 0) { + DEBUG_WARN( + "pkey_sec2protkey unwrap secure key warning, card response %d/%d\n", + (int) prepcblk->ccp_rtcode, + (int) prepcblk->ccp_rscode); + } /* process response cprb param block */ prepcblk->rpl_parmb = ((u8 *) prepcblk) + sizeof(struct CPRBX); @@ -761,9 +767,10 @@ out: } /* - * Fetch just the mkvp value via query_crypto_facility from adapter. + * Fetch the current and old mkvp values via + * query_crypto_facility from adapter.
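+ * On success mkvp[0] contains the current and mkvp[1] the old master key verification pattern; pkey_findcard() and pkey_verifykey() below use the old value to recognize keys wrapped by the previous master key.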
*/ -static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) +static int fetch_mkvp(u16 cardnr, u16 domain, u64 mkvp[2]) { int rc, found = 0; size_t rlen, vlen; @@ -779,9 +786,10 @@ static int fetch_mkvp(u16 cardnr, u16 domain, u64 *mkvp) rc = query_crypto_facility(cardnr, domain, "STATICSA", rarray, &rlen, varray, &vlen); if (rc == 0 && rlen > 8*8 && vlen > 184+8) { - if (rarray[64] == '2') { + if (rarray[8*8] == '2') { /* current master key state is valid */ - *mkvp = *((u64 *)(varray + 184)); + mkvp[0] = *((u64 *)(varray + 184)); + mkvp[1] = *((u64 *)(varray + 172)); found = 1; } } @@ -796,14 +804,14 @@ struct mkvp_info { struct list_head list; u16 cardnr; u16 domain; - u64 mkvp; + u64 mkvp[2]; }; /* a list with mkvp_info entries */ static LIST_HEAD(mkvp_list); static DEFINE_SPINLOCK(mkvp_list_lock); -static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) +static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 mkvp[2]) { int rc = -ENOENT; struct mkvp_info *ptr; @@ -812,7 +820,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) list_for_each_entry(ptr, &mkvp_list, list) { if (ptr->cardnr == cardnr && ptr->domain == domain) { - *mkvp = ptr->mkvp; + memcpy(mkvp, ptr->mkvp, 2 * sizeof(u64)); rc = 0; break; } @@ -822,7 +830,7 @@ static int mkvp_cache_fetch(u16 cardnr, u16 domain, u64 *mkvp) return rc; } -static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) +static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp[2]) { int found = 0; struct mkvp_info *ptr; @@ -831,7 +839,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) list_for_each_entry(ptr, &mkvp_list, list) { if (ptr->cardnr == cardnr && ptr->domain == domain) { - ptr->mkvp = mkvp; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); found = 1; break; } @@ -844,7 +852,7 @@ static void mkvp_cache_update(u16 cardnr, u16 domain, u64 mkvp) } ptr->cardnr = cardnr; ptr->domain = domain; - ptr->mkvp = mkvp; + memcpy(ptr->mkvp, mkvp, 2 * sizeof(u64)); list_add(&ptr->list, &mkvp_list); } spin_unlock_bh(&mkvp_list_lock); @@ -888,8 +896,8 @@ int pkey_findcard(const struct pkey_seckey *seckey, struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; struct zcrypt_device_matrix *device_matrix; u16 card, dom; - u64 mkvp; - int i, rc; + u64 mkvp[2]; + int i, rc, oi = -1; /* mkvp must not be zero */ if (t->mkvp == 0) @@ -910,14 +918,14 @@ int pkey_findcard(const struct pkey_seckey *seckey, device_matrix->device[i].functions & 0x04) { /* an enabled CCA Coprocessor card */ /* try cached mkvp */ - if (mkvp_cache_fetch(card, dom, &mkvp) == 0 && - t->mkvp == mkvp) { + if (mkvp_cache_fetch(card, dom, mkvp) == 0 && + t->mkvp == mkvp[0]) { if (!verify) break; /* verify: fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, &mkvp) == 0) { + if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp) + if (t->mkvp == mkvp[0]) break; } } @@ -936,14 +944,21 @@ int pkey_findcard(const struct pkey_seckey *seckey, card = AP_QID_CARD(device_matrix->device[i].qid); dom = AP_QID_QUEUE(device_matrix->device[i].qid); /* fresh fetch mkvp from adapter */ - if (fetch_mkvp(card, dom, &mkvp) == 0) { + if (fetch_mkvp(card, dom, mkvp) == 0) { mkvp_cache_update(card, dom, mkvp); - if (t->mkvp == mkvp) + if (t->mkvp == mkvp[0]) break; + if (t->mkvp == mkvp[1] && oi < 0) + oi = i; } } + if (i >= MAX_ZDEV_ENTRIES && oi >= 0) { + /* old mkvp matched, use this card then */ + card = AP_QID_CARD(device_matrix->device[oi].qid); + dom = AP_QID_QUEUE(device_matrix->device[oi].qid); + } } - if (i 
< MAX_ZDEV_ENTRIES) { + if (i < MAX_ZDEV_ENTRIES || oi >= 0) { if (pcardnr) *pcardnr = card; if (pdomain) @@ -989,6 +1004,53 @@ int pkey_skey2pkey(const struct pkey_seckey *seckey, EXPORT_SYMBOL(pkey_skey2pkey); /* + * Verify key and give back some info about the key. + */ +int pkey_verifykey(const struct pkey_seckey *seckey, + u16 *pcardnr, u16 *pdomain, + u16 *pkeysize, u32 *pattributes) +{ + struct secaeskeytoken *t = (struct secaeskeytoken *) seckey; + u16 cardnr, domain; + u64 mkvp[2]; + int rc; + + /* check the secure key for valid AES secure key */ + rc = check_secaeskeytoken((u8 *) seckey, 0); + if (rc) + goto out; + if (pattributes) + *pattributes = PKEY_VERIFY_ATTR_AES; + if (pkeysize) + *pkeysize = t->bitsize; + + /* try to find a card which can handle this key */ + rc = pkey_findcard(seckey, &cardnr, &domain, 1); + if (rc) + goto out; + + /* check mkvp for old mkvp match */ + rc = mkvp_cache_fetch(cardnr, domain, mkvp); + if (rc) + goto out; + if (t->mkvp == mkvp[1]) { + DEBUG_DBG("pkey_verifykey secure key has old mkvp\n"); + if (pattributes) + *pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP; + } + + if (pcardnr) + *pcardnr = cardnr; + if (pdomain) + *pdomain = domain; + +out: + DEBUG_DBG("pkey_verifykey rc=%d\n", rc); + return rc; +} +EXPORT_SYMBOL(pkey_verifykey); + +/* * File io functions */ @@ -1089,6 +1151,21 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd, return -EFAULT; break; } + case PKEY_VERIFYKEY: { + struct pkey_verifykey __user *uvk = (void __user *) arg; + struct pkey_verifykey kvk; + + if (copy_from_user(&kvk, uvk, sizeof(kvk))) + return -EFAULT; + rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain, + &kvk.keysize, &kvk.attributes); + DEBUG_DBG("pkey_ioctl pkey_verifykey()=%d\n", rc); + if (rc) + break; + if (copy_to_user(uvk, &kvk, sizeof(kvk))) + return -EFAULT; + break; + } default: /* unknown/unsupported ioctl cmd */ return -ENOTTY; diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index fd5944bbe224..730d9619400e 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1283,7 +1283,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg) p_header = (struct pdu *) (skb_tail_pointer(ch->trans_skb) - skb->len); p_header->pdu_flag = 0x00; - if (skb->protocol == ntohs(ETH_P_SNAP)) + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) p_header->pdu_flag |= 0x60; else p_header->pdu_flag |= 0x20; diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index ac65f12bcd43..198842ce6876 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -106,7 +106,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) priv->stats.rx_frame_errors++; return; } - pskb->protocol = ntohs(header->type); + pskb->protocol = cpu_to_be16(header->type); if ((header->length <= LL_HEADER_LENGTH) || (len <= LL_HEADER_LENGTH)) { if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) { @@ -125,7 +125,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb) header->length -= LL_HEADER_LENGTH; len -= LL_HEADER_LENGTH; if ((header->length > skb_tailroom(pskb)) || - (header->length > len)) { + (header->length > len)) { if (!(ch->logflags & LOG_FLAG_OVERRUN)) { CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, "%s(%s): Packet size %d (overrun)" @@ -485,7 +485,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) } else { atomic_inc(&skb->users); header.length = l; - header.type = skb->protocol; + header.type = be16_to_cpu(skb->protocol); header.unused = 0; 
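/*
 * Editor's sketch (not part of the patch): how user space might exercise
 * the PKEY_VERIFYKEY ioctl introduced in pkey_api.c above.  Assumptions,
 * since they are not shown in the diff: the pkey misc device is reachable
 * as /dev/pkey and the uapi definitions live in <asm/pkey.h>.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/pkey.h>		/* assumed uapi header for struct pkey_verifykey */

int check_key(const struct pkey_seckey *seckey)
{
	struct pkey_verifykey vk = { .seckey = *seckey };
	int fd, rc;

	fd = open("/dev/pkey", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, PKEY_VERIFYKEY, &vk);
	close(fd);
	if (rc < 0)
		return -1;
	/* on success the kernel filled in cardnr, domain, keysize, attributes */
	if (vk.attributes & PKEY_VERIFY_ATTR_OLD_MKVP)
		printf("card %u/%u: key is wrapped by the old master key\n",
		       vk.cardnr, vk.domain);
	return 0;
}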
memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH); @@ -503,7 +503,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb) atomic_inc(&skb->users); ch->prof.txlen += skb->len; header.length = skb->len + LL_HEADER_LENGTH; - header.type = skb->protocol; + header.type = be16_to_cpu(skb->protocol); header.unused = 0; memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH); block_len = skb->len + 2; @@ -690,7 +690,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) p_header->pdu_offset = skb->len; p_header->pdu_proto = 0x01; p_header->pdu_flag = 0x00; - if (skb->protocol == ntohs(ETH_P_SNAP)) { + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; } else { p_header->pdu_flag |= PDU_FIRST; @@ -745,7 +745,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb) p_header->pdu_proto = 0x01; p_header->pdu_flag = 0x00; p_header->pdu_seq = 0; - if (skb->protocol == ntohs(ETH_P_SNAP)) { + if (be16_to_cpu(skb->protocol) == ETH_P_SNAP) { p_header->pdu_flag |= PDU_FIRST | PDU_CNTL; } else { p_header->pdu_flag |= PDU_FIRST; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 3f85b97ab8d2..dba94b486f05 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -635,7 +635,7 @@ static void netiucv_unpack_skb(struct iucv_connection *conn, skb_put(pskb, NETIUCV_HDRLEN); pskb->dev = dev; pskb->ip_summed = CHECKSUM_NONE; - pskb->protocol = ntohs(ETH_P_IP); + pskb->protocol = cpu_to_be16(ETH_P_IP); while (1) { struct sk_buff *skb; diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index e7addea8741b..f6aa21176d89 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -240,7 +240,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa, #define QETH_TX_TIMEOUT 100 * HZ #define QETH_RCD_TIMEOUT 60 * HZ #define QETH_RECLAIM_WORK_TIME HZ -#define QETH_HEADER_SIZE 32 #define QETH_MAX_PORTNO 15 /*IPv6 address autoconfiguration stuff*/ @@ -447,7 +446,7 @@ struct qeth_qdio_out_buffer { atomic_t state; int next_element_to_fill; struct sk_buff_head skb_list; - int is_header[16]; + int is_header[QDIO_MAX_ELEMENTS_PER_BUFFER]; struct qaob *aob; struct qeth_qdio_out_q *q; @@ -503,22 +502,12 @@ struct qeth_qdio_info { int default_out_queue; }; -enum qeth_send_errors { - QETH_SEND_ERROR_NONE, - QETH_SEND_ERROR_LINK_FAILURE, - QETH_SEND_ERROR_RETRY, - QETH_SEND_ERROR_KICK_IT, -}; - #define QETH_ETH_MAC_V4 0x0100 /* like v4 */ #define QETH_ETH_MAC_V6 0x3333 /* like v6 */ /* tr mc mac is longer, but that will be enough to detect mc frames */ #define QETH_TR_MAC_NC 0xc000 /* non-canonical */ #define QETH_TR_MAC_C 0x0300 /* canonical */ -#define DEFAULT_ADD_HHLEN 0 -#define MAX_ADD_HHLEN 1024 - /** * buffer stuff for read channel */ @@ -644,7 +633,6 @@ struct qeth_reply { atomic_t refcnt; }; - struct qeth_card_blkt { int time_total; int inter_packet; @@ -685,7 +673,6 @@ struct qeth_card_options { struct qeth_ipa_info ipa6; struct qeth_sbp_info sbp; /* SETBRIDGEPORT options */ int fake_broadcast; - int add_hhlen; int layer2; int performance_stats; int rx_sg_cb; @@ -717,17 +704,16 @@ struct qeth_discipline { void (*start_poll)(struct ccw_device *, int, unsigned long); qdio_handler_t *input_handler; qdio_handler_t *output_handler; + int (*process_rx_buffer)(struct qeth_card *card, int budget, int *done); int (*recover)(void *ptr); int (*setup) (struct ccwgroup_device *); void (*remove) (struct ccwgroup_device 
*); int (*set_online) (struct ccwgroup_device *); int (*set_offline) (struct ccwgroup_device *); - void (*shutdown)(struct ccwgroup_device *); - int (*prepare) (struct ccwgroup_device *); - void (*complete) (struct ccwgroup_device *); int (*freeze)(struct ccwgroup_device *); int (*thaw) (struct ccwgroup_device *); int (*restore)(struct ccwgroup_device *); + int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd); int (*control_event_handler)(struct qeth_card *card, struct qeth_ipa_cmd *cmd); }; @@ -856,9 +842,9 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) { __be16 *p = &((struct ethhdr *)skb->data)->h_proto; - if (*p == ETH_P_8021Q) + if (be16_to_cpu(*p) == ETH_P_8021Q) p += 2; - switch (*p) { + switch (be16_to_cpu(*p)) { case ETH_P_IPV6: return 6; case ETH_P_IP: @@ -920,14 +906,12 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, enum qeth_ipa_cmds, enum qeth_prot_versions); int qeth_query_setadapterparms(struct qeth_card *); -int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *, - unsigned int, const char *); -void qeth_queue_input_buffer(struct qeth_card *, int); struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, struct qeth_qdio_buffer *, struct qdio_buffer_element **, int *, struct qeth_hdr **); void qeth_schedule_recovery(struct qeth_card *); void qeth_qdio_start_poll(struct ccw_device *, int, unsigned long); +int qeth_poll(struct napi_struct *napi, int budget); void qeth_qdio_input_handler(struct ccw_device *, unsigned int, unsigned int, int, int, unsigned long); @@ -948,9 +932,6 @@ void qeth_prepare_control_data(struct qeth_card *, int, void qeth_release_buffer(struct qeth_channel *, struct qeth_cmd_buffer *); void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char); struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); -int qeth_mdio_read(struct net_device *, int, int); -int qeth_snmp_command(struct qeth_card *, char __user *); -int qeth_query_oat_command(struct qeth_card *, char __user *); int qeth_query_switch_attributes(struct qeth_card *card, struct qeth_switch_info *sw_info); int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, @@ -961,19 +942,22 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role); int qeth_bridgeport_an_set(struct qeth_card *card, int enable); int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); -int qeth_get_elements_no(struct qeth_card *, struct sk_buff *, int); +int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, + int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int, int, int); + struct sk_buff *, struct qeth_hdr *, int, int); int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, struct sk_buff *, struct qeth_hdr *, int); +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct net_device *, struct ethtool_stats *, u64 *); void qeth_core_get_strings(struct net_device *, u32, u8 *); void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...); -int 
qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); +int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback); int qeth_hdr_chk_and_bounce(struct sk_buff *, struct qeth_hdr **, int); int qeth_configure_cq(struct qeth_card *, enum qeth_cq); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 315d8a2db7c0..38114a8d56e0 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -55,7 +55,6 @@ static struct mutex qeth_mod_mutex; static void qeth_send_control_data_cb(struct qeth_channel *, struct qeth_cmd_buffer *); -static int qeth_issue_next_read(struct qeth_card *); static struct qeth_cmd_buffer *qeth_get_buffer(struct qeth_channel *); static void qeth_setup_ccw(struct qeth_channel *, unsigned char *, __u32); static void qeth_free_buffer_pool(struct qeth_card *); @@ -1202,7 +1201,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q, while (skb) { QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); - if (skb->protocol == ETH_P_AF_IUCV) { + if (be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { if (skb->sk) { struct iucv_sock *iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, notification); @@ -1233,7 +1232,8 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) while (skb) { QETH_CARD_TEXT(buf->q->card, 5, "skbr"); QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb); - if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) { + if (notify_general_error && + be16_to_cpu(skb->protocol) == ETH_P_AF_IUCV) { if (skb->sk) { iucv = iucv_sk(skb->sk); iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR); @@ -1396,7 +1396,6 @@ static void qeth_set_intial_options(struct qeth_card *card) card->options.route4.type = NO_ROUTER; card->options.route6.type = NO_ROUTER; card->options.fake_broadcast = 0; - card->options.add_hhlen = DEFAULT_ADD_HHLEN; card->options.performance_stats = 0; card->options.rx_sg_cb = QETH_RX_SG_CB; card->options.isolation = ISOLATION_MODE_NONE; @@ -3217,8 +3216,10 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action) } EXPORT_SYMBOL_GPL(qeth_hw_trap); -int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, - unsigned int qdio_error, const char *dbftext) +static int qeth_check_qdio_errors(struct qeth_card *card, + struct qdio_buffer *buf, + unsigned int qdio_error, + const char *dbftext) { if (qdio_error) { QETH_CARD_TEXT(card, 2, dbftext); @@ -3235,18 +3236,8 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf, } return 0; } -EXPORT_SYMBOL_GPL(qeth_check_qdio_errors); -static void qeth_buffer_reclaim_work(struct work_struct *work) -{ - struct qeth_card *card = container_of(work, struct qeth_card, - buffer_reclaim_work.work); - - QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); - qeth_queue_input_buffer(card, card->reclaim_index); -} - -void qeth_queue_input_buffer(struct qeth_card *card, int index) +static void qeth_queue_input_buffer(struct qeth_card *card, int index) { struct qeth_qdio_q *queue = card->qdio.in_q; struct list_head *lh; @@ -3320,9 +3311,17 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) QDIO_MAX_BUFFERS_PER_Q; } } -EXPORT_SYMBOL_GPL(qeth_queue_input_buffer); -static int qeth_handle_send_error(struct qeth_card *card, +static void qeth_buffer_reclaim_work(struct work_struct *work) +{ + struct qeth_card 
*card = container_of(work, struct qeth_card, + buffer_reclaim_work.work); + + QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index); + qeth_queue_input_buffer(card, card->reclaim_index); +} + +static void qeth_handle_send_error(struct qeth_card *card, struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) { int sbalf15 = buffer->buffer->element[15].sflags; @@ -3338,15 +3337,14 @@ static int qeth_handle_send_error(struct qeth_card *card, qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr"); if (!qdio_err) - return QETH_SEND_ERROR_NONE; + return; if ((sbalf15 >= 15) && (sbalf15 <= 31)) - return QETH_SEND_ERROR_RETRY; + return; QETH_CARD_TEXT(card, 1, "lnkfail"); QETH_CARD_TEXT_(card, 1, "%04x %02x", (u16)qdio_err, (u8)sbalf15); - return QETH_SEND_ERROR_LINK_FAILURE; } /* @@ -3799,9 +3797,9 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, return qeth_cut_iqd_prio(card, ~skb->priority >> 1 & 3); case QETH_PRIO_Q_ING_VLAN: tci = &((struct ethhdr *)skb->data)->h_proto; - if (*tci == ETH_P_8021Q) - return qeth_cut_iqd_prio(card, ~*(tci + 1) >> - (VLAN_PRIO_SHIFT + 1) & 3); + if (be16_to_cpu(*tci) == ETH_P_8021Q) + return qeth_cut_iqd_prio(card, + ~be16_to_cpu(*(tci + 1)) >> (VLAN_PRIO_SHIFT + 1) & 3); break; default: break; @@ -3837,6 +3835,7 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); * @card: qeth card structure, to check max. elems. * @skb: SKB address * @extra_elems: extra elems needed, to check against max. + * @data_offset: range starts at skb->data + data_offset * * Returns the number of pages, and thus QDIO buffer elements, needed to cover * skb data, including linear part and fragments. Checks if the result plus @@ -3844,10 +3843,10 @@ EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); * Note: extra_elems is not included in the returned result. 
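+ * + * Illustration (editor's addition, not in the patch): the result counts the pages touched by the range [skb->data + data_offset, skb->data + skb_headlen(skb)) plus the pages of each fragment, so a linear range that crosses one page boundary costs two buffer elements.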
*/ int qeth_get_elements_no(struct qeth_card *card, - struct sk_buff *skb, int extra_elems) + struct sk_buff *skb, int extra_elems, int data_offset) { int elements = qeth_get_elements_for_range( - (addr_t)skb->data, + (addr_t)skb->data + data_offset, (addr_t)skb->data + skb_headlen(skb)) + qeth_get_elements_for_frags(skb); @@ -4025,8 +4024,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, int qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, - struct qeth_hdr *hdr, int elements_needed, - int offset, int hd_len) + struct qeth_hdr *hdr, int offset, int hd_len) { struct qeth_qdio_out_buffer *buffer; int index; @@ -4418,7 +4416,7 @@ void qeth_tx_timeout(struct net_device *dev) } EXPORT_SYMBOL_GPL(qeth_tx_timeout); -int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) +static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) { struct qeth_card *card = dev->ml_priv; int rc = 0; @@ -4481,7 +4479,6 @@ int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum) } return rc; } -EXPORT_SYMBOL_GPL(qeth_mdio_read); static int qeth_send_ipa_snmp_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob, int len, @@ -4571,7 +4568,7 @@ static int qeth_snmp_command_cb(struct qeth_card *card, return 0; } -int qeth_snmp_command(struct qeth_card *card, char __user *udata) +static int qeth_snmp_command(struct qeth_card *card, char __user *udata) { struct qeth_cmd_buffer *iob; struct qeth_ipa_cmd *cmd; @@ -4631,7 +4628,6 @@ out: kfree(qinfo.udata); return rc; } -EXPORT_SYMBOL_GPL(qeth_snmp_command); static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -4663,7 +4659,7 @@ static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, return 0; } -int qeth_query_oat_command(struct qeth_card *card, char __user *udata) +static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) { int rc = 0; struct qeth_cmd_buffer *iob; @@ -4733,7 +4729,6 @@ out_free: out: return rc; } -EXPORT_SYMBOL_GPL(qeth_query_oat_command); static int qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) @@ -4774,12 +4769,10 @@ static int qeth_query_card_info(struct qeth_card *card, static inline int qeth_get_qdio_q_format(struct qeth_card *card) { - switch (card->info.type) { - case QETH_CARD_TYPE_IQD: - return 2; - default: - return 0; - } + if (card->info.type == QETH_CARD_TYPE_IQD) + return QDIO_IQDIO_QFMT; + else + return QDIO_QETH_QFMT; } static void qeth_determine_capabilities(struct qeth_card *card) @@ -4818,8 +4811,9 @@ static void qeth_determine_capabilities(struct qeth_card *card) QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); QETH_DBF_TEXT_(SETUP, 2, "qfmt%d", card->ssqd.qfmt); - QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac1); - QETH_DBF_TEXT_(SETUP, 2, "%d", card->ssqd.qdioac3); + QETH_DBF_TEXT_(SETUP, 2, "ac1:%02x", card->ssqd.qdioac1); + QETH_DBF_TEXT_(SETUP, 2, "ac2:%04x", card->ssqd.qdioac2); + QETH_DBF_TEXT_(SETUP, 2, "ac3:%04x", card->ssqd.qdioac3); QETH_DBF_TEXT_(SETUP, 2, "icnt%d", card->ssqd.icnt); if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) || ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) || @@ -5287,6 +5281,83 @@ no_mem: } EXPORT_SYMBOL_GPL(qeth_core_get_next_skb); +int qeth_poll(struct napi_struct *napi, int budget) +{ + struct qeth_card *card = container_of(napi, struct qeth_card, napi); + int work_done = 0; + struct qeth_qdio_buffer *buffer; + int done; + int new_budget = budget; + + if 
(card->options.performance_stats) { + card->perf_stats.inbound_cnt++; + card->perf_stats.inbound_start_time = qeth_get_micros(); + } + + while (1) { + if (!card->rx.b_count) { + card->rx.qdio_err = 0; + card->rx.b_count = qdio_get_next_buffers( + card->data.ccwdev, 0, &card->rx.b_index, + &card->rx.qdio_err); + if (card->rx.b_count <= 0) { + card->rx.b_count = 0; + break; + } + card->rx.b_element = + &card->qdio.in_q->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + + while (card->rx.b_count) { + buffer = &card->qdio.in_q->bufs[card->rx.b_index]; + if (!(card->rx.qdio_err && + qeth_check_qdio_errors(card, buffer->buffer, + card->rx.qdio_err, "qinerr"))) + work_done += + card->discipline->process_rx_buffer( + card, new_budget, &done); + else + done = 1; + + if (done) { + if (card->options.performance_stats) + card->perf_stats.bufs_rec++; + qeth_put_buffer_pool_entry(card, + buffer->pool_entry); + qeth_queue_input_buffer(card, card->rx.b_index); + card->rx.b_count--; + if (card->rx.b_count) { + card->rx.b_index = + (card->rx.b_index + 1) % + QDIO_MAX_BUFFERS_PER_Q; + card->rx.b_element = + &card->qdio.in_q + ->bufs[card->rx.b_index] + .buffer->element[0]; + card->rx.e_offset = 0; + } + } + + if (work_done >= budget) + goto out; + else + new_budget = budget - work_done; + } + } + + napi_complete(napi); + if (qdio_start_irq(card->data.ccwdev, 0)) + napi_schedule(&card->napi); +out: + if (card->options.performance_stats) + card->perf_stats.inbound_time += qeth_get_micros() - + card->perf_stats.inbound_start_time; + return work_done; +} +EXPORT_SYMBOL_GPL(qeth_poll); + int qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { @@ -5677,23 +5748,12 @@ static int qeth_core_set_offline(struct ccwgroup_device *gdev) static void qeth_core_shutdown(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline && card->discipline->shutdown) - card->discipline->shutdown(gdev); -} - -static int qeth_core_prepare(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline && card->discipline->prepare) - return card->discipline->prepare(gdev); - return 0; -} - -static void qeth_core_complete(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - if (card->discipline && card->discipline->complete) - card->discipline->complete(gdev); + qeth_set_allowed_threads(card, 0, 1); + if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) + qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); + qeth_qdio_clear_card(card, 0); + qeth_clear_qdio_buffers(card); + qdio_free(CARD_DDEV(card)); } static int qeth_core_freeze(struct ccwgroup_device *gdev) @@ -5730,8 +5790,8 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .set_online = qeth_core_set_online, .set_offline = qeth_core_set_offline, .shutdown = qeth_core_shutdown, - .prepare = qeth_core_prepare, - .complete = qeth_core_complete, + .prepare = NULL, + .complete = NULL, .freeze = qeth_core_freeze, .thaw = qeth_core_thaw, .restore = qeth_core_restore, @@ -5761,6 +5821,60 @@ static const struct attribute_group *qeth_drv_attr_groups[] = { NULL, }; +int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct qeth_card *card = dev->ml_priv; + struct mii_ioctl_data *mii_data; + int rc = 0; + + if (!card) + return -ENODEV; + + if (!qeth_card_hw_is_reachable(card)) + return -ENODEV; + + if (card->info.type == QETH_CARD_TYPE_OSN) + return -EPERM; + + 
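+ /* Editor's note: this consolidated handler replaces the near-identical per-discipline ioctl copies removed from qeth_l2_main.c and qeth_l3_main.c below; commands it does not recognize are forwarded through the new discipline->do_ioctl hook (qeth_l3_do_ioctl for layer 3, NULL for layer 2). */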
switch (cmd) { + case SIOC_QETH_ADP_SET_SNMP_CONTROL: + rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); + break; + case SIOC_QETH_GET_CARD_TYPE: + if ((card->info.type == QETH_CARD_TYPE_OSD || + card->info.type == QETH_CARD_TYPE_OSM || + card->info.type == QETH_CARD_TYPE_OSX) && + !card->info.guestlan) + return 1; + else + return 0; + case SIOCGMIIPHY: + mii_data = if_mii(rq); + mii_data->phy_id = 0; + break; + case SIOCGMIIREG: + mii_data = if_mii(rq); + if (mii_data->phy_id != 0) + rc = -EINVAL; + else + mii_data->val_out = qeth_mdio_read(dev, + mii_data->phy_id, mii_data->reg_num); + break; + case SIOC_QETH_QUERY_OAT: + rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); + break; + default: + if (card->discipline->do_ioctl) + rc = card->discipline->do_ioctl(dev, rq, cmd); + else + rc = -EOPNOTSUPP; + } + if (rc) + QETH_CARD_TEXT_(card, 2, "ioce%x", rc); + return rc; +} +EXPORT_SYMBOL_GPL(qeth_do_ioctl); + static struct { const char str[ETH_GSTRING_LEN]; } qeth_ethtool_stats_keys[] = { @@ -5895,104 +6009,124 @@ void qeth_core_get_drvinfo(struct net_device *dev, } EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); -/* Helper function to fill 'advertizing' and 'supported' which are the same. */ -/* Autoneg and full-duplex are supported and advertized uncondionally. */ -/* Always advertize and support all speeds up to specified, and only one */ +/* Helper function to fill 'advertising' and 'supported' which are the same. */ +/* Autoneg and full-duplex are supported and advertised unconditionally. */ +/* Always advertise and support all speeds up to specified, and only one */ /* specified port type. */ -static void qeth_set_ecmd_adv_sup(struct ethtool_cmd *ecmd, +static void qeth_set_cmd_adv_sup(struct ethtool_link_ksettings *cmd, int maxspeed, int porttype) { - int port_sup, port_adv, spd_sup, spd_adv; + ethtool_link_ksettings_zero_link_mode(cmd, supported); + ethtool_link_ksettings_zero_link_mode(cmd, advertising); + ethtool_link_ksettings_zero_link_mode(cmd, lp_advertising); + + ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg); switch (porttype) { case PORT_TP: - port_sup = SUPPORTED_TP; - port_adv = ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); break; case PORT_FIBRE: - port_sup = SUPPORTED_FIBRE; - port_adv = ADVERTISED_FIBRE; + ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); + ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE); break; default: - port_sup = SUPPORTED_TP; - port_adv = ADVERTISED_TP; + ethtool_link_ksettings_add_link_mode(cmd, supported, TP); + ethtool_link_ksettings_add_link_mode(cmd, advertising, TP); WARN_ON_ONCE(1); } - /* "Fallthrough" case'es ordered from high to low result in setting */ - /* flags cumulatively, starting from the specified speed and down to */ - /* the lowest possible. 
*/ - spd_sup = 0; - spd_adv = 0; + /* fallthrough from high to low, to select all legal speeds: */ switch (maxspeed) { case SPEED_10000: - spd_sup |= SUPPORTED_10000baseT_Full; - spd_adv |= ADVERTISED_10000baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10000baseT_Full); case SPEED_1000: - spd_sup |= SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; - spd_adv |= ADVERTISED_1000baseT_Half | - ADVERTISED_1000baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 1000baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 1000baseT_Half); case SPEED_100: - spd_sup |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; - spd_adv |= ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 100baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 100baseT_Half); case SPEED_10: - spd_sup |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; - spd_adv |= ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; - break; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); + /* end fallthrough */ + break; default: - spd_sup = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; - spd_adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Full); + ethtool_link_ksettings_add_link_mode(cmd, supported, + 10baseT_Half); + ethtool_link_ksettings_add_link_mode(cmd, advertising, + 10baseT_Half); WARN_ON_ONCE(1); } - ecmd->advertising = ADVERTISED_Autoneg | port_adv | spd_adv; - ecmd->supported = SUPPORTED_Autoneg | port_sup | spd_sup; } -int qeth_core_ethtool_get_settings(struct net_device *netdev, - struct ethtool_cmd *ecmd) +int qeth_core_ethtool_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) { struct qeth_card *card = netdev->ml_priv; enum qeth_link_types link_type; struct carrier_info carrier_info; int rc; - u32 speed; if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) link_type = QETH_LINK_TYPE_10GBIT_ETH; else link_type = card->info.link_type; - ecmd->transceiver = XCVR_INTERNAL; - ecmd->duplex = DUPLEX_FULL; - ecmd->autoneg = AUTONEG_ENABLE; + cmd->base.duplex = DUPLEX_FULL; + cmd->base.autoneg = AUTONEG_ENABLE; + cmd->base.phy_address = 0; + cmd->base.mdio_support = 0; + cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID; + cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID; switch (link_type) { case QETH_LINK_TYPE_FAST_ETH: case QETH_LINK_TYPE_LANE_ETH100: - qeth_set_ecmd_adv_sup(ecmd, SPEED_100, PORT_TP); - speed = SPEED_100; - ecmd->port = PORT_TP; + cmd->base.speed = SPEED_100; + cmd->base.port = PORT_TP; break; - case QETH_LINK_TYPE_GBIT_ETH: case QETH_LINK_TYPE_LANE_ETH1000: - qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE); - speed = SPEED_1000; - ecmd->port = PORT_FIBRE; + cmd->base.speed = 
SPEED_1000; + cmd->base.port = PORT_FIBRE; break; - case QETH_LINK_TYPE_10GBIT_ETH: - qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE); - speed = SPEED_10000; - ecmd->port = PORT_FIBRE; + cmd->base.speed = SPEED_10000; + cmd->base.port = PORT_FIBRE; break; - default: - qeth_set_ecmd_adv_sup(ecmd, SPEED_10, PORT_TP); - speed = SPEED_10; - ecmd->port = PORT_TP; + cmd->base.speed = SPEED_10; + cmd->base.port = PORT_TP; } - ethtool_cmd_speed_set(ecmd, speed); + qeth_set_cmd_adv_sup(cmd, cmd->base.speed, cmd->base.port); /* Check if we can obtain more accurate information. */ /* If QUERY_CARD_INFO command is not supported or fails, */ @@ -6017,49 +6151,48 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, switch (carrier_info.card_type) { case CARD_INFO_TYPE_1G_COPPER_A: case CARD_INFO_TYPE_1G_COPPER_B: - qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_TP); - ecmd->port = PORT_TP; + cmd->base.port = PORT_TP; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); break; case CARD_INFO_TYPE_1G_FIBRE_A: case CARD_INFO_TYPE_1G_FIBRE_B: - qeth_set_ecmd_adv_sup(ecmd, SPEED_1000, PORT_FIBRE); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_1000, cmd->base.port); break; case CARD_INFO_TYPE_10G_FIBRE_A: case CARD_INFO_TYPE_10G_FIBRE_B: - qeth_set_ecmd_adv_sup(ecmd, SPEED_10000, PORT_FIBRE); - ecmd->port = PORT_FIBRE; + cmd->base.port = PORT_FIBRE; + qeth_set_cmd_adv_sup(cmd, SPEED_10000, cmd->base.port); break; } switch (carrier_info.port_mode) { case CARD_INFO_PORTM_FULLDUPLEX: - ecmd->duplex = DUPLEX_FULL; + cmd->base.duplex = DUPLEX_FULL; break; case CARD_INFO_PORTM_HALFDUPLEX: - ecmd->duplex = DUPLEX_HALF; + cmd->base.duplex = DUPLEX_HALF; break; } switch (carrier_info.port_speed) { case CARD_INFO_PORTS_10M: - speed = SPEED_10; + cmd->base.speed = SPEED_10; break; case CARD_INFO_PORTS_100M: - speed = SPEED_100; + cmd->base.speed = SPEED_100; break; case CARD_INFO_PORTS_1G: - speed = SPEED_1000; + cmd->base.speed = SPEED_1000; break; case CARD_INFO_PORTS_10G: - speed = SPEED_10000; + cmd->base.speed = SPEED_10000; break; } - ethtool_cmd_speed_set(ecmd, speed); return 0; } -EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); +EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_link_ksettings); /* Callback to handle checksum offload command reply from OSA card. * Verify that required features have been enabled on the card. 
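/*
 * Editor's sketch (not part of the patch): the minimal shape of a
 * get_link_ksettings callback using the same kernel helpers as the qeth
 * conversion above; the "demo" names are placeholders and the fixed
 * 1000/full/fibre values are illustrative only.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static int demo_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	/* start from empty link-mode bitmaps, as qeth_set_cmd_adv_sup() does */
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
	ethtool_link_ksettings_add_link_mode(cmd, supported, 1000baseT_Full);
	ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full);

	/* scalar settings move into cmd->base instead of struct ethtool_cmd */
	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_FIBRE;
	cmd->base.autoneg = AUTONEG_ENABLE;
	return 0;
}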
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index bc69d0a338ad..4accb0a61ce0 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -29,7 +29,6 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_TIMEOUT (10 * HZ) #define QETH_IPA_TIMEOUT (45 * HZ) #define QETH_IDX_COMMAND_SEQNO 0xffff0000 -#define SR_INFO_LEN 16 #define QETH_CLEAR_CHANNEL_PARM -10 #define QETH_HALT_CHANNEL_PARM -11 @@ -65,7 +64,6 @@ enum qeth_link_types { QETH_LINK_TYPE_LANE_TR = 0x82, QETH_LINK_TYPE_LANE_ETH1000 = 0x83, QETH_LINK_TYPE_LANE = 0x88, - QETH_LINK_TYPE_ATM_NATIVE = 0x90, }; /* @@ -185,8 +183,6 @@ enum qeth_ipa_return_codes { IPA_RC_ENOMEM = 0xfffe, IPA_RC_FFFF = 0xffff }; -/* for DELIP */ -#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED /* for SET_DIAGNOSTIC_ASSIST */ #define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL #define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR @@ -631,14 +627,6 @@ enum qeth_ipa_addr_change_code { IPA_ADDR_CHANGE_CODE_MACADDR = 0x02, IPA_ADDR_CHANGE_CODE_REMOVAL = 0x80, /* else addition */ }; -enum qeth_ipa_addr_change_retcode { - IPA_ADDR_CHANGE_RETCODE_OK = 0x0000, - IPA_ADDR_CHANGE_RETCODE_LOSTEVENTS = 0x0010, -}; -enum qeth_ipa_addr_change_lostmask { - IPA_ADDR_CHANGE_MASK_OVERFLOW = 0x01, - IPA_ADDR_CHANGE_MASK_STATECHANGE = 0x02, -}; struct qeth_ipacmd_addr_change_entry { struct net_if_token token; @@ -817,9 +805,4 @@ extern unsigned char IDX_ACTIVATE_WRITE[]; ((buffer) && \ (*(buffer + ((*(buffer + 0x0b)) + 4)) == 0xc1)) -#define ADDR_FRAME_TYPE_DIX 1 -#define ADDR_FRAME_TYPE_802_3 2 -#define ADDR_FRAME_TYPE_TR_WITHOUT_SR 0x10 -#define ADDR_FRAME_TYPE_TR_WITH_SR 0x20 - #endif diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index bea483307618..1b07f382d74c 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -16,7 +16,6 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/etherdevice.h> -#include <linux/mii.h> #include <linux/ip.h> #include <linux/list.h> #include <linux/hash.h> @@ -28,63 +27,12 @@ static int qeth_l2_set_offline(struct ccwgroup_device *); static int qeth_l2_stop(struct net_device *); static void qeth_l2_set_rx_mode(struct net_device *); -static int qeth_l2_recover(void *); static void qeth_bridgeport_query_support(struct qeth_card *card); static void qeth_bridge_state_change(struct qeth_card *card, struct qeth_ipa_cmd *cmd); static void qeth_bridge_host_event(struct qeth_card *card, struct qeth_ipa_cmd *cmd); -static int qeth_l2_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -{ - struct qeth_card *card = dev->ml_priv; - struct mii_ioctl_data *mii_data; - int rc = 0; - - if (!card) - return -ENODEV; - - if (!qeth_card_hw_is_reachable(card)) - return -ENODEV; - - if (card->info.type == QETH_CARD_TYPE_OSN) - return -EPERM; - - switch (cmd) { - case SIOC_QETH_ADP_SET_SNMP_CONTROL: - rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data); - break; - case SIOC_QETH_GET_CARD_TYPE: - if ((card->info.type == QETH_CARD_TYPE_OSD || - card->info.type == QETH_CARD_TYPE_OSM || - card->info.type == QETH_CARD_TYPE_OSX) && - !card->info.guestlan) - return 1; - return 0; - break; - case SIOCGMIIPHY: - mii_data = if_mii(rq); - mii_data->phy_id = 0; - break; - case SIOCGMIIREG: - mii_data = if_mii(rq); - if (mii_data->phy_id != 0) - rc = -EINVAL; - else - mii_data->val_out = qeth_mdio_read(dev, - mii_data->phy_id, mii_data->reg_num); - break; - case SIOC_QETH_QUERY_OAT: - rc = 
qeth_query_oat_command(card, rq->ifr_ifru.ifru_data); - break; - default: - rc = -EOPNOTSUPP; - } - if (rc) - QETH_CARD_TEXT_(card, 2, "ioce%d", rc); - return rc; -} - static int qeth_l2_verify_dev(struct net_device *dev) { struct qeth_card *card; @@ -332,7 +280,7 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, else hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; - hdr->hdr.l2.pkt_length = skb->len-QETH_HEADER_SIZE; + hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr); /* VSWITCH relies on the VLAN * information to be present in * the QDIO header */ @@ -552,81 +500,6 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card, return work_done; } -static int qeth_l2_poll(struct napi_struct *napi, int budget) -{ - struct qeth_card *card = container_of(napi, struct qeth_card, napi); - int work_done = 0; - struct qeth_qdio_buffer *buffer; - int done; - int new_budget = budget; - - if (card->options.performance_stats) { - card->perf_stats.inbound_cnt++; - card->perf_stats.inbound_start_time = qeth_get_micros(); - } - - while (1) { - if (!card->rx.b_count) { - card->rx.qdio_err = 0; - card->rx.b_count = qdio_get_next_buffers( - card->data.ccwdev, 0, &card->rx.b_index, - &card->rx.qdio_err); - if (card->rx.b_count <= 0) { - card->rx.b_count = 0; - break; - } - card->rx.b_element = - &card->qdio.in_q->bufs[card->rx.b_index] - .buffer->element[0]; - card->rx.e_offset = 0; - } - - while (card->rx.b_count) { - buffer = &card->qdio.in_q->bufs[card->rx.b_index]; - if (!(card->rx.qdio_err && - qeth_check_qdio_errors(card, buffer->buffer, - card->rx.qdio_err, "qinerr"))) - work_done += qeth_l2_process_inbound_buffer( - card, new_budget, &done); - else - done = 1; - - if (done) { - if (card->options.performance_stats) - card->perf_stats.bufs_rec++; - qeth_put_buffer_pool_entry(card, - buffer->pool_entry); - qeth_queue_input_buffer(card, card->rx.b_index); - card->rx.b_count--; - if (card->rx.b_count) { - card->rx.b_index = - (card->rx.b_index + 1) % - QDIO_MAX_BUFFERS_PER_Q; - card->rx.b_element = - &card->qdio.in_q - ->bufs[card->rx.b_index] - .buffer->element[0]; - card->rx.e_offset = 0; - } - } - - if (work_done >= budget) - goto out; - else - new_budget = budget - work_done; - } - } - - napi_complete(napi); - if (qdio_start_irq(card->data.ccwdev, 0)) - napi_schedule(&card->napi); -out: - if (card->options.performance_stats) - card->perf_stats.inbound_time += qeth_get_micros() - - card->perf_stats.inbound_start_time; - return work_done; -} - static int qeth_l2_request_initial_mac(struct qeth_card *card) { int rc = 0; @@ -808,7 +681,8 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) +static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) { int rc; struct qeth_hdr *hdr = NULL; @@ -849,7 +723,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) * chaining we can not send long frag lists */ if ((card->info.type != QETH_CARD_TYPE_IQD) && - !qeth_get_elements_no(card, new_skb, 0)) { + !qeth_get_elements_no(card, new_skb, 0, 0)) { int lin_rc = skb_linearize(new_skb); if (card->options.performance_stats) { @@ -894,7 +768,8 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) } } - elements = qeth_get_elements_no(card, new_skb, elements_needed); + elements = qeth_get_elements_no(card, new_skb, elements_needed, + (data_offset > 0) ? 
data_offset : 0); if (!elements) { if (data_offset >= 0) kmem_cache_free(qeth_core_header_cache, hdr); @@ -909,7 +784,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - elements, data_offset, hd_len); + data_offset, hd_len); if (!rc) { card->stats.tx_packets++; card->stats.tx_bytes += tx_bytes; @@ -1042,7 +917,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = { .get_ethtool_stats = qeth_core_get_ethtool_stats, .get_sset_count = qeth_core_get_sset_count, .get_drvinfo = qeth_core_get_drvinfo, - .get_settings = qeth_core_ethtool_get_settings, + .get_link_ksettings = qeth_core_ethtool_get_link_ksettings, }; static const struct ethtool_ops qeth_l2_osn_ops = { @@ -1059,7 +934,7 @@ static const struct net_device_ops qeth_l2_netdev_ops = { .ndo_start_xmit = qeth_l2_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = qeth_l2_set_rx_mode, - .ndo_do_ioctl = qeth_l2_do_ioctl, + .ndo_do_ioctl = qeth_do_ioctl, .ndo_set_mac_address = qeth_l2_set_mac_address, .ndo_change_mtu = qeth_change_mtu, .ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid, @@ -1116,7 +991,7 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * PAGE_SIZE; SET_NETDEV_DEV(card->dev, &card->gdev->dev); - netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT); + netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT); netif_carrier_off(card->dev); return register_netdev(card->dev); } @@ -1326,17 +1201,6 @@ static void __exit qeth_l2_exit(void) pr_info("unregister layer 2 discipline\n"); } -static void qeth_l2_shutdown(struct ccwgroup_device *gdev) -{ - struct qeth_card *card = dev_get_drvdata(&gdev->dev); - qeth_set_allowed_threads(card, 0, 1); - if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap) - qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM); - qeth_qdio_clear_card(card, 0); - qeth_clear_qdio_buffers(card); - qdio_free(CARD_DDEV(card)); -} - static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) { struct qeth_card *card = dev_get_drvdata(&gdev->dev); @@ -1408,15 +1272,16 @@ struct qeth_discipline qeth_l2_discipline = { .start_poll = qeth_qdio_start_poll, .input_handler = (qdio_handler_t *) qeth_qdio_input_handler, .output_handler = (qdio_handler_t *) qeth_qdio_output_handler, + .process_rx_buffer = qeth_l2_process_inbound_buffer, .recover = qeth_l2_recover, .setup = qeth_l2_probe_device, .remove = qeth_l2_remove_device, .set_online = qeth_l2_set_online, .set_offline = qeth_l2_set_offline, - .shutdown = qeth_l2_shutdown, .freeze = qeth_l2_pm_suspend, .thaw = qeth_l2_pm_resume, .restore = qeth_l2_pm_resume, + .do_ioctl = NULL, .control_event_handler = qeth_l2_control_event, }; EXPORT_SYMBOL_GPL(qeth_l2_discipline); diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index 692db49e3d2a..687972356d6b 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c @@ -8,9 +8,6 @@ #include "qeth_core.h" #include "qeth_l2.h" -#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ -struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) - static ssize_t qeth_bridge_port_role_state_show(struct device *dev, struct device_attribute *attr, char *buf, int show_state) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 06d0addcc058..6e0354ef4b86 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ 
b/drivers/s390/net/qeth_l3_main.c @@ -16,7 +16,6 @@ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/etherdevice.h> -#include <linux/mii.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/inetdevice.h> @@ -36,16 +35,12 @@ static int qeth_l3_set_offline(struct ccwgroup_device *); -static int qeth_l3_recover(void *); static int qeth_l3_stop(struct net_device *); static void qeth_l3_set_multicast_list(struct net_device *); -static int qeth_l3_neigh_setup(struct net_device *, struct neigh_parms *); static int qeth_l3_register_addr_entry(struct qeth_card *, struct qeth_ipaddr *); static int qeth_l3_deregister_addr_entry(struct qeth_card *, struct qeth_ipaddr *); -static int __qeth_l3_set_online(struct ccwgroup_device *, int); -static int __qeth_l3_set_offline(struct ccwgroup_device *, int); static int qeth_l3_isxdigit(char *buf) { @@ -1341,7 +1336,7 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd) return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL); } -static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac) +static void qeth_l3_get_mac_for_ipm(__be32 ipm, char *mac) { ip_eth_mc_map(ipm, mac); } @@ -1414,7 +1409,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) im4 = rcu_dereference(im4->next_rcu)) { qeth_l3_get_mac_for_ipm(im4->multiaddr, buf); - tmp->u.a4.addr = im4->multiaddr; + tmp->u.a4.addr = be32_to_cpu(im4->multiaddr); memcpy(tmp->mac, buf, sizeof(tmp->mac)); ipm = qeth_l3_ip_from_hash(card, tmp); @@ -1425,7 +1420,7 @@ qeth_l3_add_mc_to_hash(struct qeth_card *card, struct in_device *in4_dev) if (!ipm) continue; memcpy(ipm->mac, buf, sizeof(tmp->mac)); - ipm->u.a4.addr = im4->multiaddr; + ipm->u.a4.addr = be32_to_cpu(im4->multiaddr); ipm->is_multicast = 1; ipm->disp_flag = QETH_DISP_ADDR_ADD; hash_add(card->ip_mc_htable, @@ -1598,8 +1593,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card, spin_lock_bh(&card->ip_lock); for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { - addr->u.a4.addr = ifa->ifa_address; - addr->u.a4.mask = ifa->ifa_mask; + addr->u.a4.addr = be32_to_cpu(ifa->ifa_address); + addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask); addr->type = QETH_IP_TYPE_NORMAL; qeth_l3_delete_ip(card, addr); } @@ -1690,25 +1685,25 @@ static inline int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, struct qeth_hdr *hdr, unsigned short *vlan_id) { - __be16 prot; + __u16 prot; struct iphdr *ip_hdr; unsigned char tg_addr[MAX_ADDR_LEN]; int is_vlan = 0; if (!(hdr->hdr.l3.flags & QETH_HDR_PASSTHRU)) { - prot = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : - ETH_P_IP); + prot = (hdr->hdr.l3.flags & QETH_HDR_IPV6) ? 
ETH_P_IPV6 :
+			ETH_P_IP;
 	switch (hdr->hdr.l3.flags & QETH_HDR_CAST_MASK) {
 	case QETH_CAST_MULTICAST:
 		switch (prot) {
 #ifdef CONFIG_QETH_IPV6
-		case __constant_htons(ETH_P_IPV6):
+		case ETH_P_IPV6:
 			ndisc_mc_map((struct in6_addr *)
 			     skb->data + 24,
 			     tg_addr, card->dev, 0);
 			break;
 #endif
-		case __constant_htons(ETH_P_IP):
+		case ETH_P_IP:
 			ip_hdr = (struct iphdr *)skb->data;
 			ip_eth_mc_map(ip_hdr->daddr, tg_addr);
 			break;
@@ -1795,7 +1790,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 			magic = *(__u16 *)skb->data;
 			if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 			    (magic == ETH_P_AF_IUCV)) {
-				skb->protocol = ETH_P_AF_IUCV;
+				skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
 				skb->pkt_type = PACKET_HOST;
 				skb->mac_header = NET_SKB_PAD;
 				skb->dev = card->dev;
@@ -1834,81 +1829,6 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
 	return work_done;
 }
 
-static int qeth_l3_poll(struct napi_struct *napi, int budget)
-{
-	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
-	int work_done = 0;
-	struct qeth_qdio_buffer *buffer;
-	int done;
-	int new_budget = budget;
-
-	if (card->options.performance_stats) {
-		card->perf_stats.inbound_cnt++;
-		card->perf_stats.inbound_start_time = qeth_get_micros();
-	}
-
-	while (1) {
-		if (!card->rx.b_count) {
-			card->rx.qdio_err = 0;
-			card->rx.b_count = qdio_get_next_buffers(
-				card->data.ccwdev, 0, &card->rx.b_index,
-				&card->rx.qdio_err);
-			if (card->rx.b_count <= 0) {
-				card->rx.b_count = 0;
-				break;
-			}
-			card->rx.b_element =
-				&card->qdio.in_q->bufs[card->rx.b_index]
-				.buffer->element[0];
-			card->rx.e_offset = 0;
-		}
-
-		while (card->rx.b_count) {
-			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
-			if (!(card->rx.qdio_err &&
-			    qeth_check_qdio_errors(card, buffer->buffer,
-				card->rx.qdio_err, "qinerr")))
-				work_done += qeth_l3_process_inbound_buffer(
-					card, new_budget, &done);
-			else
-				done = 1;
-
-			if (done) {
-				if (card->options.performance_stats)
-					card->perf_stats.bufs_rec++;
-				qeth_put_buffer_pool_entry(card,
-					buffer->pool_entry);
-				qeth_queue_input_buffer(card, card->rx.b_index);
-				card->rx.b_count--;
-				if (card->rx.b_count) {
-					card->rx.b_index =
-						(card->rx.b_index + 1) %
-						QDIO_MAX_BUFFERS_PER_Q;
-					card->rx.b_element =
-						&card->qdio.in_q
-						->bufs[card->rx.b_index]
-						.buffer->element[0];
-					card->rx.e_offset = 0;
-				}
-			}
-
-			if (work_done >= budget)
-				goto out;
-			else
-				new_budget = budget - work_done;
-		}
-	}
-
-	napi_complete(napi);
-	if (qdio_start_irq(card->data.ccwdev, 0))
-		napi_schedule(&card->napi);
-out:
-	if (card->options.performance_stats)
-		card->perf_stats.inbound_time += qeth_get_micros() -
-			card->perf_stats.inbound_start_time;
-	return work_done;
-}
-
 static int qeth_l3_verify_vlan_dev(struct net_device *dev,
 			struct qeth_card *card)
 {
@@ -2461,15 +2381,8 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct qeth_card *card = dev->ml_priv;
 	struct qeth_arp_cache_entry arp_entry;
-	struct mii_ioctl_data *mii_data;
 	int rc = 0;
 
-	if (!card)
-		return -ENODEV;
-
-	if (!qeth_card_hw_is_reachable(card))
-		return -ENODEV;
-
 	switch (cmd) {
 	case SIOC_QETH_ARP_SET_NO_ENTRIES:
 		if (!capable(CAP_NET_ADMIN)) {
@@ -2514,37 +2427,9 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		}
 		rc = qeth_l3_arp_flush_cache(card);
 		break;
-	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
-		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
-		break;
-	case SIOC_QETH_GET_CARD_TYPE:
-		if ((card->info.type == QETH_CARD_TYPE_OSD ||
-		     card->info.type == QETH_CARD_TYPE_OSX) &&
-		    !card->info.guestlan)
-			return 1;
-		return 0;
-		break;
-	case SIOCGMIIPHY:
-		mii_data = if_mii(rq);
-		mii_data->phy_id = 0;
-		break;
-	case SIOCGMIIREG:
-		mii_data = if_mii(rq);
-		if (mii_data->phy_id != 0)
-			rc = -EINVAL;
-		else
-			mii_data->val_out = qeth_mdio_read(dev,
-				mii_data->phy_id,
-				mii_data->reg_num);
-		break;
-	case SIOC_QETH_QUERY_OAT:
-		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
-		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
-	if (rc)
-		QETH_CARD_TEXT_(card, 2, "ioce%d", rc);
 	return rc;
 }
@@ -2572,10 +2457,10 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
 	rcu_read_unlock();
 
 	/* try something else */
-	if (skb->protocol == ETH_P_IPV6)
+	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6)
 		return (skb_network_header(skb)[24] == 0xff) ?
 				RTN_MULTICAST : 0;
-	else if (skb->protocol == ETH_P_IP)
+	else if (be16_to_cpu(skb->protocol) == ETH_P_IP)
 		return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ?
 				RTN_MULTICAST : 0;
 	/* ... */
@@ -2609,17 +2494,13 @@ static void qeth_l3_fill_af_iucv_hdr(struct qeth_card *card,
 	char daddr[16];
 	struct af_iucv_trans_hdr *iucv_hdr;
 
-	skb_pull(skb, 14);
-	card->dev->header_ops->create(skb, card->dev, 0,
-			card->dev->dev_addr, card->dev->dev_addr,
-			card->dev->addr_len);
-	skb_pull(skb, 14);
-	iucv_hdr = (struct af_iucv_trans_hdr *)skb->data;
 	memset(hdr, 0, sizeof(struct qeth_hdr));
 	hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
 	hdr->hdr.l3.ext_flags = 0;
-	hdr->hdr.l3.length = skb->len;
+	hdr->hdr.l3.length = skb->len - ETH_HLEN;
 	hdr->hdr.l3.flags = QETH_HDR_IPV6 | QETH_CAST_UNICAST;
+
+	iucv_hdr = (struct af_iucv_trans_hdr *) (skb->data + ETH_HLEN);
 	memset(daddr, 0, sizeof(daddr));
 	daddr[0] = 0xfe;
 	daddr[1] = 0x80;
@@ -2730,7 +2611,7 @@ static void qeth_tso_fill_header(struct qeth_card *card,
 	hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len -
				       sizeof(struct qeth_hdr_tso));
 	tcph->check = 0;
-	if (skb->protocol == ETH_P_IPV6) {
+	if (be16_to_cpu(skb->protocol) == ETH_P_IPV6) {
 		ip6h->payload_len = 0;
 		tcph->check = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					       0, IPPROTO_TCP, 0);
@@ -2774,10 +2655,11 @@ static int qeth_l3_get_elements_no_tso(struct qeth_card *card,
 	return elements;
 }
 
-static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
+					   struct net_device *dev)
 {
 	int rc;
-	u16 *tag;
+	__be16 *tag;
 	struct qeth_hdr *hdr = NULL;
 	int hdr_elements = 0;
 	int elements;
@@ -2798,7 +2680,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (((card->info.type == QETH_CARD_TYPE_IQD) &&
 	     (((card->options.cq != QETH_CQ_ENABLED) && !ipv) ||
 	      ((card->options.cq == QETH_CQ_ENABLED) &&
-	       (skb->protocol != ETH_P_AF_IUCV)))) ||
+	       (be16_to_cpu(skb->protocol) != ETH_P_AF_IUCV)))) ||
 	    card->options.sniffer)
 		goto tx_drop;
@@ -2823,10 +2705,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if ((card->info.type == QETH_CARD_TYPE_IQD) &&
 	    !skb_is_nonlinear(skb)) {
 		new_skb = skb;
-		if (new_skb->protocol == ETH_P_AF_IUCV)
-			data_offset = 0;
-		else
-			data_offset = ETH_HLEN;
+		data_offset = ETH_HLEN;
 		hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
 		if (!hdr)
 			goto tx_drop;
@@ -2854,9 +2733,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 						new_skb->data + 8, 4);
 			skb_copy_to_linear_data_offset(new_skb, 8,
 						new_skb->data + 12, 4);
-			tag = (u16 *)(new_skb->data + 12);
-			*tag = __constant_htons(ETH_P_8021Q);
-			*(tag + 1) = htons(skb_vlan_tag_get(new_skb));
+			tag = (__be16 *)(new_skb->data + 12);
+			*tag = cpu_to_be16(ETH_P_8021Q);
+			*(tag + 1) = cpu_to_be16(skb_vlan_tag_get(new_skb));
 		}
 	}
@@ -2867,7 +2746,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if ((card->info.type != QETH_CARD_TYPE_IQD) &&
 	    ((use_tso && !qeth_l3_get_elements_no_tso(card, new_skb, 1)) ||
-	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0)))) {
+	     (!use_tso && !qeth_get_elements_no(card, new_skb, 0, 0)))) {
 		int lin_rc = skb_linearize(new_skb);
 
 		if (card->options.performance_stats) {
@@ -2894,7 +2773,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			qeth_l3_fill_header(card, hdr, new_skb, ipv,
						cast_type);
 	} else {
-		if (new_skb->protocol == ETH_P_AF_IUCV)
+		if (be16_to_cpu(new_skb->protocol) == ETH_P_AF_IUCV)
 			qeth_l3_fill_af_iucv_hdr(card, hdr, new_skb);
 		else {
 			qeth_l3_fill_header(card, hdr, new_skb, ipv,
@@ -2909,7 +2788,8 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	elements = use_tso ?
		   qeth_l3_get_elements_no_tso(card, new_skb, hdr_elements) :
-		   qeth_get_elements_no(card, new_skb, hdr_elements);
+		   qeth_get_elements_no(card, new_skb, hdr_elements,
+					(data_offset > 0) ? data_offset : 0);
 	if (!elements) {
 		if (data_offset >= 0)
 			kmem_cache_free(qeth_core_header_cache, hdr);
@@ -2931,7 +2811,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		rc = qeth_do_send_packet(card, queue, new_skb, hdr,
					 elements);
 	} else
 		rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr,
-					      elements, data_offset, 0);
+					      data_offset, 0);
 
 	if (!rc) {
 		card->stats.tx_packets++;
@@ -3032,7 +2912,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
 	.get_ethtool_stats = qeth_core_get_ethtool_stats,
 	.get_sset_count = qeth_core_get_sset_count,
 	.get_drvinfo = qeth_core_get_drvinfo,
-	.get_settings = qeth_core_ethtool_get_settings,
+	.get_link_ksettings = qeth_core_ethtool_get_link_ksettings,
 };
 
 /*
@@ -3066,7 +2946,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
 	.ndo_start_xmit = qeth_l3_hard_start_xmit,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_rx_mode = qeth_l3_set_multicast_list,
-	.ndo_do_ioctl = qeth_l3_do_ioctl,
+	.ndo_do_ioctl = qeth_do_ioctl,
 	.ndo_change_mtu = qeth_change_mtu,
 	.ndo_fix_features = qeth_fix_features,
 	.ndo_set_features = qeth_set_features,
@@ -3082,7 +2962,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
 	.ndo_start_xmit = qeth_l3_hard_start_xmit,
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_set_rx_mode = qeth_l3_set_multicast_list,
-	.ndo_do_ioctl = qeth_l3_do_ioctl,
+	.ndo_do_ioctl = qeth_do_ioctl,
 	.ndo_change_mtu = qeth_change_mtu,
 	.ndo_fix_features = qeth_fix_features,
 	.ndo_set_features = qeth_set_features,
@@ -3151,7 +3031,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
 			PAGE_SIZE;
 
 	SET_NETDEV_DEV(card->dev, &card->gdev->dev);
-	netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
+	netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
 	netif_carrier_off(card->dev);
 	return register_netdev(card->dev);
 }
@@ -3372,17 +3252,6 @@ static int qeth_l3_recover(void *ptr)
 	return 0;
 }
 
-static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
-{
-	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
-	qeth_set_allowed_threads(card, 0, 1);
-	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
-		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
-	qeth_qdio_clear_card(card, 0);
-	qeth_clear_qdio_buffers(card);
-	qdio_free(CARD_DDEV(card));
-}
-
 static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
 {
 	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
@@ -3440,15 +3309,16 @@ struct qeth_discipline qeth_l3_discipline = {
 	.start_poll = qeth_qdio_start_poll,
 	.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
 	.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
+	.process_rx_buffer = qeth_l3_process_inbound_buffer,
 	.recover = qeth_l3_recover,
 	.setup = qeth_l3_probe_device,
 	.remove = qeth_l3_remove_device,
 	.set_online = qeth_l3_set_online,
 	.set_offline = qeth_l3_set_offline,
-	.shutdown = qeth_l3_shutdown,
 	.freeze = qeth_l3_pm_suspend,
 	.thaw = qeth_l3_pm_resume,
 	.restore = qeth_l3_pm_resume,
+	.do_ioctl = qeth_l3_do_ioctl,
 	.control_event_handler = qeth_l3_control_event,
 };
 EXPORT_SYMBOL_GPL(qeth_l3_discipline);
@@ -3472,8 +3342,8 @@ static int qeth_l3_ip_event(struct notifier_block *this,
 		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 		if (addr) {
-			addr->u.a4.addr = ifa->ifa_address;
-			addr->u.a4.mask = ifa->ifa_mask;
+			addr->u.a4.addr = be32_to_cpu(ifa->ifa_address);
+			addr->u.a4.mask = be32_to_cpu(ifa->ifa_mask);
 			addr->type = QETH_IP_TYPE_NORMAL;
 		} else
 			return NOTIFY_DONE;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 05e9471e3d3f..ff29a4b416b4 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -286,7 +286,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 	if (!addr)
 		return -ENOMEM;
 
-	addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+	addr->u.a6.addr.s6_addr32[0] = cpu_to_be32(0xfe800000);
 	addr->u.a6.addr.s6_addr32[1] = 0x00000000;
 	for (i = 8; i < 16; i++)
 		addr->u.a6.addr.s6_addr[i] =
@@ -320,7 +320,7 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
 	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
 	if (addr != NULL) {
-		addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
+		addr->u.a6.addr.s6_addr32[0] = cpu_to_be32(0xfe800000);
 		addr->u.a6.addr.s6_addr32[1] = 0x00000000;
 		for (i = 8; i < 16; i++)
 			addr->u.a6.addr.s6_addr[i] =
				card->options.hsuid[i - 8];
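
Editor's note (not part of the commit above): the recurring pattern in these qeth_l3
hunks is the big-endian annotation of protocol fields -- skb->protocol is a __be16,
so values are stored via cpu_to_be16() and compared after be16_to_cpu(), which is
what sparse checks for. The following is a minimal userspace sketch of that
convention, using a hypothetical fake_skb struct and htons()/ntohs() as stand-ins
for the kernel's cpu_to_be16()/be16_to_cpu() helpers.

/* sketch.c -- illustrative only; fake_skb is a hypothetical stand-in */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* htons()/ntohs() stand in for cpu_to_be16()/be16_to_cpu() */

#define ETH_P_IP	0x0800	/* EtherType values, host order */
#define ETH_P_IPV6	0x86DD

struct fake_skb {
	uint16_t protocol;	/* always holds a network-order (big-endian) value */
};

int main(void)
{
	struct fake_skb skb;

	/* store: host-order constant converted to network order */
	skb.protocol = htons(ETH_P_IPV6);

	/* compare: convert back to host order before matching a constant */
	if (ntohs(skb.protocol) == ETH_P_IPV6)
		printf("IPv6 frame\n");
	else if (ntohs(skb.protocol) == ETH_P_IP)
		printf("IPv4 frame\n");

	return 0;
}

On big-endian s390 both forms happen to produce identical machine code, which is
why the unconverted comparisons worked there; the conversions matter for type
correctness and for portability of the shared qeth code.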