Diffstat (limited to 'drivers'): 71 files changed, 3759 insertions, 648 deletions
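One recurring pattern in the ata_piix hunks further below is caching a PCI config register at probe time and restoring it on detach, because some ACPI implementations corrupt IOCFG during _STM (kernel bugzilla #11879): the register is read once into the driver-private data, later cable-detect and quirk logic consult that cached copy, and the original value is written back in the new piix_remove_one(). The sketch below distils that save-at-probe / restore-at-remove idea; the struct and field names (piix_host_priv, saved_iocfg, PIIX_IOCFG) come from the diff, while example_probe/example_remove and the use of pci_set_drvdata are simplifications of my own (the real driver keeps hpriv in the ata_host private data), not the full driver.

    /* Minimal sketch of the saved-config-register pattern used by ata_piix. */
    #include <linux/pci.h>
    #include <linux/device.h>

    #define PIIX_IOCFG 0x54        /* PCI config offset being preserved */

    struct piix_host_priv {
            const int *map;
            u32 saved_iocfg;        /* IOCFG as found at probe time */
            void __iomem *sidpr;
    };

    static int example_probe(struct pci_dev *pdev)
    {
            struct piix_host_priv *hpriv;

            hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
            if (!hpriv)
                    return -ENOMEM;

            /* Snapshot IOCFG before ACPI _STM or quirk handling can touch it. */
            pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg);
            pci_set_drvdata(pdev, hpriv);
            return 0;
    }

    static void example_remove(struct pci_dev *pdev)
    {
            struct piix_host_priv *hpriv = pci_get_drvdata(pdev);

            /* Put the register back exactly as the firmware left it. */
            pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg);
    }

Cable detection then tests hpriv->saved_iocfg instead of re-reading the possibly clobbered hardware register, which is exactly what the reworked ich_pata_cable_detect() in the diff does.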
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 656448c7fef9..7f701cbe14ab 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -105,7 +105,7 @@ enum { board_ahci_ign_iferr = 2, board_ahci_sb600 = 3, board_ahci_mv = 4, - board_ahci_sb700 = 5, + board_ahci_sb700 = 5, /* for SB700 and SB800 */ board_ahci_mcp65 = 6, board_ahci_nopmp = 7, @@ -439,7 +439,7 @@ static const struct ata_port_info ahci_port_info[] = { .udma_mask = ATA_UDMA6, .port_ops = &ahci_ops, }, - /* board_ahci_sb700 */ + /* board_ahci_sb700, for SB700 and SB800 */ { AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL), .flags = AHCI_FLAG_COMMON, @@ -2446,6 +2446,8 @@ static void ahci_print_info(struct ata_host *host) speed_s = "1.5"; else if (speed == 2) speed_s = "3"; + else if (speed == 3) + speed_s = "6"; else speed_s = "?"; @@ -2610,6 +2612,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (pdev->revision == 0xa1 || pdev->revision == 0xa2)) hpriv->flags |= AHCI_HFLAG_NO_MSI; + /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */ + if (board_id == board_ahci_sb700 && pdev->revision >= 0x40) + hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL; + if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev)) pci_intx(pdev, 1); diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 5fdf1678d0cc..887d8f46a287 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c @@ -154,11 +154,13 @@ struct piix_map_db { struct piix_host_priv { const int *map; + u32 saved_iocfg; void __iomem *sidpr; }; static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static void piix_remove_one(struct pci_dev *pdev); static int piix_pata_prereset(struct ata_link *link, unsigned long deadline); static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev); static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev); @@ -296,7 +298,7 @@ static struct pci_driver piix_pci_driver = { .name = DRV_NAME, .id_table = piix_pci_tbl, .probe = piix_init_one, - .remove = ata_pci_remove_one, + .remove = piix_remove_one, #ifdef CONFIG_PM .suspend = piix_pci_device_suspend, .resume = piix_pci_device_resume, @@ -308,7 +310,7 @@ static struct scsi_host_template piix_sht = { }; static struct ata_port_operations piix_pata_ops = { - .inherits = &ata_bmdma_port_ops, + .inherits = &ata_bmdma32_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = piix_set_piomode, .set_dmamode = piix_set_dmamode, @@ -610,8 +612,9 @@ static const struct ich_laptop ich_laptop[] = { static int ich_pata_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); + struct piix_host_priv *hpriv = ap->host->private_data; const struct ich_laptop *lap = &ich_laptop[0]; - u8 tmp, mask; + u8 mask; /* Check for specials - Acer Aspire 5602WLMi */ while (lap->device) { @@ -625,8 +628,7 @@ static int ich_pata_cable_detect(struct ata_port *ap) /* check BIOS cable detect results */ mask = ap->port_no == 0 ? 
PIIX_80C_PRI : PIIX_80C_SEC; - pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); - if ((tmp & mask) == 0) + if ((hpriv->saved_iocfg & mask) == 0) return ATA_CBL_PATA40; return ATA_CBL_PATA80; } @@ -1350,7 +1352,7 @@ static int __devinit piix_init_sidpr(struct ata_host *host) return 0; } -static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) +static void piix_iocfg_bit18_quirk(struct ata_host *host) { static const struct dmi_system_id sysids[] = { { @@ -1367,7 +1369,8 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) { } /* terminate list */ }; - u32 iocfg; + struct pci_dev *pdev = to_pci_dev(host->dev); + struct piix_host_priv *hpriv = host->private_data; if (!dmi_check_system(sysids)) return; @@ -1376,12 +1379,11 @@ static void piix_iocfg_bit18_quirk(struct pci_dev *pdev) * seem to use it to disable a channel. Clear the bit on the * affected systems. */ - pci_read_config_dword(pdev, PIIX_IOCFG, &iocfg); - if (iocfg & (1 << 18)) { + if (hpriv->saved_iocfg & (1 << 18)) { dev_printk(KERN_INFO, &pdev->dev, "applying IOCFG bit18 quirk\n"); - iocfg &= ~(1 << 18); - pci_write_config_dword(pdev, PIIX_IOCFG, iocfg); + pci_write_config_dword(pdev, PIIX_IOCFG, + hpriv->saved_iocfg & ~(1 << 18)); } } @@ -1430,6 +1432,17 @@ static int __devinit piix_init_one(struct pci_dev *pdev, if (rc) return rc; + hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) + return -ENOMEM; + + /* Save IOCFG, this will be used for cable detection, quirk + * detection and restoration on detach. This is necessary + * because some ACPI implementations mess up cable related + * bits on _STM. Reported on kernel bz#11879. + */ + pci_read_config_dword(pdev, PIIX_IOCFG, &hpriv->saved_iocfg); + /* ICH6R may be driven by either ata_piix or ahci driver * regardless of BIOS configuration. Make sure AHCI mode is * off. @@ -1441,10 +1454,6 @@ static int __devinit piix_init_one(struct pci_dev *pdev, } /* SATA map init can change port_info, do it before prepping host */ - hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL); - if (!hpriv) - return -ENOMEM; - if (port_flags & ATA_FLAG_SATA) hpriv->map = piix_init_sata_map(pdev, port_info, piix_map_db_table[ent->driver_data]); @@ -1463,7 +1472,7 @@ static int __devinit piix_init_one(struct pci_dev *pdev, } /* apply IOCFG bit18 quirk */ - piix_iocfg_bit18_quirk(pdev); + piix_iocfg_bit18_quirk(host); /* On ICH5, some BIOSen disable the interrupt using the * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. @@ -1488,6 +1497,16 @@ static int __devinit piix_init_one(struct pci_dev *pdev, return ata_pci_sff_activate_host(host, ata_sff_interrupt, &piix_sht); } +static void piix_remove_one(struct pci_dev *pdev) +{ + struct ata_host *host = dev_get_drvdata(&pdev->dev); + struct piix_host_priv *hpriv = host->private_data; + + pci_write_config_dword(pdev, PIIX_IOCFG, hpriv->saved_iocfg); + + ata_pci_remove_one(pdev); +} + static int __init piix_init(void) { int rc; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index f178a450ec08..175df54eb664 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1007,6 +1007,7 @@ static const char *sata_spd_string(unsigned int spd) static const char * const spd_str[] = { "1.5 Gbps", "3.0 Gbps", + "6.0 Gbps", }; if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) @@ -2000,6 +2001,10 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) as the caller should know this */ if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) return 0; + /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. 
*/ + if (ata_id_is_cfa(adev->id) + && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) + return 0; /* PIO3 and higher it is mandatory */ if (adev->pio_mode > XFER_PIO_2) return 1; diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 9033d164c4ec..c59ad76c84b1 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -66,6 +66,7 @@ const struct ata_port_operations ata_sff_port_ops = { .port_start = ata_sff_port_start, }; +EXPORT_SYMBOL_GPL(ata_sff_port_ops); const struct ata_port_operations ata_bmdma_port_ops = { .inherits = &ata_sff_port_ops, @@ -77,6 +78,14 @@ const struct ata_port_operations ata_bmdma_port_ops = { .bmdma_stop = ata_bmdma_stop, .bmdma_status = ata_bmdma_status, }; +EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); + +const struct ata_port_operations ata_bmdma32_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .sff_data_xfer = ata_sff_data_xfer32, +}; +EXPORT_SYMBOL_GPL(ata_bmdma32_port_ops); /** * ata_fill_sg - Fill PCI IDE PRD table @@ -166,8 +175,9 @@ static void ata_fill_sg_dumb(struct ata_queued_cmd *qc) blen = len & 0xffff; ap->prd[pi].addr = cpu_to_le32(addr); if (blen == 0) { - /* Some PATA chipsets like the CS5530 can't - cope with 0x0000 meaning 64K as the spec says */ + /* Some PATA chipsets like the CS5530 can't + cope with 0x0000 meaning 64K as the spec + says */ ap->prd[pi].flags_len = cpu_to_le32(0x8000); blen = 0x8000; ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000); @@ -200,6 +210,7 @@ void ata_sff_qc_prep(struct ata_queued_cmd *qc) ata_fill_sg(qc); } +EXPORT_SYMBOL_GPL(ata_sff_qc_prep); /** * ata_sff_dumb_qc_prep - Prepare taskfile for submission @@ -217,6 +228,7 @@ void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc) ata_fill_sg_dumb(qc); } +EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); /** * ata_sff_check_status - Read device status reg & clear interrupt @@ -233,6 +245,7 @@ u8 ata_sff_check_status(struct ata_port *ap) { return ioread8(ap->ioaddr.status_addr); } +EXPORT_SYMBOL_GPL(ata_sff_check_status); /** * ata_sff_altstatus - Read device alternate status reg @@ -275,7 +288,7 @@ static u8 ata_sff_irq_status(struct ata_port *ap) status = ata_sff_altstatus(ap); /* Not us: We are busy */ if (status & ATA_BUSY) - return status; + return status; } /* Clear INTRQ latch */ status = ap->ops->sff_check_status(ap); @@ -319,6 +332,7 @@ void ata_sff_pause(struct ata_port *ap) ata_sff_sync(ap); ndelay(400); } +EXPORT_SYMBOL_GPL(ata_sff_pause); /** * ata_sff_dma_pause - Pause before commencing DMA @@ -327,7 +341,7 @@ void ata_sff_pause(struct ata_port *ap) * Perform I/O fencing and ensure sufficient cycle delays occur * for the HDMA1:0 transition */ - + void ata_sff_dma_pause(struct ata_port *ap) { if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) { @@ -341,6 +355,7 @@ void ata_sff_dma_pause(struct ata_port *ap) corruption. 
*/ BUG(); } +EXPORT_SYMBOL_GPL(ata_sff_dma_pause); /** * ata_sff_busy_sleep - sleep until BSY clears, or timeout @@ -396,6 +411,7 @@ int ata_sff_busy_sleep(struct ata_port *ap, return 0; } +EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); static int ata_sff_check_ready(struct ata_link *link) { @@ -422,6 +438,7 @@ int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline) { return ata_wait_ready(link, deadline, ata_sff_check_ready); } +EXPORT_SYMBOL_GPL(ata_sff_wait_ready); /** * ata_sff_dev_select - Select device 0/1 on ATA bus @@ -449,6 +466,7 @@ void ata_sff_dev_select(struct ata_port *ap, unsigned int device) iowrite8(tmp, ap->ioaddr.device_addr); ata_sff_pause(ap); /* needed; also flushes, for mmio */ } +EXPORT_SYMBOL_GPL(ata_sff_dev_select); /** * ata_dev_select - Select device 0/1 on ATA bus @@ -513,6 +531,7 @@ u8 ata_sff_irq_on(struct ata_port *ap) return tmp; } +EXPORT_SYMBOL_GPL(ata_sff_irq_on); /** * ata_sff_irq_clear - Clear PCI IDE BMDMA interrupt. @@ -534,6 +553,7 @@ void ata_sff_irq_clear(struct ata_port *ap) iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS); } +EXPORT_SYMBOL_GPL(ata_sff_irq_clear); /** * ata_sff_tf_load - send taskfile registers to host controller @@ -593,6 +613,7 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) ata_wait_idle(ap); } +EXPORT_SYMBOL_GPL(ata_sff_tf_load); /** * ata_sff_tf_read - input device's ATA taskfile shadow registers @@ -633,6 +654,7 @@ void ata_sff_tf_read(struct ata_port *ap, struct ata_taskfile *tf) WARN_ON(1); } } +EXPORT_SYMBOL_GPL(ata_sff_tf_read); /** * ata_sff_exec_command - issue ATA command to host controller @@ -652,6 +674,7 @@ void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) iowrite8(tf->command, ap->ioaddr.command_addr); ata_sff_pause(ap); } +EXPORT_SYMBOL_GPL(ata_sff_exec_command); /** * ata_tf_to_host - issue ATA taskfile to host controller @@ -717,6 +740,53 @@ unsigned int ata_sff_data_xfer(struct ata_device *dev, unsigned char *buf, return words << 1; } +EXPORT_SYMBOL_GPL(ata_sff_data_xfer); + +/** + * ata_sff_data_xfer32 - Transfer data by PIO + * @dev: device to target + * @buf: data buffer + * @buflen: buffer length + * @rw: read/write + * + * Transfer data from/to the device data register by PIO using 32bit + * I/O operations. + * + * LOCKING: + * Inherited from caller. + * + * RETURNS: + * Bytes consumed. + */ + +unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf, + unsigned int buflen, int rw) +{ + struct ata_port *ap = dev->link->ap; + void __iomem *data_addr = ap->ioaddr.data_addr; + unsigned int words = buflen >> 2; + int slop = buflen & 3; + + /* Transfer multiple of 4 bytes */ + if (rw == READ) + ioread32_rep(data_addr, buf, words); + else + iowrite32_rep(data_addr, buf, words); + + if (unlikely(slop)) { + __le32 pad; + if (rw == READ) { + pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr)); + memcpy(buf + buflen - slop, &pad, slop); + } else { + memcpy(&pad, buf + buflen - slop, slop); + iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr); + } + words++; + } + return words << 2; +} +EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); /** * ata_sff_data_xfer_noirq - Transfer data by PIO @@ -746,6 +816,7 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, return consumed; } +EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); /** * ata_pio_sector - Transfer a sector of data. 
@@ -922,13 +993,15 @@ next_sg: buf = kmap_atomic(page, KM_IRQ0); /* do the actual data transfer */ - consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, + count, rw); kunmap_atomic(buf, KM_IRQ0); local_irq_restore(flags); } else { buf = page_address(page); - consumed = ap->ops->sff_data_xfer(dev, buf + offset, count, rw); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, + count, rw); } bytes -= min(bytes, consumed); @@ -1013,18 +1086,19 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc) * RETURNS: * 1 if ok in workqueue, 0 otherwise. */ -static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) +static inline int ata_hsm_ok_in_wq(struct ata_port *ap, + struct ata_queued_cmd *qc) { if (qc->tf.flags & ATA_TFLAG_POLLING) return 1; if (ap->hsm_task_state == HSM_ST_FIRST) { if (qc->tf.protocol == ATA_PROT_PIO && - (qc->tf.flags & ATA_TFLAG_WRITE)) + (qc->tf.flags & ATA_TFLAG_WRITE)) return 1; if (ata_is_atapi(qc->tf.protocol) && - !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) + !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) return 1; } @@ -1338,6 +1412,7 @@ fsm_start: return poll_next; } +EXPORT_SYMBOL_GPL(ata_sff_hsm_move); void ata_pio_task(struct work_struct *work) { @@ -1507,6 +1582,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc) return 0; } +EXPORT_SYMBOL_GPL(ata_sff_qc_issue); /** * ata_sff_qc_fill_rtf - fill result TF using ->sff_tf_read @@ -1526,6 +1602,7 @@ bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc) qc->ap->ops->sff_tf_read(qc->ap, &qc->result_tf); return true; } +EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); /** * ata_sff_host_intr - Handle host interrupt for given (port, task) @@ -1623,6 +1700,7 @@ idle_irq: #endif return 0; /* irq not handled */ } +EXPORT_SYMBOL_GPL(ata_sff_host_intr); /** * ata_sff_interrupt - Default ATA host interrupt handler @@ -1667,6 +1745,7 @@ irqreturn_t ata_sff_interrupt(int irq, void *dev_instance) return IRQ_RETVAL(handled); } +EXPORT_SYMBOL_GPL(ata_sff_interrupt); /** * ata_sff_freeze - Freeze SFF controller port @@ -1695,6 +1774,7 @@ void ata_sff_freeze(struct ata_port *ap) ap->ops->sff_irq_clear(ap); } +EXPORT_SYMBOL_GPL(ata_sff_freeze); /** * ata_sff_thaw - Thaw SFF controller port @@ -1712,6 +1792,7 @@ void ata_sff_thaw(struct ata_port *ap) ap->ops->sff_irq_clear(ap); ap->ops->sff_irq_on(ap); } +EXPORT_SYMBOL_GPL(ata_sff_thaw); /** * ata_sff_prereset - prepare SFF link for reset @@ -1753,6 +1834,7 @@ int ata_sff_prereset(struct ata_link *link, unsigned long deadline) return 0; } +EXPORT_SYMBOL_GPL(ata_sff_prereset); /** * ata_devchk - PATA device presence detection @@ -1865,6 +1947,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present, return class; } +EXPORT_SYMBOL_GPL(ata_sff_dev_classify); /** * ata_sff_wait_after_reset - wait for devices to become ready after reset @@ -1941,6 +2024,7 @@ int ata_sff_wait_after_reset(struct ata_link *link, unsigned int devmask, return ret; } +EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, unsigned long deadline) @@ -2013,6 +2097,7 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes, DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); return 0; } +EXPORT_SYMBOL_GPL(ata_sff_softreset); /** * sata_sff_hardreset - reset host port via SATA phy reset @@ -2045,6 +2130,7 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class, DPRINTK("EXIT, class=%u\n", *class); return rc; } 
+EXPORT_SYMBOL_GPL(sata_sff_hardreset); /** * ata_sff_postreset - SFF postreset callback @@ -2080,6 +2166,7 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes) if (ap->ioaddr.ctl_addr) iowrite8(ap->ctl, ap->ioaddr.ctl_addr); } +EXPORT_SYMBOL_GPL(ata_sff_postreset); /** * ata_sff_error_handler - Stock error handler for BMDMA controller @@ -2152,6 +2239,7 @@ void ata_sff_error_handler(struct ata_port *ap) ata_do_eh(ap, ap->ops->prereset, softreset, hardreset, ap->ops->postreset); } +EXPORT_SYMBOL_GPL(ata_sff_error_handler); /** * ata_sff_post_internal_cmd - Stock post_internal_cmd for SFF controller @@ -2174,6 +2262,7 @@ void ata_sff_post_internal_cmd(struct ata_queued_cmd *qc) spin_unlock_irqrestore(ap->lock, flags); } +EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd); /** * ata_sff_port_start - Set port up for dma. @@ -2194,6 +2283,7 @@ int ata_sff_port_start(struct ata_port *ap) return ata_port_start(ap); return 0; } +EXPORT_SYMBOL_GPL(ata_sff_port_start); /** * ata_sff_std_ports - initialize ioaddr with standard port offsets. @@ -2219,6 +2309,7 @@ void ata_sff_std_ports(struct ata_ioports *ioaddr) ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; } +EXPORT_SYMBOL_GPL(ata_sff_std_ports); unsigned long ata_bmdma_mode_filter(struct ata_device *adev, unsigned long xfer_mask) @@ -2230,6 +2321,7 @@ unsigned long ata_bmdma_mode_filter(struct ata_device *adev, xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); return xfer_mask; } +EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter); /** * ata_bmdma_setup - Set up PCI IDE BMDMA transaction @@ -2258,6 +2350,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc) /* issue r/w command */ ap->ops->sff_exec_command(ap, &qc->tf); } +EXPORT_SYMBOL_GPL(ata_bmdma_setup); /** * ata_bmdma_start - Start a PCI IDE BMDMA transaction @@ -2290,6 +2383,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc) * unneccessarily delayed for MMIO */ } +EXPORT_SYMBOL_GPL(ata_bmdma_start); /** * ata_bmdma_stop - Stop PCI IDE BMDMA transfer @@ -2314,6 +2408,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc) /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ ata_sff_dma_pause(ap); } +EXPORT_SYMBOL_GPL(ata_bmdma_stop); /** * ata_bmdma_status - Read PCI IDE BMDMA status @@ -2330,6 +2425,7 @@ u8 ata_bmdma_status(struct ata_port *ap) { return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); } +EXPORT_SYMBOL_GPL(ata_bmdma_status); /** * ata_bus_reset - reset host port and associated ATA channel @@ -2422,6 +2518,7 @@ err_out: DPRINTK("EXIT\n"); } +EXPORT_SYMBOL_GPL(ata_bus_reset); #ifdef CONFIG_PCI @@ -2449,6 +2546,7 @@ int ata_pci_bmdma_clear_simplex(struct pci_dev *pdev) return -EOPNOTSUPP; return 0; } +EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); /** * ata_pci_bmdma_init - acquire PCI BMDMA resources and init ATA host @@ -2501,11 +2599,12 @@ int ata_pci_bmdma_init(struct ata_host *host) host->flags |= ATA_HOST_SIMPLEX; ata_port_desc(ap, "bmdma 0x%llx", - (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); + (unsigned long long)pci_resource_start(pdev, 4) + 8 * i); } return 0; } +EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); static int ata_resources_present(struct pci_dev *pdev, int port) { @@ -2513,7 +2612,7 @@ static int ata_resources_present(struct pci_dev *pdev, int port) /* Check the PCI resources for this channel are enabled */ port = port * 2; - for (i = 0; i < 2; i ++) { + for (i = 0; i < 2; i++) { if (pci_resource_start(pdev, port + i) == 0 || pci_resource_len(pdev, port + i) == 0) return 
0; @@ -2598,6 +2697,7 @@ int ata_pci_sff_init_host(struct ata_host *host) return 0; } +EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); /** * ata_pci_sff_prepare_host - helper to prepare native PCI ATA host @@ -2615,7 +2715,7 @@ int ata_pci_sff_init_host(struct ata_host *host) * 0 on success, -errno otherwise. */ int ata_pci_sff_prepare_host(struct pci_dev *pdev, - const struct ata_port_info * const * ppi, + const struct ata_port_info * const *ppi, struct ata_host **r_host) { struct ata_host *host; @@ -2645,17 +2745,18 @@ int ata_pci_sff_prepare_host(struct pci_dev *pdev, *r_host = host; return 0; - err_bmdma: +err_bmdma: /* This is necessary because PCI and iomap resources are * merged and releasing the top group won't release the * acquired resources if some of those have been acquired * before entering this function. */ pcim_iounmap_regions(pdev, 0xf); - err_out: +err_out: devres_release_group(&pdev->dev, NULL); return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); /** * ata_pci_sff_activate_host - start SFF host, request IRQ and register it @@ -2741,7 +2842,7 @@ int ata_pci_sff_activate_host(struct ata_host *host, } rc = ata_host_register(host, sht); - out: +out: if (rc == 0) devres_remove_group(dev, NULL); else @@ -2749,6 +2850,7 @@ int ata_pci_sff_activate_host(struct ata_host *host, return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); /** * ata_pci_sff_init_one - Initialize/register PCI IDE host controller @@ -2776,7 +2878,7 @@ int ata_pci_sff_activate_host(struct ata_host *host, * Zero on success, negative on errno-based value on error. */ int ata_pci_sff_init_one(struct pci_dev *pdev, - const struct ata_port_info * const * ppi, + const struct ata_port_info * const *ppi, struct scsi_host_template *sht, void *host_priv) { struct device *dev = &pdev->dev; @@ -2815,7 +2917,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, pci_set_master(pdev); rc = ata_pci_sff_activate_host(host, ata_sff_interrupt, sht); - out: +out: if (rc == 0) devres_remove_group(&pdev->dev, NULL); else @@ -2823,54 +2925,7 @@ int ata_pci_sff_init_one(struct pci_dev *pdev, return rc; } +EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); #endif /* CONFIG_PCI */ -EXPORT_SYMBOL_GPL(ata_sff_port_ops); -EXPORT_SYMBOL_GPL(ata_bmdma_port_ops); -EXPORT_SYMBOL_GPL(ata_sff_qc_prep); -EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep); -EXPORT_SYMBOL_GPL(ata_sff_dev_select); -EXPORT_SYMBOL_GPL(ata_sff_check_status); -EXPORT_SYMBOL_GPL(ata_sff_dma_pause); -EXPORT_SYMBOL_GPL(ata_sff_pause); -EXPORT_SYMBOL_GPL(ata_sff_busy_sleep); -EXPORT_SYMBOL_GPL(ata_sff_wait_ready); -EXPORT_SYMBOL_GPL(ata_sff_tf_load); -EXPORT_SYMBOL_GPL(ata_sff_tf_read); -EXPORT_SYMBOL_GPL(ata_sff_exec_command); -EXPORT_SYMBOL_GPL(ata_sff_data_xfer); -EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); -EXPORT_SYMBOL_GPL(ata_sff_irq_on); -EXPORT_SYMBOL_GPL(ata_sff_irq_clear); -EXPORT_SYMBOL_GPL(ata_sff_hsm_move); -EXPORT_SYMBOL_GPL(ata_sff_qc_issue); -EXPORT_SYMBOL_GPL(ata_sff_qc_fill_rtf); -EXPORT_SYMBOL_GPL(ata_sff_host_intr); -EXPORT_SYMBOL_GPL(ata_sff_interrupt); -EXPORT_SYMBOL_GPL(ata_sff_freeze); -EXPORT_SYMBOL_GPL(ata_sff_thaw); -EXPORT_SYMBOL_GPL(ata_sff_prereset); -EXPORT_SYMBOL_GPL(ata_sff_dev_classify); -EXPORT_SYMBOL_GPL(ata_sff_wait_after_reset); -EXPORT_SYMBOL_GPL(ata_sff_softreset); -EXPORT_SYMBOL_GPL(sata_sff_hardreset); -EXPORT_SYMBOL_GPL(ata_sff_postreset); -EXPORT_SYMBOL_GPL(ata_sff_error_handler); -EXPORT_SYMBOL_GPL(ata_sff_post_internal_cmd); -EXPORT_SYMBOL_GPL(ata_sff_port_start); -EXPORT_SYMBOL_GPL(ata_sff_std_ports); 
-EXPORT_SYMBOL_GPL(ata_bmdma_mode_filter); -EXPORT_SYMBOL_GPL(ata_bmdma_setup); -EXPORT_SYMBOL_GPL(ata_bmdma_start); -EXPORT_SYMBOL_GPL(ata_bmdma_stop); -EXPORT_SYMBOL_GPL(ata_bmdma_status); -EXPORT_SYMBOL_GPL(ata_bus_reset); -#ifdef CONFIG_PCI -EXPORT_SYMBOL_GPL(ata_pci_bmdma_clear_simplex); -EXPORT_SYMBOL_GPL(ata_pci_bmdma_init); -EXPORT_SYMBOL_GPL(ata_pci_sff_init_host); -EXPORT_SYMBOL_GPL(ata_pci_sff_prepare_host); -EXPORT_SYMBOL_GPL(ata_pci_sff_activate_host); -EXPORT_SYMBOL_GPL(ata_pci_sff_init_one); -#endif /* CONFIG_PCI */ diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c index 73c466e452ca..a7999c19f0c9 100644 --- a/drivers/ata/pata_ali.c +++ b/drivers/ata/pata_ali.c @@ -19,7 +19,9 @@ * * TODO/CHECK * Cannot have ATAPI on both master & slave for rev < c2 (???) but - * otherwise should do atapi DMA. + * otherwise should do atapi DMA (For now for old we do PIO only for + * ATAPI) + * Review Sunblade workaround. */ #include <linux/kernel.h> @@ -33,12 +35,14 @@ #include <linux/dmi.h> #define DRV_NAME "pata_ali" -#define DRV_VERSION "0.7.5" +#define DRV_VERSION "0.7.8" static int ali_atapi_dma = 0; module_param_named(atapi_dma, ali_atapi_dma, int, 0644); MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)"); +static struct pci_dev *isa_bridge; + /* * Cable special cases */ @@ -147,8 +151,7 @@ static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int o pci_read_config_byte(pdev, pio_fifo, &fifo); fifo &= ~(0x0F << shift); - if (on) - fifo |= (on << shift); + fifo |= (on << shift); pci_write_config_byte(pdev, pio_fifo, fifo); } @@ -337,6 +340,23 @@ static int ali_check_atapi_dma(struct ata_queued_cmd *qc) return 0; } +static void ali_c2_c3_postreset(struct ata_link *link, unsigned int *classes) +{ + u8 r; + int port_bit = 4 << link->ap->port_no; + + /* If our bridge is an ALI 1533 then do the extra work */ + if (isa_bridge) { + /* Tristate and re-enable the bus signals */ + pci_read_config_byte(isa_bridge, 0x58, &r); + r &= ~port_bit; + pci_write_config_byte(isa_bridge, 0x58, r); + r |= port_bit; + pci_write_config_byte(isa_bridge, 0x58, r); + } + ata_sff_postreset(link, classes); +} + static struct scsi_host_template ali_sht = { ATA_BMDMA_SHT(DRV_NAME), }; @@ -349,10 +369,11 @@ static struct ata_port_operations ali_early_port_ops = { .inherits = &ata_sff_port_ops, .cable_detect = ata_cable_40wire, .set_piomode = ali_set_piomode, + .sff_data_xfer = ata_sff_data_xfer32, }; static const struct ata_port_operations ali_dma_base_ops = { - .inherits = &ata_bmdma_port_ops, + .inherits = &ata_bmdma32_port_ops, .set_piomode = ali_set_piomode, .set_dmamode = ali_set_dmamode, }; @@ -377,6 +398,17 @@ static struct ata_port_operations ali_c2_port_ops = { .check_atapi_dma = ali_check_atapi_dma, .cable_detect = ali_c2_cable_detect, .dev_config = ali_lock_sectors, + .postreset = ali_c2_c3_postreset, +}; + +/* + * Port operations for DMA capable ALi with cable detect + */ +static struct ata_port_operations ali_c4_port_ops = { + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .cable_detect = ali_c2_cable_detect, + .dev_config = ali_lock_sectors, }; /* @@ -401,52 +433,49 @@ static struct ata_port_operations ali_c5_port_ops = { static void ali_init_chipset(struct pci_dev *pdev) { u8 tmp; - struct pci_dev *north, *isa_bridge; + struct pci_dev *north; /* * The chipset revision selects the driver operations and * mode data. 
*/ - if (pdev->revision >= 0x20 && pdev->revision < 0xC2) { - /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ - pci_read_config_byte(pdev, 0x4B, &tmp); - /* Clear CD-ROM DMA write bit */ - tmp &= 0x7F; - pci_write_config_byte(pdev, 0x4B, tmp); - } else if (pdev->revision >= 0xC2) { - /* Enable cable detection logic */ + if (pdev->revision <= 0x20) { + pci_read_config_byte(pdev, 0x53, &tmp); + tmp |= 0x03; + pci_write_config_byte(pdev, 0x53, tmp); + } else { + pci_read_config_byte(pdev, 0x4a, &tmp); + pci_write_config_byte(pdev, 0x4a, tmp | 0x20); pci_read_config_byte(pdev, 0x4B, &tmp); - pci_write_config_byte(pdev, 0x4B, tmp | 0x08); - } - north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); - isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - - if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) { - /* Configure the ALi bridge logic. For non ALi rely on BIOS. - Set the south bridge enable bit */ - pci_read_config_byte(isa_bridge, 0x79, &tmp); - if (pdev->revision == 0xC2) - pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04); - else if (pdev->revision > 0xC2 && pdev->revision < 0xC5) - pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02); - } - if (pdev->revision >= 0x20) { + if (pdev->revision < 0xC2) + /* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */ + /* Clear CD-ROM DMA write bit */ + tmp &= 0x7F; + /* Cable and UDMA */ + pci_write_config_byte(pdev, 0x4B, tmp | 0x09); /* * CD_ROM DMA on (0x53 bit 0). Enable this even if we want * to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control * via 0x54/55. */ pci_read_config_byte(pdev, 0x53, &tmp); - if (pdev->revision <= 0x20) - tmp &= ~0x02; if (pdev->revision >= 0xc7) tmp |= 0x03; else tmp |= 0x01; /* CD_ROM enable for DMA */ pci_write_config_byte(pdev, 0x53, tmp); } - pci_dev_put(isa_bridge); + north = pci_get_bus_and_slot(0, PCI_DEVFN(0,0)); + if (north && north->vendor == PCI_VENDOR_ID_AL && isa_bridge) { + /* Configure the ALi bridge logic. For non ALi rely on BIOS. 
+ Set the south bridge enable bit */ + pci_read_config_byte(isa_bridge, 0x79, &tmp); + if (pdev->revision == 0xC2) + pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04); + else if (pdev->revision > 0xC2 && pdev->revision < 0xC5) + pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02); + } pci_dev_put(north); ata_pci_bmdma_clear_simplex(pdev); } @@ -503,7 +532,7 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = ATA_UDMA5, - .port_ops = &ali_c2_port_ops + .port_ops = &ali_c4_port_ops }; /* Revision 0xC5 is UDMA133 with LBA48 DMA */ static const struct ata_port_info info_c5 = { @@ -516,7 +545,6 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) const struct ata_port_info *ppi[] = { NULL, NULL }; u8 tmp; - struct pci_dev *isa_bridge; int rc; rc = pcim_enable_device(pdev); @@ -543,14 +571,12 @@ static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id) ali_init_chipset(pdev); - isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); if (isa_bridge && pdev->revision >= 0x20 && pdev->revision < 0xC2) { /* Are we paired with a UDMA capable chip */ pci_read_config_byte(isa_bridge, 0x5E, &tmp); if ((tmp & 0x1E) == 0x12) ppi[0] = &info_20_udma; } - pci_dev_put(isa_bridge); return ata_pci_sff_init_one(pdev, ppi, &ali_sht, NULL); } @@ -590,13 +616,20 @@ static struct pci_driver ali_pci_driver = { static int __init ali_init(void) { - return pci_register_driver(&ali_pci_driver); + int ret; + isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + + ret = pci_register_driver(&ali_pci_driver); + if (ret < 0) + pci_dev_put(isa_bridge); + return ret; } static void __exit ali_exit(void) { pci_unregister_driver(&ali_pci_driver); + pci_dev_put(isa_bridge); } diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c index 0ec9c7d9fe9d..63719ab9ea44 100644 --- a/drivers/ata/pata_amd.c +++ b/drivers/ata/pata_amd.c @@ -24,7 +24,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_amd" -#define DRV_VERSION "0.3.10" +#define DRV_VERSION "0.3.11" /** * timing_setup - shared timing computation and load @@ -345,7 +345,7 @@ static struct scsi_host_template amd_sht = { }; static const struct ata_port_operations amd_base_port_ops = { - .inherits = &ata_bmdma_port_ops, + .inherits = &ata_bmdma32_port_ops, .prereset = amd_pre_reset, }; diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index e0c4f05d7d57..65c28e5a6cd7 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c @@ -30,7 +30,7 @@ #define DRV_VERSION "0.6.2" struct hpt_clock { - u8 xfer_speed; + u8 xfer_mode; u32 timing; }; @@ -189,28 +189,6 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) return ata_bmdma_mode_filter(adev, mask); } -/** - * hpt36x_find_mode - reset the hpt36x bus - * @ap: ATA port - * @speed: transfer mode - * - * Return the 32bit register programming information for this channel - * that matches the speed provided. 
- */ - -static u32 hpt36x_find_mode(struct ata_port *ap, int speed) -{ - struct hpt_clock *clocks = ap->host->private_data; - - while(clocks->xfer_speed) { - if (clocks->xfer_speed == speed) - return clocks->timing; - clocks++; - } - BUG(); - return 0xffffffffU; /* silence compiler warning */ -} - static int hpt36x_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); @@ -226,25 +204,16 @@ static int hpt36x_cable_detect(struct ata_port *ap) return ATA_CBL_PATA80; } -/** - * hpt366_set_piomode - PIO setup - * @ap: ATA interface - * @adev: device on the interface - * - * Perform PIO mode setup. - */ - -static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) +static void hpt366_set_mode(struct ata_port *ap, struct ata_device *adev, + u8 mode) { + struct hpt_clock *clocks = ap->host->private_data; struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; + u32 addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); + u32 addr2 = 0x51 + 4 * ap->port_no; + u32 mask, reg; u8 fast; - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - /* Fast interrupt prediction disable, hold off interrupt disable */ pci_read_config_byte(pdev, addr2, &fast); if (fast & 0x80) { @@ -252,12 +221,43 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) pci_write_config_byte(pdev, addr2, fast); } + /* determine timing mask and find matching clock entry */ + if (mode < XFER_MW_DMA_0) + mask = 0xc1f8ffff; + else if (mode < XFER_UDMA_0) + mask = 0x303800ff; + else + mask = 0x30070000; + + while (clocks->xfer_mode) { + if (clocks->xfer_mode == mode) + break; + clocks++; + } + if (!clocks->xfer_mode) + BUG(); + + /* + * Combine new mode bits with old config bits and disable + * on-chip PIO FIFO/buffer (and PIO MST mode as well) to avoid + * problems handling I/O errors later. + */ pci_read_config_dword(pdev, addr1, ®); - mode = hpt36x_find_mode(ap, adev->pio_mode); - mode &= ~0x8000000; /* No FIFO in PIO */ - mode &= ~0x30070000; /* Leave config bits alone */ - reg &= 0x30070000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + reg = ((reg & ~mask) | (clocks->timing & mask)) & ~0xc0000000; + pci_write_config_dword(pdev, addr1, reg); +} + +/** + * hpt366_set_piomode - PIO setup + * @ap: ATA interface + * @adev: device on the interface + * + * Perform PIO mode setup. 
+ */ + +static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) +{ + hpt366_set_mode(ap, adev, adev->pio_mode); } /** @@ -271,28 +271,7 @@ static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev) static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev) { - struct pci_dev *pdev = to_pci_dev(ap->host->dev); - u32 addr1, addr2; - u32 reg; - u32 mode; - u8 fast; - - addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no); - addr2 = 0x51 + 4 * ap->port_no; - - /* Fast interrupt prediction disable, hold off interrupt disable */ - pci_read_config_byte(pdev, addr2, &fast); - if (fast & 0x80) { - fast &= ~0x80; - pci_write_config_byte(pdev, addr2, fast); - } - - pci_read_config_dword(pdev, addr1, ®); - mode = hpt36x_find_mode(ap, adev->dma_mode); - mode |= 0x8000000; /* FIFO in MWDMA or UDMA */ - mode &= ~0xC0000000; /* Leave config bits alone */ - reg &= 0xC0000000; /* Strip timing bits */ - pci_write_config_dword(pdev, addr1, reg | mode); + hpt366_set_mode(ap, adev, adev->dma_mode); } static struct scsi_host_template hpt36x_sht = { diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c index f11a320337c0..f19cc645881a 100644 --- a/drivers/ata/pata_hpt3x3.c +++ b/drivers/ata/pata_hpt3x3.c @@ -23,7 +23,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_hpt3x3" -#define DRV_VERSION "0.5.3" +#define DRV_VERSION "0.6.1" /** * hpt3x3_set_piomode - PIO setup @@ -80,14 +80,48 @@ static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev) r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */ if (adev->dma_mode >= XFER_UDMA_0) - r2 |= (0x10 << dn); /* Ultra mode */ + r2 |= (0x01 << dn); /* Ultra mode */ else - r2 |= (0x01 << dn); /* MWDMA */ + r2 |= (0x10 << dn); /* MWDMA */ pci_write_config_dword(pdev, 0x44, r1); pci_write_config_dword(pdev, 0x48, r2); } -#endif /* CONFIG_PATA_HPT3X3_DMA */ + +/** + * hpt3x3_freeze - DMA workaround + * @ap: port to freeze + * + * When freezing an HPT3x3 we must stop any pending DMA before + * writing to the control register or the chip will hang + */ + +static void hpt3x3_freeze(struct ata_port *ap) +{ + void __iomem *mmio = ap->ioaddr.bmdma_addr; + + iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ ATA_DMA_START, + mmio + ATA_DMA_CMD); + ata_sff_dma_pause(ap); + ata_sff_freeze(ap); +} + +/** + * hpt3x3_bmdma_setup - DMA workaround + * @qc: Queued command + * + * When issuing BMDMA we must clean up the error/active bits in + * software on this device + */ + +static void hpt3x3_bmdma_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + u8 r = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + r |= ATA_DMA_INTR | ATA_DMA_ERR; + iowrite8(r, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + return ata_bmdma_setup(qc); +} /** * hpt3x3_atapi_dma - ATAPI DMA check @@ -101,18 +135,23 @@ static int hpt3x3_atapi_dma(struct ata_queued_cmd *qc) return 1; } +#endif /* CONFIG_PATA_HPT3X3_DMA */ + static struct scsi_host_template hpt3x3_sht = { ATA_BMDMA_SHT(DRV_NAME), }; static struct ata_port_operations hpt3x3_port_ops = { .inherits = &ata_bmdma_port_ops, - .check_atapi_dma= hpt3x3_atapi_dma, .cable_detect = ata_cable_40wire, .set_piomode = hpt3x3_set_piomode, #if defined(CONFIG_PATA_HPT3X3_DMA) .set_dmamode = hpt3x3_set_dmamode, + .bmdma_setup = hpt3x3_bmdma_setup, + .check_atapi_dma= hpt3x3_atapi_dma, + .freeze = hpt3x3_freeze, #endif + }; /** diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c index 7c8faa48b5f3..aa576cac4d17 100644 --- a/drivers/ata/pata_mpiix.c +++ 
b/drivers/ata/pata_mpiix.c @@ -35,7 +35,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_mpiix" -#define DRV_VERSION "0.7.6" +#define DRV_VERSION "0.7.7" enum { IDETIM = 0x6C, /* IDE control register */ @@ -146,6 +146,7 @@ static struct ata_port_operations mpiix_port_ops = { .cable_detect = ata_cable_40wire, .set_piomode = mpiix_set_piomode, .prereset = mpiix_pre_reset, + .sff_data_xfer = ata_sff_data_xfer32, }; static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c index 6afa07a37648..d8d743af3225 100644 --- a/drivers/ata/pata_platform.c +++ b/drivers/ata/pata_platform.c @@ -186,7 +186,7 @@ EXPORT_SYMBOL_GPL(__pata_platform_probe); * A platform bus ATA device has been unplugged. Perform the needed * cleanup. Also called on module unload for any active devices. */ -int __devexit __pata_platform_remove(struct device *dev) +int __pata_platform_remove(struct device *dev) { struct ata_host *host = dev_get_drvdata(dev); diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c index 83580a59db58..9e764e5747e6 100644 --- a/drivers/ata/pata_sil680.c +++ b/drivers/ata/pata_sil680.c @@ -32,7 +32,7 @@ #include <linux/libata.h> #define DRV_NAME "pata_sil680" -#define DRV_VERSION "0.4.8" +#define DRV_VERSION "0.4.9" #define SIL680_MMIO_BAR 5 @@ -195,7 +195,7 @@ static struct scsi_host_template sil680_sht = { }; static struct ata_port_operations sil680_port_ops = { - .inherits = &ata_bmdma_port_ops, + .inherits = &ata_bmdma32_port_ops, .cable_detect = sil680_cable_detect, .set_piomode = sil680_set_piomode, .set_dmamode = sil680_set_dmamode, diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index ccee930f1e12..2590c2279fa7 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -51,13 +51,6 @@ struct sil24_sge { __le32 flags; }; -/* - * Port multiplier - */ -struct sil24_port_multiplier { - __le32 diag; - __le32 sactive; -}; enum { SIL24_HOST_BAR = 0, diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index 088885ed51b9..e1c7611e9144 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -64,7 +64,7 @@ #include <linux/jiffies.h> #include "iphase.h" #include "suni.h" -#define swap(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) +#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) #define PRIV(dev) ((struct suni_priv *) dev->phy_data) @@ -1306,7 +1306,7 @@ static void rx_dle_intr(struct atm_dev *dev) // get real pkt length pwang_test trailer = (struct cpcs_trailer*)((u_char *)skb->data + skb->len - sizeof(*trailer)); - length = swap(trailer->length); + length = swap_byte_order(trailer->length); if ((length > iadev->rx_buf_sz) || (length > (skb->len - sizeof(struct cpcs_trailer)))) { @@ -2995,7 +2995,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { skb->len, PCI_DMA_TODEVICE); wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo; - /* wr_ptr->bytes = swap(total_len); didn't seem to affect ?? */ + /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */ wr_ptr->bytes = skb->len; /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */ diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 35914b6e1d2a..f5be8081cd81 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -616,6 +616,7 @@ config HVC_ISERIES default y select HVC_DRIVER select HVC_IRQ + select VIOPATH help iSeries machines support a hypervisor virtual console. 
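Several of the PATA driver hunks above (ata_piix, pata_ali, pata_amd, pata_mpiix, pata_sil680) opt in to the new 32-bit PIO path, either by inheriting the freshly exported ata_bmdma32_port_ops template or by pointing .sff_data_xfer at ata_sff_data_xfer32 directly. The sketch below shows both forms; example_bmdma32_ops and example_pio32_ops are illustrative names of my own, the ops and helpers themselves are the ones introduced in this diff.

    #include <linux/libata.h>

    /* Option 1: inherit the whole BMDMA-with-32bit-PIO template. */
    static struct ata_port_operations example_bmdma32_ops = {
            .inherits       = &ata_bmdma32_port_ops,
            .cable_detect   = ata_cable_40wire,
    };

    /* Option 2: keep an existing ops table and only override the PIO
     * data transfer hook, as pata_mpiix and ali_early_port_ops do.
     */
    static struct ata_port_operations example_pio32_ops = {
            .inherits       = &ata_sff_port_ops,
            .cable_detect   = ata_cable_40wire,
            .sff_data_xfer  = ata_sff_data_xfer32,
    };

Either form relies on the helper's trailing-byte handling: buffers that are not a multiple of four bytes are finished with a single padded 32-bit access, as implemented in ata_sff_data_xfer32 in the libata-sff.c hunk above.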
diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c index 91cdb35a9204..0afc8b82212e 100644 --- a/drivers/char/hvc_beat.c +++ b/drivers/char/hvc_beat.c @@ -44,7 +44,7 @@ static int hvc_beat_get_chars(uint32_t vtermno, char *buf, int cnt) static unsigned char q[sizeof(unsigned long) * 2] __attribute__((aligned(sizeof(unsigned long)))); static int qlen = 0; - unsigned long got; + u64 got; again: if (qlen) { @@ -63,7 +63,7 @@ again: } } if (beat_get_term_char(vtermno, &got, - ((unsigned long *)q), ((unsigned long *)q) + 1) == 0) { + ((u64 *)q), ((u64 *)q) + 1) == 0) { qlen = got; goto again; } diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 112a6ba9a96f..146c97613da0 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c @@ -32,7 +32,7 @@ /* These are global because they are accessed in tty_io.c */ #ifdef CONFIG_UNIX98_PTYS -struct tty_driver *ptm_driver; +static struct tty_driver *ptm_driver; static struct tty_driver *pts_driver; #endif diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index ab18c1e7b115..70efba2ee053 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c @@ -273,12 +273,23 @@ static void tpm_nsc_remove(struct device *dev) } } -static struct device_driver nsc_drv = { - .name = "tpm_nsc", - .bus = &platform_bus_type, - .owner = THIS_MODULE, - .suspend = tpm_pm_suspend, - .resume = tpm_pm_resume, +static int tpm_nsc_suspend(struct platform_device *dev, pm_message_t msg) +{ + return tpm_pm_suspend(&dev->dev, msg); +} + +static int tpm_nsc_resume(struct platform_device *dev) +{ + return tpm_pm_resume(&dev->dev); +} + +static struct platform_driver nsc_drv = { + .suspend = tpm_nsc_suspend, + .resume = tpm_nsc_resume, + .driver = { + .name = "tpm_nsc", + .owner = THIS_MODULE, + }, }; static int __init init_nsc(void) @@ -297,7 +308,7 @@ static int __init init_nsc(void) return -ENODEV; } - err = driver_register(&nsc_drv); + err = platform_driver_register(&nsc_drv); if (err) return err; @@ -308,17 +319,15 @@ static int __init init_nsc(void) /* enable the DPM module */ tpm_write_index(nscAddrBase, NSC_LDC_INDEX, 0x01); - pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL); + pdev = platform_device_alloc("tpm_nscl0", -1); if (!pdev) { rc = -ENOMEM; goto err_unreg_drv; } - pdev->name = "tpm_nscl0"; - pdev->id = -1; pdev->num_resources = 0; + pdev->dev.driver = &nsc_drv.driver; pdev->dev.release = tpm_nsc_remove; - pdev->dev.driver = &nsc_drv; if ((rc = platform_device_register(pdev)) < 0) goto err_free_dev; @@ -377,7 +386,7 @@ err_unreg_dev: err_free_dev: kfree(pdev); err_unreg_drv: - driver_unregister(&nsc_drv); + platform_driver_unregister(&nsc_drv); return rc; } @@ -390,7 +399,7 @@ static void __exit cleanup_nsc(void) pdev = NULL; } - driver_unregister(&nsc_drv); + platform_driver_unregister(&nsc_drv); } module_init(init_nsc); diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 80014213fb53..7900bd63b36d 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -969,8 +969,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows) * Takes the console sem and the called methods then take the tty * termios_mutex and the tty ctrl_lock in that order. 
*/ - -int vt_resize(struct tty_struct *tty, struct winsize *ws) +static int vt_resize(struct tty_struct *tty, struct winsize *ws) { struct vc_data *vc = tty->driver_data; int ret; diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index 50a071f1c945..777fba48d2d3 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c @@ -238,11 +238,11 @@ static ssize_t host_control_on_shutdown_store(struct device *dev, } /** - * smi_request: generate SMI request + * dcdbas_smi_request: generate SMI request * * Called with smi_data_lock. */ -static int smi_request(struct smi_cmd *smi_cmd) +int dcdbas_smi_request(struct smi_cmd *smi_cmd) { cpumask_t old_mask; int ret = 0; @@ -309,14 +309,14 @@ static ssize_t smi_request_store(struct device *dev, switch (val) { case 2: /* Raw SMI */ - ret = smi_request(smi_cmd); + ret = dcdbas_smi_request(smi_cmd); if (!ret) ret = count; break; case 1: /* Calling Interface SMI */ smi_cmd->ebx = (u32) virt_to_phys(smi_cmd->command_buffer); - ret = smi_request(smi_cmd); + ret = dcdbas_smi_request(smi_cmd); if (!ret) ret = count; break; @@ -333,6 +333,7 @@ out: mutex_unlock(&smi_data_lock); return ret; } +EXPORT_SYMBOL(dcdbas_smi_request); /** * host_control_smi: generate host control SMI diff --git a/drivers/firmware/dcdbas.h b/drivers/firmware/dcdbas.h index 87bc3417de27..ca3cb0a54ab6 100644 --- a/drivers/firmware/dcdbas.h +++ b/drivers/firmware/dcdbas.h @@ -101,5 +101,7 @@ struct apm_cmd { } __attribute__ ((packed)) parameters; } __attribute__ ((packed)); +int dcdbas_smi_request(struct smi_cmd *smi_cmd); + #endif /* _DCDBAS_H_ */ diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c index 3bf8ee120d42..261b9aa3f248 100644 --- a/drivers/firmware/memmap.c +++ b/drivers/firmware/memmap.c @@ -56,9 +56,9 @@ struct memmap_attribute { ssize_t (*show)(struct firmware_map_entry *entry, char *buf); }; -struct memmap_attribute memmap_start_attr = __ATTR_RO(start); -struct memmap_attribute memmap_end_attr = __ATTR_RO(end); -struct memmap_attribute memmap_type_attr = __ATTR_RO(type); +static struct memmap_attribute memmap_start_attr = __ATTR_RO(start); +static struct memmap_attribute memmap_end_attr = __ATTR_RO(end); +static struct memmap_attribute memmap_type_attr = __ATTR_RO(type); /* * These are default attributes that are added for every memmap entry. 
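The tpm_nsc hunk above replaces a bare struct device_driver registered on platform_bus_type with a proper struct platform_driver, wrapping the TPM core's device-level PM callbacks so they receive the embedded struct device. A trimmed sketch of that conversion pattern follows; the tpm_pm_suspend/tpm_pm_resume prototypes match their use in the diff, while example_suspend, example_resume and example_drv are illustrative names rather than the complete driver.

    #include <linux/module.h>
    #include <linux/platform_device.h>

    /* PM helpers provided by the TPM core (device-level prototypes). */
    extern int tpm_pm_suspend(struct device *dev, pm_message_t msg);
    extern int tpm_pm_resume(struct device *dev);

    /* platform_driver callbacks take a platform_device, so wrap them. */
    static int example_suspend(struct platform_device *dev, pm_message_t msg)
    {
            return tpm_pm_suspend(&dev->dev, msg);
    }

    static int example_resume(struct platform_device *dev)
    {
            return tpm_pm_resume(&dev->dev);
    }

    static struct platform_driver example_drv = {
            .suspend = example_suspend,
            .resume  = example_resume,
            .driver  = {
                    .name   = "tpm_nsc",
                    .owner  = THIS_MODULE,
            },
    };

The driver is then registered with platform_driver_register(&example_drv) instead of driver_register(), and torn down with platform_driver_unregister(), matching the init_nsc()/cleanup_nsc() changes in the hunk.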
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index a812db243477..6ba57e91d7ab 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -2705,7 +2705,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) sizeof(struct ietf_mpa_frame)); - /* notify OF layer that accept event was successfull */ + /* notify OF layer that accept event was successful */ cm_id->add_ref(cm_id); cm_event.event = IW_CM_EVENT_ESTABLISHED; diff --git a/drivers/isdn/hardware/eicon/debuglib.h b/drivers/isdn/hardware/eicon/debuglib.h index 016410cf2273..8ea587783e14 100644 --- a/drivers/isdn/hardware/eicon/debuglib.h +++ b/drivers/isdn/hardware/eicon/debuglib.h @@ -235,7 +235,7 @@ typedef void ( * DbgOld) (unsigned short, char *, va_list) ; typedef void ( * DbgEv) (unsigned short, unsigned long, va_list) ; typedef void ( * DbgIrq) (unsigned short, int, char *, va_list) ; typedef struct _DbgHandle_ -{ char Registered ; /* driver successfull registered */ +{ char Registered ; /* driver successfully registered */ #define DBG_HANDLE_REG_NEW 0x01 /* this (new) structure */ #define DBG_HANDLE_REG_OLD 0x7f /* old structure (see below) */ char Version; /* version of this structure */ diff --git a/drivers/isdn/hardware/eicon/os_4bri.c b/drivers/isdn/hardware/eicon/os_4bri.c index 7b4ec3f60dbf..c964b8d91ada 100644 --- a/drivers/isdn/hardware/eicon/os_4bri.c +++ b/drivers/isdn/hardware/eicon/os_4bri.c @@ -997,7 +997,7 @@ diva_4bri_start_adapter(PISDN_ADAPTER IoAdapter, diva_xdi_display_adapter_features(IoAdapter->ANum); for (i = 0; i < IoAdapter->tasks; i++) { - DBG_LOG(("A(%d) %s adapter successfull started", + DBG_LOG(("A(%d) %s adapter successfully started", IoAdapter->QuadroList->QuadroAdapter[i]->ANum, (IoAdapter->tasks == 1) ? "BRI 2.0" : "4BRI")) diva_xdi_didd_register_adapter(IoAdapter->QuadroList->QuadroAdapter[i]->ANum); diff --git a/drivers/isdn/hardware/eicon/os_bri.c b/drivers/isdn/hardware/eicon/os_bri.c index f31bba5b16ff..08f01993f46b 100644 --- a/drivers/isdn/hardware/eicon/os_bri.c +++ b/drivers/isdn/hardware/eicon/os_bri.c @@ -736,7 +736,7 @@ diva_bri_start_adapter(PISDN_ADAPTER IoAdapter, IoAdapter->Properties.Features = (word) features; diva_xdi_display_adapter_features(IoAdapter->ANum); - DBG_LOG(("A(%d) BRI adapter successfull started", IoAdapter->ANum)) + DBG_LOG(("A(%d) BRI adapter successfully started", IoAdapter->ANum)) /* Register with DIDD */ diff --git a/drivers/isdn/hardware/eicon/os_pri.c b/drivers/isdn/hardware/eicon/os_pri.c index 903356547b79..5d65405c75f4 100644 --- a/drivers/isdn/hardware/eicon/os_pri.c +++ b/drivers/isdn/hardware/eicon/os_pri.c @@ -513,7 +513,7 @@ diva_pri_start_adapter(PISDN_ADAPTER IoAdapter, diva_xdi_display_adapter_features(IoAdapter->ANum); - DBG_LOG(("A(%d) PRI adapter successfull started", IoAdapter->ANum)) + DBG_LOG(("A(%d) PRI adapter successfully started", IoAdapter->ANum)) /* Register with DIDD */ diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ab7c8e4a61f9..719943763391 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -215,7 +215,6 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, /* choose a good rdev and read the page from there */ mdk_rdev_t *rdev; - struct list_head *tmp; sector_t target; if (!page) @@ -223,7 +222,7 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, if (!page) return ERR_PTR(-ENOMEM); - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { if (! 
test_bit(In_sync, &rdev->flags) || test_bit(Faulty, &rdev->flags)) continue; @@ -964,9 +963,11 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) */ page = bitmap->sb_page; offset = sizeof(bitmap_super_t); - read_sb_page(bitmap->mddev, bitmap->offset, - page, - index, count); + if (!file) + read_sb_page(bitmap->mddev, + bitmap->offset, + page, + index, count); } else if (file) { page = read_page(file, index, bitmap, count); offset = 0; diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index f26c1f9a475b..86d9adf90e79 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c @@ -283,7 +283,6 @@ static int reconfig(mddev_t *mddev, int layout, int chunk_size) static int run(mddev_t *mddev) { mdk_rdev_t *rdev; - struct list_head *tmp; int i; conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL); @@ -296,7 +295,7 @@ static int run(mddev_t *mddev) } conf->nfaults = 0; - rdev_for_each(rdev, tmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) conf->rdev = rdev; mddev->array_sectors = mddev->size * 2; diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 3b90c5c924ec..1e3aea9eecf1 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c @@ -105,7 +105,6 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) int i, nb_zone, cnt; sector_t min_sectors; sector_t curr_sector; - struct list_head *tmp; conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t), GFP_KERNEL); @@ -115,7 +114,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) cnt = 0; conf->array_sectors = 0; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { int j = rdev->raid_disk; dev_info_t *disk = conf->disks + j; diff --git a/drivers/md/md.c b/drivers/md/md.c index 1b1d32694f6f..41e2509bf896 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -214,20 +214,33 @@ static inline mddev_t *mddev_get(mddev_t *mddev) return mddev; } +static void mddev_delayed_delete(struct work_struct *ws) +{ + mddev_t *mddev = container_of(ws, mddev_t, del_work); + kobject_del(&mddev->kobj); + kobject_put(&mddev->kobj); +} + static void mddev_put(mddev_t *mddev) { if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) return; - if (!mddev->raid_disks && list_empty(&mddev->disks)) { + if (!mddev->raid_disks && list_empty(&mddev->disks) && + !mddev->hold_active) { list_del(&mddev->all_mddevs); - spin_unlock(&all_mddevs_lock); - blk_cleanup_queue(mddev->queue); - if (mddev->sysfs_state) - sysfs_put(mddev->sysfs_state); - mddev->sysfs_state = NULL; - kobject_put(&mddev->kobj); - } else - spin_unlock(&all_mddevs_lock); + if (mddev->gendisk) { + /* we did a probe so need to clean up. + * Call schedule_work inside the spinlock + * so that flush_scheduled_work() after + * mddev_find will succeed in waiting for the + * work to be done. 
+ */ + INIT_WORK(&mddev->del_work, mddev_delayed_delete); + schedule_work(&mddev->del_work); + } else + kfree(mddev); + } + spin_unlock(&all_mddevs_lock); } static mddev_t * mddev_find(dev_t unit) @@ -236,15 +249,50 @@ static mddev_t * mddev_find(dev_t unit) retry: spin_lock(&all_mddevs_lock); - list_for_each_entry(mddev, &all_mddevs, all_mddevs) - if (mddev->unit == unit) { - mddev_get(mddev); + + if (unit) { + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == unit) { + mddev_get(mddev); + spin_unlock(&all_mddevs_lock); + kfree(new); + return mddev; + } + + if (new) { + list_add(&new->all_mddevs, &all_mddevs); spin_unlock(&all_mddevs_lock); - kfree(new); - return mddev; + new->hold_active = UNTIL_IOCTL; + return new; } - - if (new) { + } else if (new) { + /* find an unused unit number */ + static int next_minor = 512; + int start = next_minor; + int is_free = 0; + int dev = 0; + while (!is_free) { + dev = MKDEV(MD_MAJOR, next_minor); + next_minor++; + if (next_minor > MINORMASK) + next_minor = 0; + if (next_minor == start) { + /* Oh dear, all in use. */ + spin_unlock(&all_mddevs_lock); + kfree(new); + return NULL; + } + + is_free = 1; + list_for_each_entry(mddev, &all_mddevs, all_mddevs) + if (mddev->unit == dev) { + is_free = 0; + break; + } + } + new->unit = dev; + new->md_minor = MINOR(dev); + new->hold_active = UNTIL_STOP; list_add(&new->all_mddevs, &all_mddevs); spin_unlock(&all_mddevs_lock); return new; @@ -275,16 +323,6 @@ static mddev_t * mddev_find(dev_t unit) new->resync_max = MaxSector; new->level = LEVEL_NONE; - new->queue = blk_alloc_queue(GFP_KERNEL); - if (!new->queue) { - kfree(new); - return NULL; - } - /* Can be unlocked because the queue is new: no concurrency */ - queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue); - - blk_queue_make_request(new->queue, md_fail_request); - goto retry; } @@ -307,25 +345,23 @@ static inline void mddev_unlock(mddev_t * mddev) static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) { - mdk_rdev_t * rdev; - struct list_head *tmp; + mdk_rdev_t *rdev; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->desc_nr == nr) return rdev; - } + return NULL; } static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) { - struct list_head *tmp; mdk_rdev_t *rdev; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->bdev->bd_dev == dev) return rdev; - } + return NULL; } @@ -861,7 +897,6 @@ static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) { mdp_super_t *sb; - struct list_head *tmp; mdk_rdev_t *rdev2; int next_spare = mddev->raid_disks; @@ -933,7 +968,7 @@ static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) sb->state |= (1<<MD_SB_BITMAP_PRESENT); sb->disks[0].state = (1<<MD_DISK_REMOVED); - rdev_for_each(rdev2, tmp, mddev) { + list_for_each_entry(rdev2, &mddev->disks, same_set) { mdp_disk_t *d; int desc_nr; if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) @@ -1259,7 +1294,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) { struct mdp_superblock_1 *sb; - struct list_head *tmp; mdk_rdev_t *rdev2; int max_dev, i; /* make rdev->sb match mddev and rdev data. 
*/ @@ -1307,7 +1341,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) } max_dev = 0; - rdev_for_each(rdev2, tmp, mddev) + list_for_each_entry(rdev2, &mddev->disks, same_set) if (rdev2->desc_nr+1 > max_dev) max_dev = rdev2->desc_nr+1; @@ -1316,7 +1350,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) for (i=0; i<max_dev;i++) sb->dev_roles[i] = cpu_to_le16(0xfffe); - rdev_for_each(rdev2, tmp, mddev) { + list_for_each_entry(rdev2, &mddev->disks, same_set) { i = rdev2->desc_nr; if (test_bit(Faulty, &rdev2->flags)) sb->dev_roles[i] = cpu_to_le16(0xfffe); @@ -1466,6 +1500,9 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) list_add_rcu(&rdev->same_set, &mddev->disks); bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); + + /* May as well allow recovery to be retried once */ + mddev->recovery_disabled = 0; return 0; fail: @@ -1571,8 +1608,7 @@ static void kick_rdev_from_array(mdk_rdev_t * rdev) static void export_array(mddev_t *mddev) { - struct list_head *tmp; - mdk_rdev_t *rdev; + mdk_rdev_t *rdev, *tmp; rdev_for_each(rdev, tmp, mddev) { if (!rdev->mddev) { @@ -1593,7 +1629,7 @@ static void print_desc(mdp_disk_t *desc) desc->major,desc->minor,desc->raid_disk,desc->state); } -static void print_sb(mdp_super_t *sb) +static void print_sb_90(mdp_super_t *sb) { int i; @@ -1624,10 +1660,57 @@ static void print_sb(mdp_super_t *sb) } printk(KERN_INFO "md: THIS: "); print_desc(&sb->this_disk); - } -static void print_rdev(mdk_rdev_t *rdev) +static void print_sb_1(struct mdp_superblock_1 *sb) +{ + __u8 *uuid; + + uuid = sb->set_uuid; + printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" + ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" + KERN_INFO "md: Name: \"%s\" CT:%llu\n", + le32_to_cpu(sb->major_version), + le32_to_cpu(sb->feature_map), + uuid[0], uuid[1], uuid[2], uuid[3], + uuid[4], uuid[5], uuid[6], uuid[7], + uuid[8], uuid[9], uuid[10], uuid[11], + uuid[12], uuid[13], uuid[14], uuid[15], + sb->set_name, + (unsigned long long)le64_to_cpu(sb->ctime) + & MD_SUPERBLOCK_1_TIME_SEC_MASK); + + uuid = sb->device_uuid; + printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" + " RO:%llu\n" + KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" + ":%02x%02x%02x%02x%02x%02x\n" + KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" + KERN_INFO "md: (MaxDev:%u) \n", + le32_to_cpu(sb->level), + (unsigned long long)le64_to_cpu(sb->size), + le32_to_cpu(sb->raid_disks), + le32_to_cpu(sb->layout), + le32_to_cpu(sb->chunksize), + (unsigned long long)le64_to_cpu(sb->data_offset), + (unsigned long long)le64_to_cpu(sb->data_size), + (unsigned long long)le64_to_cpu(sb->super_offset), + (unsigned long long)le64_to_cpu(sb->recovery_offset), + le32_to_cpu(sb->dev_number), + uuid[0], uuid[1], uuid[2], uuid[3], + uuid[4], uuid[5], uuid[6], uuid[7], + uuid[8], uuid[9], uuid[10], uuid[11], + uuid[12], uuid[13], uuid[14], uuid[15], + sb->devflags, + (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, + (unsigned long long)le64_to_cpu(sb->events), + (unsigned long long)le64_to_cpu(sb->resync_offset), + le32_to_cpu(sb->sb_csum), + le32_to_cpu(sb->max_dev) + ); +} + +static void print_rdev(mdk_rdev_t *rdev, int major_version) { char b[BDEVNAME_SIZE]; printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", @@ -1635,15 +1718,22 @@ static void print_rdev(mdk_rdev_t *rdev) test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 
rdev->desc_nr); if (rdev->sb_loaded) { - printk(KERN_INFO "md: rdev superblock:\n"); - print_sb((mdp_super_t*)page_address(rdev->sb_page)); + printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); + switch (major_version) { + case 0: + print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); + break; + case 1: + print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); + break; + } } else printk(KERN_INFO "md: no rdev superblock!\n"); } static void md_print_devices(void) { - struct list_head *tmp, *tmp2; + struct list_head *tmp; mdk_rdev_t *rdev; mddev_t *mddev; char b[BDEVNAME_SIZE]; @@ -1658,12 +1748,12 @@ static void md_print_devices(void) bitmap_print_sb(mddev->bitmap); else printk("%s: ", mdname(mddev)); - rdev_for_each(rdev, tmp2, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) printk("<%s>", bdevname(rdev->bdev,b)); printk("\n"); - rdev_for_each(rdev, tmp2, mddev) - print_rdev(rdev); + list_for_each_entry(rdev, &mddev->disks, same_set) + print_rdev(rdev, mddev->major_version); } printk("md: **********************************\n"); printk("\n"); @@ -1679,9 +1769,8 @@ static void sync_sbs(mddev_t * mddev, int nospares) * with the rest of the array) */ mdk_rdev_t *rdev; - struct list_head *tmp; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->sb_events == mddev->events || (nospares && rdev->raid_disk < 0 && @@ -1699,7 +1788,6 @@ static void sync_sbs(mddev_t * mddev, int nospares) static void md_update_sb(mddev_t * mddev, int force_change) { - struct list_head *tmp; mdk_rdev_t *rdev; int sync_req; int nospares = 0; @@ -1790,7 +1878,7 @@ repeat: mdname(mddev),mddev->in_sync); bitmap_update_sb(mddev->bitmap); - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { char b[BDEVNAME_SIZE]; dprintk(KERN_INFO "md: "); if (rdev->sb_loaded != 1) @@ -1999,7 +2087,6 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) md_wakeup_thread(rdev->mddev->thread); } else if (rdev->mddev->pers) { mdk_rdev_t *rdev2; - struct list_head *tmp; /* Activating a spare .. or possibly reactivating * if we every get bitmaps working here. 
*/ @@ -2010,7 +2097,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) if (rdev->mddev->pers->hot_add_disk == NULL) return -EINVAL; - rdev_for_each(rdev2, tmp, rdev->mddev) + list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) if (rdev2->raid_disk == slot) return -EEXIST; @@ -2125,14 +2212,14 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) */ mddev_t *mddev; int overlap = 0; - struct list_head *tmp, *tmp2; + struct list_head *tmp; mddev_unlock(my_mddev); for_each_mddev(mddev, tmp) { mdk_rdev_t *rdev2; mddev_lock(mddev); - rdev_for_each(rdev2, tmp2, mddev) + list_for_each_entry(rdev2, &mddev->disks, same_set) if (test_bit(AllReserved, &rdev2->flags) || (rdev->bdev == rdev2->bdev && rdev != rdev2 && @@ -2328,8 +2415,7 @@ abort_free: static void analyze_sbs(mddev_t * mddev) { int i; - struct list_head *tmp; - mdk_rdev_t *rdev, *freshest; + mdk_rdev_t *rdev, *freshest, *tmp; char b[BDEVNAME_SIZE]; freshest = NULL; @@ -3046,7 +3132,7 @@ action_store(mddev_t *mddev, const char *page, size_t len) } set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); - sysfs_notify(&mddev->kobj, NULL, "sync_action"); + sysfs_notify_dirent(mddev->sysfs_action); return len; } @@ -3404,6 +3490,8 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, if (!capable(CAP_SYS_ADMIN)) return -EACCES; rv = mddev_lock(mddev); + if (mddev->hold_active == UNTIL_IOCTL) + mddev->hold_active = 0; if (!rv) { rv = entry->store(mddev, page, length); mddev_unlock(mddev); @@ -3414,6 +3502,17 @@ md_attr_store(struct kobject *kobj, struct attribute *attr, static void md_free(struct kobject *ko) { mddev_t *mddev = container_of(ko, mddev_t, kobj); + + if (mddev->sysfs_state) + sysfs_put(mddev->sysfs_state); + + if (mddev->gendisk) { + del_gendisk(mddev->gendisk); + put_disk(mddev->gendisk); + } + if (mddev->queue) + blk_cleanup_queue(mddev->queue); + kfree(mddev); } @@ -3429,34 +3528,74 @@ static struct kobj_type md_ktype = { int mdp_major = 0; -static struct kobject *md_probe(dev_t dev, int *part, void *data) +static int md_alloc(dev_t dev, char *name) { static DEFINE_MUTEX(disks_mutex); mddev_t *mddev = mddev_find(dev); struct gendisk *disk; - int partitioned = (MAJOR(dev) != MD_MAJOR); - int shift = partitioned ? MdpMinorShift : 0; - int unit = MINOR(dev) >> shift; + int partitioned; + int shift; + int unit; int error; if (!mddev) - return NULL; + return -ENODEV; + + partitioned = (MAJOR(mddev->unit) != MD_MAJOR); + shift = partitioned ? MdpMinorShift : 0; + unit = MINOR(mddev->unit) >> shift; + + /* wait for any previous instance if this device + * to be completed removed (mddev_delayed_delete). + */ + flush_scheduled_work(); mutex_lock(&disks_mutex); if (mddev->gendisk) { mutex_unlock(&disks_mutex); mddev_put(mddev); - return NULL; + return -EEXIST; + } + + if (name) { + /* Need to ensure that 'name' is not a duplicate. 
+ */ + mddev_t *mddev2; + spin_lock(&all_mddevs_lock); + + list_for_each_entry(mddev2, &all_mddevs, all_mddevs) + if (mddev2->gendisk && + strcmp(mddev2->gendisk->disk_name, name) == 0) { + spin_unlock(&all_mddevs_lock); + return -EEXIST; + } + spin_unlock(&all_mddevs_lock); + } + + mddev->queue = blk_alloc_queue(GFP_KERNEL); + if (!mddev->queue) { + mutex_unlock(&disks_mutex); + mddev_put(mddev); + return -ENOMEM; } + /* Can be unlocked because the queue is new: no concurrency */ + queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); + + blk_queue_make_request(mddev->queue, md_fail_request); + disk = alloc_disk(1 << shift); if (!disk) { mutex_unlock(&disks_mutex); + blk_cleanup_queue(mddev->queue); + mddev->queue = NULL; mddev_put(mddev); - return NULL; + return -ENOMEM; } - disk->major = MAJOR(dev); + disk->major = MAJOR(mddev->unit); disk->first_minor = unit << shift; - if (partitioned) + if (name) + strcpy(disk->disk_name, name); + else if (partitioned) sprintf(disk->disk_name, "md_d%d", unit); else sprintf(disk->disk_name, "md%d", unit); @@ -3464,7 +3603,7 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) disk->private_data = mddev; disk->queue = mddev->queue; /* Allow extended partitions. This makes the - * 'mdp' device redundant, but we can really + * 'mdp' device redundant, but we can't really * remove it now. */ disk->flags |= GENHD_FL_EXT_DEVT; @@ -3480,9 +3619,35 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) kobject_uevent(&mddev->kobj, KOBJ_ADD); mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); } + mddev_put(mddev); + return 0; +} + +static struct kobject *md_probe(dev_t dev, int *part, void *data) +{ + md_alloc(dev, NULL); return NULL; } +static int add_named_array(const char *val, struct kernel_param *kp) +{ + /* val must be "md_*" where * is not all digits. + * We allocate an array with a large free minor number, and + * set the name to val. val must not already be an active name. + */ + int len = strlen(val); + char buf[DISK_NAME_LEN]; + + while (len && val[len-1] == '\n') + len--; + if (len >= DISK_NAME_LEN) + return -E2BIG; + strlcpy(buf, val, len+1); + if (strncmp(buf, "md_", 3) != 0) + return -EINVAL; + return md_alloc(0, buf); +} + static void md_safemode_timeout(unsigned long data) { mddev_t *mddev = (mddev_t *) data; @@ -3501,7 +3666,6 @@ static int do_md_run(mddev_t * mddev) { int err; int chunk_size; - struct list_head *tmp; mdk_rdev_t *rdev; struct gendisk *disk; struct mdk_personality *pers; @@ -3540,7 +3704,7 @@ static int do_md_run(mddev_t * mddev) } /* devices must have minimum size of one chunk */ - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { if (test_bit(Faulty, &rdev->flags)) continue; if (rdev->size < chunk_size / 1024) { @@ -3565,7 +3729,7 @@ static int do_md_run(mddev_t * mddev) * the only valid external interface is through the md * device. 
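add_named_array() above is the user-visible entry point for named arrays: it trims trailing newlines, bounds the length against DISK_NAME_LEN, insists on an "md_" prefix, and then calls md_alloc() with unit 0 so a free minor is picked. A stand-alone model of just the validation step (illustrative, not part of the patch; validate_new_array_name() is a made-up helper, and DISK_NAME_LEN is taken as 32 as in genhd.h of this era):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define DISK_NAME_LEN 32

/* Returns 0 and leaves the cleaned name in buf, or a negative errno. */
static int validate_new_array_name(const char *val, char buf[DISK_NAME_LEN])
{
	size_t len = strlen(val);

	while (len && val[len - 1] == '\n')	/* sysfs writes carry a trailing newline */
		len--;
	if (len >= DISK_NAME_LEN)
		return -E2BIG;
	memcpy(buf, val, len);
	buf[len] = '\0';
	if (strncmp(buf, "md_", 3) != 0)	/* only md_* names are accepted */
		return -EINVAL;
	return 0;
}

int main(void)
{
	char buf[DISK_NAME_LEN];

	printf("%d %d\n", validate_new_array_name("md_home\n", buf),	/* 0 */
	       validate_new_array_name("home\n", buf));			/* -EINVAL */
	return 0;
}

With the module_param_call(new_array, ...) hook added near the end of md.c, user space can then request a named array with something like echo md_home > /sys/module/md_mod/parameters/new_array (path assuming the usual md_mod module name).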
*/ - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { if (test_bit(Faulty, &rdev->flags)) continue; sync_blockdev(rdev->bdev); @@ -3630,10 +3794,10 @@ static int do_md_run(mddev_t * mddev) */ char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; mdk_rdev_t *rdev2; - struct list_head *tmp2; int warned = 0; - rdev_for_each(rdev, tmp, mddev) { - rdev_for_each(rdev2, tmp2, mddev) { + + list_for_each_entry(rdev, &mddev->disks, same_set) + list_for_each_entry(rdev2, &mddev->disks, same_set) { if (rdev < rdev2 && rdev->bdev->bd_contains == rdev2->bdev->bd_contains) { @@ -3647,7 +3811,7 @@ static int do_md_run(mddev_t * mddev) warned = 1; } } - } + if (warned) printk(KERN_WARNING "True protection against single-disk" @@ -3684,6 +3848,7 @@ static int do_md_run(mddev_t * mddev) printk(KERN_WARNING "md: cannot register extra attributes for %s\n", mdname(mddev)); + mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); } else if (mddev->ro == 2) /* auto-readonly not meaningful */ mddev->ro = 0; @@ -3694,7 +3859,7 @@ static int do_md_run(mddev_t * mddev) mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ mddev->in_sync = 1; - rdev_for_each(rdev, tmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0) { char nm[20]; sprintf(nm, "rd%d", rdev->raid_disk); @@ -3725,9 +3890,8 @@ static int do_md_run(mddev_t * mddev) * it will remove the drives and not do the right thing */ if (mddev->degraded && !mddev->sync_thread) { - struct list_head *rtmp; int spares = 0; - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) @@ -3754,7 +3918,8 @@ static int do_md_run(mddev_t * mddev) mddev->changed = 1; md_new_event(mddev); sysfs_notify_dirent(mddev->sysfs_state); - sysfs_notify(&mddev->kobj, NULL, "sync_action"); + if (mddev->sysfs_action) + sysfs_notify_dirent(mddev->sysfs_action); sysfs_notify(&mddev->kobj, NULL, "degraded"); kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); return 0; @@ -3854,9 +4019,12 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->queue->merge_bvec_fn = NULL; mddev->queue->unplug_fn = NULL; mddev->queue->backing_dev_info.congested_fn = NULL; - if (mddev->pers->sync_request) + if (mddev->pers->sync_request) { sysfs_remove_group(&mddev->kobj, &md_redundancy_group); - + if (mddev->sysfs_action) + sysfs_put(mddev->sysfs_action); + mddev->sysfs_action = NULL; + } module_put(mddev->pers->owner); mddev->pers = NULL; /* tell userspace to handle 'inactive' */ @@ -3883,7 +4051,6 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) */ if (mode == 0) { mdk_rdev_t *rdev; - struct list_head *tmp; printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); @@ -3895,7 +4062,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) } mddev->bitmap_offset = 0; - rdev_for_each(rdev, tmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0) { char nm[20]; sprintf(nm, "rd%d", rdev->raid_disk); @@ -3941,6 +4108,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->barriers_work = 0; mddev->safemode = 0; kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); + if (mddev->hold_active == UNTIL_STOP) + mddev->hold_active = 0; } else if (mddev->pers) printk(KERN_INFO "md: %s switched to read-only mode.\n", @@ -3956,7 +4125,6 @@ out: static void autorun_array(mddev_t *mddev) { mdk_rdev_t 
*rdev; - struct list_head *tmp; int err; if (list_empty(&mddev->disks)) @@ -3964,7 +4132,7 @@ static void autorun_array(mddev_t *mddev) printk(KERN_INFO "md: running: "); - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { char b[BDEVNAME_SIZE]; printk("<%s>", bdevname(rdev->bdev,b)); } @@ -3991,8 +4159,7 @@ static void autorun_array(mddev_t *mddev) */ static void autorun_devices(int part) { - struct list_head *tmp; - mdk_rdev_t *rdev0, *rdev; + mdk_rdev_t *rdev0, *rdev, *tmp; mddev_t *mddev; char b[BDEVNAME_SIZE]; @@ -4007,7 +4174,7 @@ static void autorun_devices(int part) printk(KERN_INFO "md: considering %s ...\n", bdevname(rdev0->bdev,b)); INIT_LIST_HEAD(&candidates); - rdev_for_each_list(rdev, tmp, pending_raid_disks) + rdev_for_each_list(rdev, tmp, &pending_raid_disks) if (super_90_load(rdev, rdev0, 0) >= 0) { printk(KERN_INFO "md: adding %s ...\n", bdevname(rdev->bdev,b)); @@ -4053,7 +4220,7 @@ static void autorun_devices(int part) } else { printk(KERN_INFO "md: created %s\n", mdname(mddev)); mddev->persistent = 1; - rdev_for_each_list(rdev, tmp, candidates) { + rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); if (bind_rdev_to_array(rdev, mddev)) export_rdev(rdev); @@ -4064,7 +4231,7 @@ static void autorun_devices(int part) /* on success, candidates will be empty, on error * it won't... */ - rdev_for_each_list(rdev, tmp, candidates) { + rdev_for_each_list(rdev, tmp, &candidates) { list_del_init(&rdev->same_set); export_rdev(rdev); } @@ -4093,10 +4260,9 @@ static int get_array_info(mddev_t * mddev, void __user * arg) mdu_array_info_t info; int nr,working,active,failed,spare; mdk_rdev_t *rdev; - struct list_head *tmp; nr=working=active=failed=spare=0; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { nr++; if (test_bit(Faulty, &rdev->flags)) failed++; @@ -4614,9 +4780,8 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) static int update_size(mddev_t *mddev, sector_t num_sectors) { - mdk_rdev_t * rdev; + mdk_rdev_t *rdev; int rv; - struct list_head *tmp; int fit = (num_sectors == 0); if (mddev->pers->resize == NULL) @@ -4638,7 +4803,7 @@ static int update_size(mddev_t *mddev, sector_t num_sectors) * grow, and re-add. */ return -EBUSY; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { sector_t avail; avail = rdev->size * 2; @@ -5000,6 +5165,9 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, done_unlock: abort_unlock: + if (mddev->hold_active == UNTIL_IOCTL && + err != -EINVAL) + mddev->hold_active = 0; mddev_unlock(mddev); return err; @@ -5016,14 +5184,25 @@ static int md_open(struct block_device *bdev, fmode_t mode) * Succeed if we can lock the mddev, which confirms that * it isn't being stopped right now. */ - mddev_t *mddev = bdev->bd_disk->private_data; + mddev_t *mddev = mddev_find(bdev->bd_dev); int err; + if (mddev->gendisk != bdev->bd_disk) { + /* we are racing with mddev_put which is discarding this + * bd_disk. 
+ */ + mddev_put(mddev); + /* Wait until bdev->bd_disk is definitely gone */ + flush_scheduled_work(); + /* Then retry the open from the top */ + return -ERESTARTSYS; + } + BUG_ON(mddev != bdev->bd_disk->private_data); + if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) goto out; err = 0; - mddev_get(mddev); atomic_inc(&mddev->openers); mddev_unlock(mddev); @@ -5187,11 +5366,10 @@ static void status_unused(struct seq_file *seq) { int i = 0; mdk_rdev_t *rdev; - struct list_head *tmp; seq_printf(seq, "unused devices: "); - rdev_for_each_list(rdev, tmp, pending_raid_disks) { + list_for_each_entry(rdev, &pending_raid_disks, same_set) { char b[BDEVNAME_SIZE]; i++; seq_printf(seq, "%s ", @@ -5350,7 +5528,6 @@ static int md_seq_show(struct seq_file *seq, void *v) { mddev_t *mddev = v; sector_t size; - struct list_head *tmp2; mdk_rdev_t *rdev; struct mdstat_info *mi = seq->private; struct bitmap *bitmap; @@ -5387,7 +5564,7 @@ static int md_seq_show(struct seq_file *seq, void *v) } size = 0; - rdev_for_each(rdev, tmp2, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { char b[BDEVNAME_SIZE]; seq_printf(seq, " %s[%d]", bdevname(rdev->bdev,b), rdev->desc_nr); @@ -5694,7 +5871,6 @@ void md_do_sync(mddev_t *mddev) struct list_head *tmp; sector_t last_check; int skipped = 0; - struct list_head *rtmp; mdk_rdev_t *rdev; char *desc; @@ -5799,7 +5975,7 @@ void md_do_sync(mddev_t *mddev) /* recovery follows the physical size of devices */ max_sectors = mddev->size << 1; j = MaxSector; - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && @@ -5949,7 +6125,7 @@ void md_do_sync(mddev_t *mddev) } else { if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) mddev->curr_resync = MaxSector; - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && !test_bit(Faulty, &rdev->flags) && !test_bit(In_sync, &rdev->flags) && @@ -5985,10 +6161,9 @@ EXPORT_SYMBOL_GPL(md_do_sync); static int remove_and_add_spares(mddev_t *mddev) { mdk_rdev_t *rdev; - struct list_head *rtmp; int spares = 0; - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk >= 0 && !test_bit(Blocked, &rdev->flags) && (test_bit(Faulty, &rdev->flags) || @@ -6003,8 +6178,8 @@ static int remove_and_add_spares(mddev_t *mddev) } } - if (mddev->degraded && ! mddev->ro) { - rdev_for_each(rdev, rtmp, mddev) { + if (mddev->degraded && ! 
mddev->ro && !mddev->recovery_disabled) { + list_for_each_entry(rdev, &mddev->disks, same_set) { if (rdev->raid_disk >= 0 && !test_bit(In_sync, &rdev->flags) && !test_bit(Blocked, &rdev->flags)) @@ -6056,7 +6231,6 @@ static int remove_and_add_spares(mddev_t *mddev) void md_check_recovery(mddev_t *mddev) { mdk_rdev_t *rdev; - struct list_head *rtmp; if (mddev->bitmap) @@ -6120,7 +6294,7 @@ void md_check_recovery(mddev_t *mddev) if (mddev->flags) md_update_sb(mddev, 0); - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (test_and_clear_bit(StateChanged, &rdev->flags)) sysfs_notify_dirent(rdev->sysfs_state); @@ -6149,13 +6323,13 @@ void md_check_recovery(mddev_t *mddev) * information must be scrapped */ if (!mddev->degraded) - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) rdev->saved_raid_disk = -1; mddev->recovery = 0; /* flag recovery needed just to double check */ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - sysfs_notify(&mddev->kobj, NULL, "sync_action"); + sysfs_notify_dirent(mddev->sysfs_action); md_new_event(mddev); goto unlock; } @@ -6216,7 +6390,7 @@ void md_check_recovery(mddev_t *mddev) mddev->recovery = 0; } else md_wakeup_thread(mddev->sync_thread); - sysfs_notify(&mddev->kobj, NULL, "sync_action"); + sysfs_notify_dirent(mddev->sysfs_action); md_new_event(mddev); } unlock: @@ -6224,7 +6398,8 @@ void md_check_recovery(mddev_t *mddev) clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) - sysfs_notify(&mddev->kobj, NULL, "sync_action"); + if (mddev->sysfs_action) + sysfs_notify_dirent(mddev->sysfs_action); } mddev_unlock(mddev); } @@ -6386,14 +6561,8 @@ static __exit void md_exit(void) unregister_sysctl_table(raid_table_header); remove_proc_entry("mdstat", NULL); for_each_mddev(mddev, tmp) { - struct gendisk *disk = mddev->gendisk; - if (!disk) - continue; export_array(mddev); - del_gendisk(disk); - put_disk(disk); - mddev->gendisk = NULL; - mddev_put(mddev); + mddev->hold_active = 0; } } @@ -6418,6 +6587,7 @@ static int set_ro(const char *val, struct kernel_param *kp) module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); +module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); EXPORT_SYMBOL(register_md_personality); EXPORT_SYMBOL(unregister_md_personality); diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d4ac47d11279..f6d08f241671 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -408,7 +408,6 @@ static int multipath_run (mddev_t *mddev) int disk_idx; struct multipath_info *disk; mdk_rdev_t *rdev; - struct list_head *tmp; if (mddev->level != LEVEL_MULTIPATH) { printk("multipath: %s: raid level not set to multipath IO (%d)\n", @@ -441,7 +440,7 @@ static int multipath_run (mddev_t *mddev) } conf->working_disks = 0; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { disk_idx = rdev->raid_disk; if (disk_idx < 0 || disk_idx >= mddev->raid_disks) diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 8ac6488ad0dc..c605ba805586 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -53,11 +53,10 @@ static int raid0_congested(void *data, int bits) static int create_strip_zones (mddev_t *mddev) { int i, c, j; - sector_t current_offset, curr_zone_offset; + sector_t current_start, curr_zone_start; sector_t min_spacing; raid0_conf_t *conf = mddev_to_conf(mddev); mdk_rdev_t *smallest, 
*rdev1, *rdev2, *rdev; - struct list_head *tmp1, *tmp2; struct strip_zone *zone; int cnt; char b[BDEVNAME_SIZE]; @@ -67,19 +66,19 @@ static int create_strip_zones (mddev_t *mddev) */ conf->nr_strip_zones = 0; - rdev_for_each(rdev1, tmp1, mddev) { - printk("raid0: looking at %s\n", + list_for_each_entry(rdev1, &mddev->disks, same_set) { + printk(KERN_INFO "raid0: looking at %s\n", bdevname(rdev1->bdev,b)); c = 0; - rdev_for_each(rdev2, tmp2, mddev) { - printk("raid0: comparing %s(%llu)", + list_for_each_entry(rdev2, &mddev->disks, same_set) { + printk(KERN_INFO "raid0: comparing %s(%llu)", bdevname(rdev1->bdev,b), (unsigned long long)rdev1->size); - printk(" with %s(%llu)\n", + printk(KERN_INFO " with %s(%llu)\n", bdevname(rdev2->bdev,b), (unsigned long long)rdev2->size); if (rdev2 == rdev1) { - printk("raid0: END\n"); + printk(KERN_INFO "raid0: END\n"); break; } if (rdev2->size == rdev1->size) @@ -88,19 +87,20 @@ static int create_strip_zones (mddev_t *mddev) * Not unique, don't count it as a new * group */ - printk("raid0: EQUAL\n"); + printk(KERN_INFO "raid0: EQUAL\n"); c = 1; break; } - printk("raid0: NOT EQUAL\n"); + printk(KERN_INFO "raid0: NOT EQUAL\n"); } if (!c) { - printk("raid0: ==> UNIQUE\n"); + printk(KERN_INFO "raid0: ==> UNIQUE\n"); conf->nr_strip_zones++; - printk("raid0: %d zones\n", conf->nr_strip_zones); + printk(KERN_INFO "raid0: %d zones\n", + conf->nr_strip_zones); } } - printk("raid0: FINAL %d zones\n", conf->nr_strip_zones); + printk(KERN_INFO "raid0: FINAL %d zones\n", conf->nr_strip_zones); conf->strip_zone = kzalloc(sizeof(struct strip_zone)* conf->nr_strip_zones, GFP_KERNEL); @@ -119,16 +119,17 @@ static int create_strip_zones (mddev_t *mddev) cnt = 0; smallest = NULL; zone->dev = conf->devlist; - rdev_for_each(rdev1, tmp1, mddev) { + list_for_each_entry(rdev1, &mddev->disks, same_set) { int j = rdev1->raid_disk; if (j < 0 || j >= mddev->raid_disks) { - printk("raid0: bad disk number %d - aborting!\n", j); + printk(KERN_ERR "raid0: bad disk number %d - " + "aborting!\n", j); goto abort; } if (zone->dev[j]) { - printk("raid0: multiple devices for %d - aborting!\n", - j); + printk(KERN_ERR "raid0: multiple devices for %d - " + "aborting!\n", j); goto abort; } zone->dev[j] = rdev1; @@ -149,16 +150,16 @@ static int create_strip_zones (mddev_t *mddev) cnt++; } if (cnt != mddev->raid_disks) { - printk("raid0: too few disks (%d of %d) - aborting!\n", - cnt, mddev->raid_disks); + printk(KERN_ERR "raid0: too few disks (%d of %d) - " + "aborting!\n", cnt, mddev->raid_disks); goto abort; } zone->nb_dev = cnt; - zone->size = smallest->size * cnt; - zone->zone_offset = 0; + zone->sectors = smallest->size * cnt * 2; + zone->zone_start = 0; - current_offset = smallest->size; - curr_zone_offset = zone->size; + current_start = smallest->size * 2; + curr_zone_start = zone->sectors; /* now do the other zones */ for (i = 1; i < conf->nr_strip_zones; i++) @@ -166,40 +167,41 @@ static int create_strip_zones (mddev_t *mddev) zone = conf->strip_zone + i; zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks; - printk("raid0: zone %d\n", i); - zone->dev_offset = current_offset; + printk(KERN_INFO "raid0: zone %d\n", i); + zone->dev_start = current_start; smallest = NULL; c = 0; for (j=0; j<cnt; j++) { char b[BDEVNAME_SIZE]; rdev = conf->strip_zone[0].dev[j]; - printk("raid0: checking %s ...", bdevname(rdev->bdev,b)); - if (rdev->size > current_offset) - { - printk(" contained as device %d\n", c); + printk(KERN_INFO "raid0: checking %s ...", + bdevname(rdev->bdev, b)); + if 
(rdev->size > current_start / 2) { + printk(KERN_INFO " contained as device %d\n", + c); zone->dev[c] = rdev; c++; if (!smallest || (rdev->size <smallest->size)) { smallest = rdev; - printk(" (%llu) is smallest!.\n", + printk(KERN_INFO " (%llu) is smallest!.\n", (unsigned long long)rdev->size); } } else - printk(" nope.\n"); + printk(KERN_INFO " nope.\n"); } zone->nb_dev = c; - zone->size = (smallest->size - current_offset) * c; - printk("raid0: zone->nb_dev: %d, size: %llu\n", - zone->nb_dev, (unsigned long long)zone->size); + zone->sectors = (smallest->size * 2 - current_start) * c; + printk(KERN_INFO "raid0: zone->nb_dev: %d, sectors: %llu\n", + zone->nb_dev, (unsigned long long)zone->sectors); - zone->zone_offset = curr_zone_offset; - curr_zone_offset += zone->size; + zone->zone_start = curr_zone_start; + curr_zone_start += zone->sectors; - current_offset = smallest->size; - printk("raid0: current zone offset: %llu\n", - (unsigned long long)current_offset); + current_start = smallest->size * 2; + printk(KERN_INFO "raid0: current zone start: %llu\n", + (unsigned long long)current_start); } /* Now find appropriate hash spacing. @@ -210,16 +212,16 @@ static int create_strip_zones (mddev_t *mddev) * strip though as it's size has no bearing on the efficacy of the hash * table. */ - conf->hash_spacing = curr_zone_offset; - min_spacing = curr_zone_offset; + conf->spacing = curr_zone_start; + min_spacing = curr_zone_start; sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*)); for (i=0; i < conf->nr_strip_zones-1; i++) { - sector_t sz = 0; - for (j=i; j<conf->nr_strip_zones-1 && - sz < min_spacing ; j++) - sz += conf->strip_zone[j].size; - if (sz >= min_spacing && sz < conf->hash_spacing) - conf->hash_spacing = sz; + sector_t s = 0; + for (j = i; j < conf->nr_strip_zones - 1 && + s < min_spacing; j++) + s += conf->strip_zone[j].sectors; + if (s >= min_spacing && s < conf->spacing) + conf->spacing = s; } mddev->queue->unplug_fn = raid0_unplug; @@ -227,7 +229,7 @@ static int create_strip_zones (mddev_t *mddev) mddev->queue->backing_dev_info.congested_fn = raid0_congested; mddev->queue->backing_dev_info.congested_data = mddev; - printk("raid0: done.\n"); + printk(KERN_INFO "raid0: done.\n"); return 0; abort: return 1; @@ -262,10 +264,9 @@ static int raid0_mergeable_bvec(struct request_queue *q, static int raid0_run (mddev_t *mddev) { unsigned cur=0, i=0, nb_zone; - s64 size; + s64 sectors; raid0_conf_t *conf; mdk_rdev_t *rdev; - struct list_head *tmp; if (mddev->chunk_size == 0) { printk(KERN_ERR "md/raid0: non-zero chunk size required.\n"); @@ -291,54 +292,54 @@ static int raid0_run (mddev_t *mddev) /* calculate array device size */ mddev->array_sectors = 0; - rdev_for_each(rdev, tmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) mddev->array_sectors += rdev->size * 2; - printk("raid0 : md_size is %llu blocks.\n", - (unsigned long long)mddev->array_sectors / 2); - printk("raid0 : conf->hash_spacing is %llu blocks.\n", - (unsigned long long)conf->hash_spacing); + printk(KERN_INFO "raid0 : md_size is %llu sectors.\n", + (unsigned long long)mddev->array_sectors); + printk(KERN_INFO "raid0 : conf->spacing is %llu sectors.\n", + (unsigned long long)conf->spacing); { - sector_t s = mddev->array_sectors / 2; - sector_t space = conf->hash_spacing; + sector_t s = mddev->array_sectors; + sector_t space = conf->spacing; int round; - conf->preshift = 0; + conf->sector_shift = 0; if (sizeof(sector_t) > sizeof(u32)) { /*shift down space and s so that sector_div will work */ while (space 
> (sector_t) (~(u32)0)) { s >>= 1; space >>= 1; s += 1; /* force round-up */ - conf->preshift++; + conf->sector_shift++; } } round = sector_div(s, (u32)space) ? 1 : 0; nb_zone = s + round; } - printk("raid0 : nb_zone is %d.\n", nb_zone); + printk(KERN_INFO "raid0 : nb_zone is %d.\n", nb_zone); - printk("raid0 : Allocating %Zd bytes for hash.\n", + printk(KERN_INFO "raid0 : Allocating %zu bytes for hash.\n", nb_zone*sizeof(struct strip_zone*)); conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL); if (!conf->hash_table) goto out_free_conf; - size = conf->strip_zone[cur].size; + sectors = conf->strip_zone[cur].sectors; conf->hash_table[0] = conf->strip_zone + cur; for (i=1; i< nb_zone; i++) { - while (size <= conf->hash_spacing) { + while (sectors <= conf->spacing) { cur++; - size += conf->strip_zone[cur].size; + sectors += conf->strip_zone[cur].sectors; } - size -= conf->hash_spacing; + sectors -= conf->spacing; conf->hash_table[i] = conf->strip_zone + cur; } - if (conf->preshift) { - conf->hash_spacing >>= conf->preshift; - /* round hash_spacing up so when we divide by it, we + if (conf->sector_shift) { + conf->spacing >>= conf->sector_shift; + /* round spacing up so when we divide by it, we * err on the side of too-low, which is safest */ - conf->hash_spacing++; + conf->spacing++; } /* calculate the max read-ahead size. @@ -387,12 +388,12 @@ static int raid0_stop (mddev_t *mddev) static int raid0_make_request (struct request_queue *q, struct bio *bio) { mddev_t *mddev = q->queuedata; - unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects; + unsigned int sect_in_chunk, chunksect_bits, chunk_sects; raid0_conf_t *conf = mddev_to_conf(mddev); struct strip_zone *zone; mdk_rdev_t *tmp_dev; sector_t chunk; - sector_t block, rsect; + sector_t sector, rsect; const int rw = bio_data_dir(bio); int cpu; @@ -407,11 +408,9 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) bio_sectors(bio)); part_stat_unlock(); - chunk_size = mddev->chunk_size >> 10; chunk_sects = mddev->chunk_size >> 9; - chunksize_bits = ffz(~chunk_size); - block = bio->bi_sector >> 1; - + chunksect_bits = ffz(~chunk_sects); + sector = bio->bi_sector; if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) { struct bio_pair *bp; @@ -434,28 +433,27 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) { - sector_t x = block >> conf->preshift; - sector_div(x, (u32)conf->hash_spacing); + sector_t x = sector >> conf->sector_shift; + sector_div(x, (u32)conf->spacing); zone = conf->hash_table[x]; } - - while (block >= (zone->zone_offset + zone->size)) + + while (sector >= zone->zone_start + zone->sectors) zone++; - - sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1); + + sect_in_chunk = bio->bi_sector & (chunk_sects - 1); { - sector_t x = (block - zone->zone_offset) >> chunksize_bits; + sector_t x = (sector - zone->zone_start) >> chunksect_bits; sector_div(x, zone->nb_dev); chunk = x; - x = block >> chunksize_bits; + x = sector >> chunksect_bits; tmp_dev = zone->dev[sector_div(x, zone->nb_dev)]; } - rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1) - + sect_in_chunk; + rsect = (chunk << chunksect_bits) + zone->dev_start + sect_in_chunk; bio->bi_bdev = tmp_dev->bdev; bio->bi_sector = rsect + tmp_dev->data_offset; @@ -467,7 +465,7 @@ static int raid0_make_request (struct request_queue *q, struct bio *bio) bad_map: printk("raid0_make_request bug: can't convert block across chunks" - " or bigger than %dk %llu 
%d\n", chunk_size, + " or bigger than %dk %llu %d\n", chunk_sects / 2, (unsigned long long)bio->bi_sector, bio->bi_size >> 10); bio_io_error(bio); @@ -492,10 +490,10 @@ static void raid0_status (struct seq_file *seq, mddev_t *mddev) seq_printf(seq, "%s/", bdevname( conf->strip_zone[j].dev[k]->bdev,b)); - seq_printf(seq, "] zo=%d do=%d s=%d\n", - conf->strip_zone[j].zone_offset, - conf->strip_zone[j].dev_offset, - conf->strip_zone[j].size); + seq_printf(seq, "] zs=%d ds=%d s=%d\n", + conf->strip_zone[j].zone_start, + conf->strip_zone[j].dev_start, + conf->strip_zone[j].sectors); } #endif seq_printf(seq, " %dk chunks", mddev->chunk_size/1024); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9c788e2489b1..7b4f5f7155d8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1016,12 +1016,16 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) * else mark the drive as failed */ if (test_bit(In_sync, &rdev->flags) - && (conf->raid_disks - mddev->degraded) == 1) + && (conf->raid_disks - mddev->degraded) == 1) { /* * Don't fail the drive, act as though we were just a - * normal single drive + * normal single drive. + * However don't try a recovery from this drive as + * it is very likely to fail. */ + mddev->recovery_disabled = 1; return; + } if (test_and_clear_bit(In_sync, &rdev->flags)) { unsigned long flags; spin_lock_irqsave(&conf->device_lock, flags); @@ -1919,7 +1923,6 @@ static int run(mddev_t *mddev) int i, j, disk_idx; mirror_info_t *disk; mdk_rdev_t *rdev; - struct list_head *tmp; if (mddev->level != 1) { printk("raid1: %s: raid level not set to mirroring (%d)\n", @@ -1964,7 +1967,7 @@ static int run(mddev_t *mddev) spin_lock_init(&conf->device_lock); mddev->queue->queue_lock = &conf->device_lock; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 970a96ef9b18..6736d6dff981 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2025,7 +2025,6 @@ static int run(mddev_t *mddev) int i, disk_idx; mirror_info_t *disk; mdk_rdev_t *rdev; - struct list_head *tmp; int nc, fc, fo; sector_t stride, size; @@ -2108,7 +2107,7 @@ static int run(mddev_t *mddev) spin_lock_init(&conf->device_lock); mddev->queue->queue_lock = &conf->device_lock; - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { disk_idx = rdev->raid_disk; if (disk_idx >= mddev->raid_disks || disk_idx < 0) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a36a7435edf5..a5ba080d303b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3998,7 +3998,6 @@ static int run(mddev_t *mddev) int raid_disk, memory; mdk_rdev_t *rdev; struct disk_info *disk; - struct list_head *tmp; int working_disks = 0; if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { @@ -4108,7 +4107,7 @@ static int run(mddev_t *mddev) pr_debug("raid5: run(%s) called.\n", mdname(mddev)); - rdev_for_each(rdev, tmp, mddev) { + list_for_each_entry(rdev, &mddev->disks, same_set) { raid_disk = rdev->raid_disk; if (raid_disk >= conf->raid_disks || raid_disk < 0) @@ -4533,7 +4532,6 @@ static int raid5_start_reshape(mddev_t *mddev) { raid5_conf_t *conf = mddev_to_conf(mddev); mdk_rdev_t *rdev; - struct list_head *rtmp; int spares = 0; int added_devices = 0; unsigned long flags; @@ -4541,7 +4539,7 @@ static int raid5_start_reshape(mddev_t *mddev) if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) return -EBUSY; - 
rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) spares++; @@ -4563,7 +4561,7 @@ static int raid5_start_reshape(mddev_t *mddev) /* Add some new drives, as many as will fit. * We know there are enough to make the newly sized array work. */ - rdev_for_each(rdev, rtmp, mddev) + list_for_each_entry(rdev, &mddev->disks, same_set) if (rdev->raid_disk < 0 && !test_bit(Faulty, &rdev->flags)) { if (raid5_add_disk(mddev, rdev) == 0) { diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index fee7304102af..3949a1c73451 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -498,6 +498,18 @@ config SGI_GRU_DEBUG This option enables addition debugging code for the SGI GRU driver. If you are unsure, say N. +config DELL_LAPTOP + tristate "Dell Laptop Extras (EXPERIMENTAL)" + depends on X86 + depends on DCDBAS + depends on EXPERIMENTAL + depends on BACKLIGHT_CLASS_DEVICE + depends on RFKILL + default n + ---help--- + This driver adds support for rfkill and backlight control to Dell + laptops. + source "drivers/misc/c2port/Kconfig" endif # MISC_DEVICES diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 817f7f5ab3bd..5de863a0e395 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_ICS932S401) += ics932s401.o obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o obj-$(CONFIG_LKDTM) += lkdtm.o obj-$(CONFIG_TIFM_CORE) += tifm_core.o +obj-$(CONFIG_DELL_LAPTOP) += dell-laptop.o obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o obj-$(CONFIG_PHANTOM) += phantom.o obj-$(CONFIG_SGI_IOC4) += ioc4.o diff --git a/drivers/misc/dell-laptop.c b/drivers/misc/dell-laptop.c new file mode 100644 index 000000000000..4d33a2068b7a --- /dev/null +++ b/drivers/misc/dell-laptop.c @@ -0,0 +1,436 @@ +/* + * Driver for Dell laptop extras + * + * Copyright (c) Red Hat <mjg@redhat.com> + * + * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell + * Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/backlight.h> +#include <linux/err.h> +#include <linux/dmi.h> +#include <linux/io.h> +#include <linux/rfkill.h> +#include <linux/power_supply.h> +#include <linux/acpi.h> +#include "../firmware/dcdbas.h" + +#define BRIGHTNESS_TOKEN 0x7d + +/* This structure will be modified by the firmware when we enter + * system management mode, hence the volatiles */ + +struct calling_interface_buffer { + u16 class; + u16 select; + volatile u32 input[4]; + volatile u32 output[4]; +} __packed; + +struct calling_interface_token { + u16 tokenID; + u16 location; + union { + u16 value; + u16 stringlength; + }; +}; + +struct calling_interface_structure { + struct dmi_header header; + u16 cmdIOAddress; + u8 cmdIOCode; + u32 supportedCmds; + struct calling_interface_token tokens[]; +} __packed; + +static int da_command_address; +static int da_command_code; +static int da_num_tokens; +static struct calling_interface_token *da_tokens; + +static struct backlight_device *dell_backlight_device; +static struct rfkill *wifi_rfkill; +static struct rfkill *bluetooth_rfkill; +static struct rfkill *wwan_rfkill; + +static const struct dmi_system_id __initdata dell_device_table[] = { + { + .ident = "Dell laptop", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_CHASSIS_TYPE, "8"), + }, + }, + { } +}; + +static void parse_da_table(const struct dmi_header *dm) +{ + /* Final token is a terminator, so we don't want to copy it */ + int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; + struct calling_interface_structure *table = + container_of(dm, struct calling_interface_structure, header); + + /* 4 bytes of table header, plus 7 bytes of Dell header, plus at least + 6 bytes of entry */ + + if (dm->length < 17) + return; + + da_command_address = table->cmdIOAddress; + da_command_code = table->cmdIOCode; + + da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * + sizeof(struct calling_interface_token), + GFP_KERNEL); + + if (!da_tokens) + return; + + memcpy(da_tokens+da_num_tokens, table->tokens, + sizeof(struct calling_interface_token) * tokens); + + da_num_tokens += tokens; +} + +static void find_tokens(const struct dmi_header *dm) +{ + switch (dm->type) { + case 0xd4: /* Indexed IO */ + break; + case 0xd5: /* Protected Area Type 1 */ + break; + case 0xd6: /* Protected Area Type 2 */ + break; + case 0xda: /* Calling interface */ + parse_da_table(dm); + break; + } +} + +static int find_token_location(int tokenid) +{ + int i; + for (i = 0; i < da_num_tokens; i++) { + if (da_tokens[i].tokenID == tokenid) + return da_tokens[i].location; + } + + return -1; +} + +static struct calling_interface_buffer * +dell_send_request(struct calling_interface_buffer *buffer, int class, + int select) +{ + struct smi_cmd command; + + command.magic = SMI_CMD_MAGIC; + command.command_address = da_command_address; + command.command_code = da_command_code; + command.ebx = virt_to_phys(buffer); + command.ecx = 0x42534931; + + buffer->class = class; + buffer->select = select; + + dcdbas_smi_request(&command); + + return buffer; +} + +/* Derived from information in DellWirelessCtl.cpp: + Class 17, select 11 is radio control. It returns an array of 32-bit values. 
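dell-laptop locates firmware settings by token: parse_da_table() above copies the token array out of the type-0xda SMBIOS record, and find_token_location() maps a token ID such as BRIGHTNESS_TOKEN (0x7d) to its NVRAM location, returning -1 when the token is absent. A stand-alone model of that lookup (illustrative, not part of the patch; the token table contents here are invented, the real ones come from DMI):

#include <stdint.h>
#include <stdio.h>

struct token {
	uint16_t id;
	uint16_t location;
};

/* Invented contents; the real table is copied out of the SMBIOS 0xda record. */
static const struct token tokens[] = {
	{ 0x007d, 0x0041 },	/* brightness token */
	{ 0x0123, 0x0052 },
};

static int find_location(uint16_t id)
{
	for (size_t i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++)
		if (tokens[i].id == id)
			return tokens[i].location;
	return -1;		/* same "not found" value the driver uses */
}

int main(void)
{
	printf("brightness token lives at location %d\n", find_location(0x7d));
	return 0;
}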
+ + result[0]: return code + result[1]: + Bit 0: Hardware switch supported + Bit 1: Wifi locator supported + Bit 2: Wifi is supported + Bit 3: Bluetooth is supported + Bit 4: WWAN is supported + Bit 5: Wireless keyboard supported + Bits 6-7: Reserved + Bit 8: Wifi is installed + Bit 9: Bluetooth is installed + Bit 10: WWAN is installed + Bits 11-15: Reserved + Bit 16: Hardware switch is on + Bit 17: Wifi is blocked + Bit 18: Bluetooth is blocked + Bit 19: WWAN is blocked + Bits 20-31: Reserved + result[2]: NVRAM size in bytes + result[3]: NVRAM format version number +*/ + +static int dell_rfkill_set(int radio, enum rfkill_state state) +{ + struct calling_interface_buffer buffer; + int disable = (state == RFKILL_STATE_UNBLOCKED) ? 0 : 1; + + memset(&buffer, 0, sizeof(struct calling_interface_buffer)); + buffer.input[0] = (1 | (radio<<8) | (disable << 16)); + dell_send_request(&buffer, 17, 11); + + return 0; +} + +static int dell_wifi_set(void *data, enum rfkill_state state) +{ + return dell_rfkill_set(1, state); +} + +static int dell_bluetooth_set(void *data, enum rfkill_state state) +{ + return dell_rfkill_set(2, state); +} + +static int dell_wwan_set(void *data, enum rfkill_state state) +{ + return dell_rfkill_set(3, state); +} + +static int dell_rfkill_get(int bit, enum rfkill_state *state) +{ + struct calling_interface_buffer buffer; + int status; + int new_state = RFKILL_STATE_HARD_BLOCKED; + + memset(&buffer, 0, sizeof(struct calling_interface_buffer)); + dell_send_request(&buffer, 17, 11); + status = buffer.output[1]; + + if (status & (1<<16)) + new_state = RFKILL_STATE_SOFT_BLOCKED; + + if (status & (1<<bit)) + *state = new_state; + else + *state = RFKILL_STATE_UNBLOCKED; + + return 0; +} + +static int dell_wifi_get(void *data, enum rfkill_state *state) +{ + return dell_rfkill_get(17, state); +} + +static int dell_bluetooth_get(void *data, enum rfkill_state *state) +{ + return dell_rfkill_get(18, state); +} + +static int dell_wwan_get(void *data, enum rfkill_state *state) +{ + return dell_rfkill_get(19, state); +} + +static int dell_setup_rfkill(void) +{ + struct calling_interface_buffer buffer; + int status; + int ret; + + memset(&buffer, 0, sizeof(struct calling_interface_buffer)); + dell_send_request(&buffer, 17, 11); + status = buffer.output[1]; + + if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) { + wifi_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WLAN); + if (!wifi_rfkill) + goto err_wifi; + wifi_rfkill->name = "dell-wifi"; + wifi_rfkill->toggle_radio = dell_wifi_set; + wifi_rfkill->get_state = dell_wifi_get; + ret = rfkill_register(wifi_rfkill); + if (ret) + goto err_wifi; + } + + if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) { + bluetooth_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_BLUETOOTH); + if (!bluetooth_rfkill) + goto err_bluetooth; + bluetooth_rfkill->name = "dell-bluetooth"; + bluetooth_rfkill->toggle_radio = dell_bluetooth_set; + bluetooth_rfkill->get_state = dell_bluetooth_get; + ret = rfkill_register(bluetooth_rfkill); + if (ret) + goto err_bluetooth; + } + + if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) { + wwan_rfkill = rfkill_allocate(NULL, RFKILL_TYPE_WWAN); + if (!wwan_rfkill) + goto err_wwan; + wwan_rfkill->name = "dell-wwan"; + wwan_rfkill->toggle_radio = dell_wwan_set; + wwan_rfkill->get_state = dell_wwan_get; + ret = rfkill_register(wwan_rfkill); + if (ret) + goto err_wwan; + } + + return 0; +err_wwan: + if (wwan_rfkill) + rfkill_free(wwan_rfkill); + if (bluetooth_rfkill) { + rfkill_unregister(bluetooth_rfkill); + bluetooth_rfkill = NULL; + } +err_bluetooth: + if 
(bluetooth_rfkill) + rfkill_free(bluetooth_rfkill); + if (wifi_rfkill) { + rfkill_unregister(wifi_rfkill); + wifi_rfkill = NULL; + } +err_wifi: + if (wifi_rfkill) + rfkill_free(wifi_rfkill); + + return ret; +} + +static int dell_send_intensity(struct backlight_device *bd) +{ + struct calling_interface_buffer buffer; + + memset(&buffer, 0, sizeof(struct calling_interface_buffer)); + buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN); + buffer.input[1] = bd->props.brightness; + + if (buffer.input[0] == -1) + return -ENODEV; + + if (power_supply_is_system_supplied() > 0) + dell_send_request(&buffer, 1, 2); + else + dell_send_request(&buffer, 1, 1); + + return 0; +} + +static int dell_get_intensity(struct backlight_device *bd) +{ + struct calling_interface_buffer buffer; + + memset(&buffer, 0, sizeof(struct calling_interface_buffer)); + buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN); + + if (buffer.input[0] == -1) + return -ENODEV; + + if (power_supply_is_system_supplied() > 0) + dell_send_request(&buffer, 0, 2); + else + dell_send_request(&buffer, 0, 1); + + return buffer.output[1]; +} + +static struct backlight_ops dell_ops = { + .get_brightness = dell_get_intensity, + .update_status = dell_send_intensity, +}; + +static int __init dell_init(void) +{ + struct calling_interface_buffer buffer; + int max_intensity = 0; + int ret; + + if (!dmi_check_system(dell_device_table)) + return -ENODEV; + + dmi_walk(find_tokens); + + if (!da_tokens) { + printk(KERN_INFO "dell-laptop: Unable to find dmi tokens\n"); + return -ENODEV; + } + + ret = dell_setup_rfkill(); + + if (ret) { + printk(KERN_WARNING "dell-laptop: Unable to setup rfkill\n"); + goto out; + } + +#ifdef CONFIG_ACPI + /* In the event of an ACPI backlight being available, don't + * register the platform controller. 
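As the DellWirelessCtl.cpp-derived comment above documents, the radio-control request (class 17, select 11) packs everything into one 32-bit status word: bit 16 reports the hardware switch and bits 17-19 the wifi/bluetooth/WWAN block flags, which is exactly what dell_rfkill_get() inspects. A small sketch of that decoding on a captured status value (illustrative; the enum is a simplified stand-in for the rfkill_state values):

#include <stdint.h>
#include <stdio.h>

enum radio_state { UNBLOCKED, SOFT_BLOCKED, HARD_BLOCKED };

/* bit 17 = wifi, 18 = bluetooth, 19 = WWAN (per the comment in the driver) */
static enum radio_state decode_radio(uint32_t status, int bit)
{
	if (!(status & (1u << bit)))
		return UNBLOCKED;
	/* Blocked: if the hardware switch (bit 16) is on, it is only a soft block. */
	return (status & (1u << 16)) ? SOFT_BLOCKED : HARD_BLOCKED;
}

int main(void)
{
	uint32_t status = (1u << 16) | (1u << 18);	/* switch on, bluetooth blocked */

	printf("wifi=%d bluetooth=%d wwan=%d\n",
	       decode_radio(status, 17), decode_radio(status, 18),
	       decode_radio(status, 19));		/* 0 1 0 */
	return 0;
}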
+ */ + if (acpi_video_backlight_support()) + return 0; +#endif + + memset(&buffer, 0, sizeof(struct calling_interface_buffer)); + buffer.input[0] = find_token_location(BRIGHTNESS_TOKEN); + + if (buffer.input[0] != -1) { + dell_send_request(&buffer, 0, 2); + max_intensity = buffer.output[3]; + } + + if (max_intensity) { + dell_backlight_device = backlight_device_register( + "dell_backlight", + NULL, NULL, + &dell_ops); + + if (IS_ERR(dell_backlight_device)) { + ret = PTR_ERR(dell_backlight_device); + dell_backlight_device = NULL; + goto out; + } + + dell_backlight_device->props.max_brightness = max_intensity; + dell_backlight_device->props.brightness = + dell_get_intensity(dell_backlight_device); + backlight_update_status(dell_backlight_device); + } + + return 0; +out: + if (wifi_rfkill) + rfkill_unregister(wifi_rfkill); + if (bluetooth_rfkill) + rfkill_unregister(bluetooth_rfkill); + if (wwan_rfkill) + rfkill_unregister(wwan_rfkill); + kfree(da_tokens); + return ret; +} + +static void __exit dell_exit(void) +{ + backlight_device_unregister(dell_backlight_device); + if (wifi_rfkill) + rfkill_unregister(wifi_rfkill); + if (bluetooth_rfkill) + rfkill_unregister(bluetooth_rfkill); + if (wwan_rfkill) + rfkill_unregister(wwan_rfkill); +} + +module_init(dell_init); +module_exit(dell_exit); + +MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); +MODULE_DESCRIPTION("Dell laptop driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("dmi:*svnDellInc.:*:ct8:*"); diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 6fde0a2e3567..bc33200535fc 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -120,6 +120,13 @@ config MTD_PHRAM doesn't have access to, memory beyond the mem=xxx limit, nvram, memory on the video card, etc... +config MTD_PS3VRAM + tristate "PS3 video RAM" + depends on FB_PS3 + help + This driver allows you to use excess PS3 video RAM as volatile + storage or system swap. + config MTD_LART tristate "28F160xx flash driver for LART" depends on SA1100_LART diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 0993d5cf3923..e51521df4e40 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_MTD_LART) += lart.o obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o obj-$(CONFIG_MTD_M25P80) += m25p80.o +obj-$(CONFIG_MTD_PS3VRAM) += ps3vram.o diff --git a/drivers/mtd/devices/ps3vram.c b/drivers/mtd/devices/ps3vram.c new file mode 100644 index 000000000000..d21e9beb7ed2 --- /dev/null +++ b/drivers/mtd/devices/ps3vram.c @@ -0,0 +1,768 @@ +/** + * ps3vram - Use extra PS3 video ram as MTD block device. 
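The brightness path above always issues the same token read or write through dell_send_request(); only the class and select codes change: class 0 reads the token, class 1 writes it, and select 2 targets the AC-power table while select 1 targets the battery table, chosen from power_supply_is_system_supplied(). A tiny model of that choice (illustrative; brightness_request() and its struct are invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct smi_args {
	int class;	/* 0 = read token, 1 = write token */
	int select;	/* 1 = battery table, 2 = AC table */
};

static struct smi_args brightness_request(bool write, bool on_ac)
{
	struct smi_args args = { .class = write ? 1 : 0, .select = on_ac ? 2 : 1 };

	return args;
}

int main(void)
{
	struct smi_args args = brightness_request(true, false);

	printf("write on battery -> class %d, select %d\n", args.class, args.select);
	return 0;
}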
+ * + * Copyright (c) 2007-2008 Jim Paris <jim@jtan.com> + * Added support RSX DMA Vivien Chappelier <vivien.chappelier@free.fr> + */ + +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/version.h> +#include <linux/gfp.h> +#include <linux/delay.h> +#include <linux/mtd/mtd.h> + +#include <asm/lv1call.h> +#include <asm/ps3.h> + +#define DEVICE_NAME "ps3vram" + +#define XDR_BUF_SIZE (2 * 1024 * 1024) /* XDR buffer (must be 1MiB aligned) */ +#define XDR_IOIF 0x0c000000 + +#define FIFO_BASE XDR_IOIF +#define FIFO_SIZE (64 * 1024) + +#define DMA_PAGE_SIZE (4 * 1024) + +#define CACHE_PAGE_SIZE (256 * 1024) +#define CACHE_PAGE_COUNT ((XDR_BUF_SIZE - FIFO_SIZE) / CACHE_PAGE_SIZE) + +#define CACHE_OFFSET CACHE_PAGE_SIZE +#define FIFO_OFFSET 0 + +#define CTRL_PUT 0x10 +#define CTRL_GET 0x11 +#define CTRL_TOP 0x15 + +#define UPLOAD_SUBCH 1 +#define DOWNLOAD_SUBCH 2 + +#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c +#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104 + +#define L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT 0x601 + +struct mtd_info ps3vram_mtd; + +#define CACHE_PAGE_PRESENT 1 +#define CACHE_PAGE_DIRTY 2 + +struct ps3vram_tag { + unsigned int address; + unsigned int flags; +}; + +struct ps3vram_cache { + unsigned int page_count; + unsigned int page_size; + struct ps3vram_tag *tags; +}; + +struct ps3vram_priv { + u64 memory_handle; + u64 context_handle; + u32 *ctrl; + u32 *reports; + u8 __iomem *ddr_base; + u8 *xdr_buf; + + u32 *fifo_base; + u32 *fifo_ptr; + + struct device *dev; + struct ps3vram_cache cache; + + /* Used to serialize cache/DMA operations */ + struct mutex lock; +}; + +#define DMA_NOTIFIER_HANDLE_BASE 0x66604200 /* first DMA notifier handle */ +#define DMA_NOTIFIER_OFFSET_BASE 0x1000 /* first DMA notifier offset */ +#define DMA_NOTIFIER_SIZE 0x40 +#define NOTIFIER 7 /* notifier used for completion report */ + +/* A trailing '-' means to subtract off ps3fb_videomemory.size */ +char *size = "256M-"; +module_param(size, charp, 0); +MODULE_PARM_DESC(size, "memory size"); + +static u32 *ps3vram_get_notifier(u32 *reports, int notifier) +{ + return (void *) reports + + DMA_NOTIFIER_OFFSET_BASE + + DMA_NOTIFIER_SIZE * notifier; +} + +static void ps3vram_notifier_reset(struct mtd_info *mtd) +{ + int i; + + struct ps3vram_priv *priv = mtd->priv; + u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); + for (i = 0; i < 4; i++) + notify[i] = 0xffffffff; +} + +static int ps3vram_notifier_wait(struct mtd_info *mtd, unsigned int timeout_ms) +{ + struct ps3vram_priv *priv = mtd->priv; + u32 *notify = ps3vram_get_notifier(priv->reports, NOTIFIER); + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + do { + if (!notify[3]) + return 0; + msleep(1); + } while (time_before(jiffies, timeout)); + + return -ETIMEDOUT; +} + +static void ps3vram_init_ring(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; + priv->ctrl[CTRL_GET] = FIFO_BASE + FIFO_OFFSET; +} + +static int ps3vram_wait_ring(struct mtd_info *mtd, unsigned int timeout_ms) +{ + struct ps3vram_priv *priv = mtd->priv; + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + + do { + if (priv->ctrl[CTRL_PUT] == priv->ctrl[CTRL_GET]) + return 0; + msleep(1); + } while (time_before(jiffies, timeout)); + + dev_dbg(priv->dev, "%s:%d: FIFO timeout (%08x/%08x/%08x)\n", __func__, 
+ __LINE__, priv->ctrl[CTRL_PUT], priv->ctrl[CTRL_GET], + priv->ctrl[CTRL_TOP]); + + return -ETIMEDOUT; +} + +static void ps3vram_out_ring(struct ps3vram_priv *priv, u32 data) +{ + *(priv->fifo_ptr)++ = data; +} + +static void ps3vram_begin_ring(struct ps3vram_priv *priv, u32 chan, + u32 tag, u32 size) +{ + ps3vram_out_ring(priv, (size << 18) | (chan << 13) | tag); +} + +static void ps3vram_rewind_ring(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + u64 status; + + ps3vram_out_ring(priv, 0x20000000 | (FIFO_BASE + FIFO_OFFSET)); + + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET; + + /* asking the HV for a blit will kick the fifo */ + status = lv1_gpu_context_attribute(priv->context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, + 0, 0, 0, 0); + if (status) + dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", + __func__, __LINE__); + + priv->fifo_ptr = priv->fifo_base; +} + +static void ps3vram_fire_ring(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + u64 status; + + mutex_lock(&ps3_gpu_mutex); + + priv->ctrl[CTRL_PUT] = FIFO_BASE + FIFO_OFFSET + + (priv->fifo_ptr - priv->fifo_base) * sizeof(u32); + + /* asking the HV for a blit will kick the fifo */ + status = lv1_gpu_context_attribute(priv->context_handle, + L1GPU_CONTEXT_ATTRIBUTE_FB_BLIT, + 0, 0, 0, 0); + if (status) + dev_err(priv->dev, "%s:%d: lv1_gpu_context_attribute failed\n", + __func__, __LINE__); + + if ((priv->fifo_ptr - priv->fifo_base) * sizeof(u32) > + FIFO_SIZE - 1024) { + dev_dbg(priv->dev, "%s:%d: fifo full, rewinding\n", __func__, + __LINE__); + ps3vram_wait_ring(mtd, 200); + ps3vram_rewind_ring(mtd); + } + + mutex_unlock(&ps3_gpu_mutex); +} + +static void ps3vram_bind(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0, 1); + ps3vram_out_ring(priv, 0x31337303); + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x180, 3); + ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); + ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ + ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ + + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0, 1); + ps3vram_out_ring(priv, 0x3137c0de); + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x180, 3); + ps3vram_out_ring(priv, DMA_NOTIFIER_HANDLE_BASE + NOTIFIER); + ps3vram_out_ring(priv, 0xfeed0000); /* DMA video RAM instance */ + ps3vram_out_ring(priv, 0xfeed0001); /* DMA system RAM instance */ + + ps3vram_fire_ring(mtd); +} + +static int ps3vram_upload(struct mtd_info *mtd, unsigned int src_offset, + unsigned int dst_offset, int len, int count) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_begin_ring(priv, UPLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + ps3vram_out_ring(priv, XDR_IOIF + src_offset); + ps3vram_out_ring(priv, dst_offset); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, count); + ps3vram_out_ring(priv, (1 << 8) | 1); + ps3vram_out_ring(priv, 0); + + ps3vram_notifier_reset(mtd); + ps3vram_begin_ring(priv, UPLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); + ps3vram_out_ring(priv, 0); + ps3vram_begin_ring(priv, UPLOAD_SUBCH, 0x100, 1); + ps3vram_out_ring(priv, 0); + ps3vram_fire_ring(mtd); + if (ps3vram_notifier_wait(mtd, 200) < 0) { + dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, + __LINE__); + return -1; + } + + return 0; +} + +static int ps3vram_download(struct mtd_info *mtd, unsigned int src_offset, + unsigned int dst_offset, int len, int 
count) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); + ps3vram_out_ring(priv, src_offset); + ps3vram_out_ring(priv, XDR_IOIF + dst_offset); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, len); + ps3vram_out_ring(priv, count); + ps3vram_out_ring(priv, (1 << 8) | 1); + ps3vram_out_ring(priv, 0); + + ps3vram_notifier_reset(mtd); + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, + NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY, 1); + ps3vram_out_ring(priv, 0); + ps3vram_begin_ring(priv, DOWNLOAD_SUBCH, 0x100, 1); + ps3vram_out_ring(priv, 0); + ps3vram_fire_ring(mtd); + if (ps3vram_notifier_wait(mtd, 200) < 0) { + dev_dbg(priv->dev, "%s:%d: notifier timeout\n", __func__, + __LINE__); + return -1; + } + + return 0; +} + +static void ps3vram_cache_evict(struct mtd_info *mtd, int entry) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + + if (cache->tags[entry].flags & CACHE_PAGE_DIRTY) { + dev_dbg(priv->dev, "%s:%d: flushing %d : 0x%08x\n", __func__, + __LINE__, entry, cache->tags[entry].address); + if (ps3vram_upload(mtd, + CACHE_OFFSET + entry * cache->page_size, + cache->tags[entry].address, + DMA_PAGE_SIZE, + cache->page_size / DMA_PAGE_SIZE) < 0) { + dev_dbg(priv->dev, "%s:%d: failed to upload from " + "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, + entry * cache->page_size, + cache->tags[entry].address, cache->page_size); + } + cache->tags[entry].flags &= ~CACHE_PAGE_DIRTY; + } +} + +static void ps3vram_cache_load(struct mtd_info *mtd, int entry, + unsigned int address) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + + dev_dbg(priv->dev, "%s:%d: fetching %d : 0x%08x\n", __func__, __LINE__, + entry, address); + if (ps3vram_download(mtd, + address, + CACHE_OFFSET + entry * cache->page_size, + DMA_PAGE_SIZE, + cache->page_size / DMA_PAGE_SIZE) < 0) { + dev_err(priv->dev, "%s:%d: failed to download from " + "0x%x to 0x%x size 0x%x\n", __func__, __LINE__, address, + entry * cache->page_size, cache->page_size); + } + + cache->tags[entry].address = address; + cache->tags[entry].flags |= CACHE_PAGE_PRESENT; +} + + +static void ps3vram_cache_flush(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + int i; + + dev_dbg(priv->dev, "%s:%d: FLUSH\n", __func__, __LINE__); + for (i = 0; i < cache->page_count; i++) { + ps3vram_cache_evict(mtd, i); + cache->tags[i].flags = 0; + } +} + +static unsigned int ps3vram_cache_match(struct mtd_info *mtd, loff_t address) +{ + struct ps3vram_priv *priv = mtd->priv; + struct ps3vram_cache *cache = &priv->cache; + unsigned int base; + unsigned int offset; + int i; + static int counter; + + offset = (unsigned int) (address & (cache->page_size - 1)); + base = (unsigned int) (address - offset); + + /* fully associative check */ + for (i = 0; i < cache->page_count; i++) { + if ((cache->tags[i].flags & CACHE_PAGE_PRESENT) && + cache->tags[i].address == base) { + dev_dbg(priv->dev, "%s:%d: found entry %d : 0x%08x\n", + __func__, __LINE__, i, cache->tags[i].address); + return i; + } + } + + /* choose a random entry */ + i = (jiffies + (counter++)) % cache->page_count; + dev_dbg(priv->dev, "%s:%d: using entry %d\n", __func__, __LINE__, i); + + ps3vram_cache_evict(mtd, i); + ps3vram_cache_load(mtd, i, base); + + return i; +} + +static int ps3vram_cache_init(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + 
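ps3vram keeps a small write-back cache of CACHE_PAGE_SIZE (256 KiB) pages inside the XDR buffer: ps3vram_cache_match() above splits an MTD offset into a page-aligned base plus an in-page offset, searches the tags fully associatively, and on a miss evicts a pseudo-randomly picked entry before downloading the wanted page. A user-space model of the index arithmetic and the lookup (illustrative, not part of the patch; the pre-filled tag array stands in for a running cache and the eviction path is left out):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	(256 * 1024)	/* CACHE_PAGE_SIZE in the driver */
#define PAGE_COUNT	7		/* CACHE_PAGE_COUNT for a 2 MiB XDR buffer */
#define PRESENT		1

struct tag { unsigned int address; unsigned int flags; };

static struct tag tags[PAGE_COUNT] = {
	[0] = { .address = 0x00000000, .flags = PRESENT },
	[3] = { .address = 0x00280000, .flags = PRESENT },
};

/* Returns the cache entry holding 'address', or -1 on a miss. */
static int cache_lookup(uint64_t address, unsigned int *offset)
{
	unsigned int base;

	*offset = (unsigned int)(address & (PAGE_SIZE - 1));
	base = (unsigned int)(address - *offset);

	for (int i = 0; i < PAGE_COUNT; i++)
		if ((tags[i].flags & PRESENT) && tags[i].address == base)
			return i;
	return -1;			/* the driver would evict + load here */
}

int main(void)
{
	unsigned int off;
	int entry = cache_lookup(0x281234, &off);

	printf("entry %d, offset 0x%x\n", entry, off);	/* entry 3, offset 0x1234 */
	return 0;
}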
priv->cache.page_count = CACHE_PAGE_COUNT; + priv->cache.page_size = CACHE_PAGE_SIZE; + priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) * + CACHE_PAGE_COUNT, GFP_KERNEL); + if (priv->cache.tags == NULL) { + dev_err(priv->dev, "%s:%d: could not allocate cache tags\n", + __func__, __LINE__); + return -ENOMEM; + } + + dev_info(priv->dev, "created ram cache: %d entries, %d KiB each\n", + CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024); + + return 0; +} + +static void ps3vram_cache_cleanup(struct mtd_info *mtd) +{ + struct ps3vram_priv *priv = mtd->priv; + + ps3vram_cache_flush(mtd); + kfree(priv->cache.tags); +} + +static int ps3vram_erase(struct mtd_info *mtd, struct erase_info *instr) +{ + struct ps3vram_priv *priv = mtd->priv; + + if (instr->addr + instr->len > mtd->size) + return -EINVAL; + + mutex_lock(&priv->lock); + + ps3vram_cache_flush(mtd); + + /* Set bytes to 0xFF */ + memset_io(priv->ddr_base + instr->addr, 0xFF, instr->len); + + mutex_unlock(&priv->lock); + + instr->state = MTD_ERASE_DONE; + mtd_erase_callback(instr); + + return 0; +} + +static int ps3vram_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf) +{ + struct ps3vram_priv *priv = mtd->priv; + unsigned int cached, count; + + dev_dbg(priv->dev, "%s:%d: from=0x%08x len=0x%zx\n", __func__, __LINE__, + (unsigned int)from, len); + + if (from >= mtd->size) + return -EINVAL; + + if (len > mtd->size - from) + len = mtd->size - from; + + /* Copy from vram to buf */ + count = len; + while (count) { + unsigned int offset, avail; + unsigned int entry; + + offset = (unsigned int) (from & (priv->cache.page_size - 1)); + avail = priv->cache.page_size - offset; + + mutex_lock(&priv->lock); + + entry = ps3vram_cache_match(mtd, from); + cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; + + dev_dbg(priv->dev, "%s:%d: from=%08x cached=%08x offset=%08x " + "avail=%08x count=%08x\n", __func__, __LINE__, + (unsigned int)from, cached, offset, avail, count); + + if (avail > count) + avail = count; + memcpy(buf, priv->xdr_buf + cached, avail); + + mutex_unlock(&priv->lock); + + buf += avail; + count -= avail; + from += avail; + } + + *retlen = len; + return 0; +} + +static int ps3vram_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf) +{ + struct ps3vram_priv *priv = mtd->priv; + unsigned int cached, count; + + if (to >= mtd->size) + return -EINVAL; + + if (len > mtd->size - to) + len = mtd->size - to; + + /* Copy from buf to vram */ + count = len; + while (count) { + unsigned int offset, avail; + unsigned int entry; + + offset = (unsigned int) (to & (priv->cache.page_size - 1)); + avail = priv->cache.page_size - offset; + + mutex_lock(&priv->lock); + + entry = ps3vram_cache_match(mtd, to); + cached = CACHE_OFFSET + entry * priv->cache.page_size + offset; + + dev_dbg(priv->dev, "%s:%d: to=%08x cached=%08x offset=%08x " + "avail=%08x count=%08x\n", __func__, __LINE__, + (unsigned int)to, cached, offset, avail, count); + + if (avail > count) + avail = count; + memcpy(priv->xdr_buf + cached, buf, avail); + + priv->cache.tags[entry].flags |= CACHE_PAGE_DIRTY; + + mutex_unlock(&priv->lock); + + buf += avail; + count -= avail; + to += avail; + } + + *retlen = len; + return 0; +} + +static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) +{ + struct ps3vram_priv *priv; + int status; + u64 ddr_lpar; + u64 ctrl_lpar; + u64 info_lpar; + u64 reports_lpar; + u64 ddr_size; + u64 reports_size; + int ret = -ENOMEM; + char *rest; + + ret = -EIO; + ps3vram_mtd.priv = 
kzalloc(sizeof(struct ps3vram_priv), GFP_KERNEL); + if (!ps3vram_mtd.priv) + goto out; + priv = ps3vram_mtd.priv; + + mutex_init(&priv->lock); + priv->dev = &dev->core; + + /* Allocate XDR buffer (1MiB aligned) */ + priv->xdr_buf = (void *)__get_free_pages(GFP_KERNEL, + get_order(XDR_BUF_SIZE)); + if (priv->xdr_buf == NULL) { + dev_dbg(&dev->core, "%s:%d: could not allocate XDR buffer\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_priv; + } + + /* Put FIFO at begginning of XDR buffer */ + priv->fifo_base = (u32 *) (priv->xdr_buf + FIFO_OFFSET); + priv->fifo_ptr = priv->fifo_base; + + /* XXX: Need to open GPU, in case ps3fb or snd_ps3 aren't loaded */ + if (ps3_open_hv_device(dev)) { + dev_err(&dev->core, "%s:%d: ps3_open_hv_device failed\n", + __func__, __LINE__); + ret = -EAGAIN; + goto out_close_gpu; + } + + /* Request memory */ + status = -1; + ddr_size = memparse(size, &rest); + if (*rest == '-') + ddr_size -= ps3fb_videomemory.size; + ddr_size = ALIGN(ddr_size, 1024*1024); + if (ddr_size <= 0) { + dev_err(&dev->core, "%s:%d: specified size is too small\n", + __func__, __LINE__); + ret = -EINVAL; + goto out_close_gpu; + } + + while (ddr_size > 0) { + status = lv1_gpu_memory_allocate(ddr_size, 0, 0, 0, 0, + &priv->memory_handle, + &ddr_lpar); + if (!status) + break; + ddr_size -= 1024*1024; + } + if (status || ddr_size <= 0) { + dev_err(&dev->core, "%s:%d: lv1_gpu_memory_allocate failed\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_xdr_buf; + } + + /* Request context */ + status = lv1_gpu_context_allocate(priv->memory_handle, + 0, + &priv->context_handle, + &ctrl_lpar, + &info_lpar, + &reports_lpar, + &reports_size); + if (status) { + dev_err(&dev->core, "%s:%d: lv1_gpu_context_allocate failed\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_memory; + } + + /* Map XDR buffer to RSX */ + status = lv1_gpu_context_iomap(priv->context_handle, XDR_IOIF, + ps3_mm_phys_to_lpar(__pa(priv->xdr_buf)), + XDR_BUF_SIZE, 0); + if (status) { + dev_err(&dev->core, "%s:%d: lv1_gpu_context_iomap failed\n", + __func__, __LINE__); + ret = -ENOMEM; + goto out_free_context; + } + + priv->ddr_base = ioremap_flags(ddr_lpar, ddr_size, _PAGE_NO_CACHE); + + if (!priv->ddr_base) { + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, + __LINE__); + ret = -ENOMEM; + goto out_free_context; + } + + priv->ctrl = ioremap(ctrl_lpar, 64 * 1024); + if (!priv->ctrl) { + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, + __LINE__); + ret = -ENOMEM; + goto out_unmap_vram; + } + + priv->reports = ioremap(reports_lpar, reports_size); + if (!priv->reports) { + dev_err(&dev->core, "%s:%d: ioremap failed\n", __func__, + __LINE__); + ret = -ENOMEM; + goto out_unmap_ctrl; + } + + mutex_lock(&ps3_gpu_mutex); + ps3vram_init_ring(&ps3vram_mtd); + mutex_unlock(&ps3_gpu_mutex); + + ps3vram_mtd.name = "ps3vram"; + ps3vram_mtd.size = ddr_size; + ps3vram_mtd.flags = MTD_CAP_RAM; + ps3vram_mtd.erase = ps3vram_erase; + ps3vram_mtd.point = NULL; + ps3vram_mtd.unpoint = NULL; + ps3vram_mtd.read = ps3vram_read; + ps3vram_mtd.write = ps3vram_write; + ps3vram_mtd.owner = THIS_MODULE; + ps3vram_mtd.type = MTD_RAM; + ps3vram_mtd.erasesize = CACHE_PAGE_SIZE; + ps3vram_mtd.writesize = 1; + + ps3vram_bind(&ps3vram_mtd); + + mutex_lock(&ps3_gpu_mutex); + ret = ps3vram_wait_ring(&ps3vram_mtd, 100); + mutex_unlock(&ps3_gpu_mutex); + if (ret < 0) { + dev_err(&dev->core, "%s:%d: failed to initialize channels\n", + __func__, __LINE__); + ret = -ETIMEDOUT; + goto out_unmap_reports; + } + + 
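+	/* Channels are bound and the FIFO has drained; set up the
+	 * write-back cache and expose the allocated VRAM as an MTD
+	 * device below. */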
ps3vram_cache_init(&ps3vram_mtd); + + if (add_mtd_device(&ps3vram_mtd)) { + dev_err(&dev->core, "%s:%d: add_mtd_device failed\n", + __func__, __LINE__); + ret = -EAGAIN; + goto out_cache_cleanup; + } + + dev_info(&dev->core, "reserved %u MiB of gpu memory\n", + (unsigned int)(ddr_size / 1024 / 1024)); + + return 0; + +out_cache_cleanup: + ps3vram_cache_cleanup(&ps3vram_mtd); +out_unmap_reports: + iounmap(priv->reports); +out_unmap_ctrl: + iounmap(priv->ctrl); +out_unmap_vram: + iounmap(priv->ddr_base); +out_free_context: + lv1_gpu_context_free(priv->context_handle); +out_free_memory: + lv1_gpu_memory_free(priv->memory_handle); +out_close_gpu: + ps3_close_hv_device(dev); +out_free_xdr_buf: + free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); +out_free_priv: + kfree(ps3vram_mtd.priv); + ps3vram_mtd.priv = NULL; +out: + return ret; +} + +static int ps3vram_shutdown(struct ps3_system_bus_device *dev) +{ + struct ps3vram_priv *priv; + + priv = ps3vram_mtd.priv; + + del_mtd_device(&ps3vram_mtd); + ps3vram_cache_cleanup(&ps3vram_mtd); + iounmap(priv->reports); + iounmap(priv->ctrl); + iounmap(priv->ddr_base); + lv1_gpu_context_free(priv->context_handle); + lv1_gpu_memory_free(priv->memory_handle); + ps3_close_hv_device(dev); + free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE)); + kfree(priv); + return 0; +} + +static struct ps3_system_bus_driver ps3vram_driver = { + .match_id = PS3_MATCH_ID_GPU, + .match_sub_id = PS3_MATCH_SUB_ID_GPU_RAMDISK, + .core.name = DEVICE_NAME, + .core.owner = THIS_MODULE, + .probe = ps3vram_probe, + .remove = ps3vram_shutdown, + .shutdown = ps3vram_shutdown, +}; + +static int __init ps3vram_init(void) +{ + return ps3_system_bus_driver_register(&ps3vram_driver); +} + +static void __exit ps3vram_exit(void) +{ + ps3_system_bus_driver_unregister(&ps3vram_driver); +} + +module_init(ps3vram_init); +module_exit(ps3vram_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Jim Paris <jim@jtan.com>"); +MODULE_DESCRIPTION("MTD driver for PS3 video RAM"); +MODULE_ALIAS(PS3_MODULE_ALIAS_GPU_RAMDISK); diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 5d9bcf109c13..4abbe573fa40 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c @@ -564,7 +564,7 @@ EXPORT_SYMBOL_GPL(ubi_leb_unmap); * @dtype: expected data type * * This function maps an un-mapped logical eraseblock @lnum to a physical - * eraseblock. This means, that after a successfull invocation of this + * eraseblock. This means, that after a successful invocation of this * function the logical eraseblock @lnum will be empty (contain only %0xFF * bytes) and be mapped to a physical eraseblock, even if an unclean reboot * happens. diff --git a/drivers/net/wireless/ath5k/dma.c b/drivers/net/wireless/ath5k/dma.c index 7e2b1a67e5da..b65b4feb2d28 100644 --- a/drivers/net/wireless/ath5k/dma.c +++ b/drivers/net/wireless/ath5k/dma.c @@ -594,7 +594,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask) * XXX: BMISS interrupts may occur after association. * I found this on 5210 code but it needs testing. If this is * true we should disable them before assoc and re-enable them - * after a successfull assoc + some jiffies. + * after a successful assoc + some jiffies. 
interrupt_mask &= ~AR5K_INT_BMISS; */ } diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c index 9caa96a13586..a611ad857983 100644 --- a/drivers/net/wireless/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zd1211rw/zd_mac.c @@ -287,7 +287,7 @@ static void zd_op_stop(struct ieee80211_hw *hw) * @skb - a sk-buffer * @flags: extra flags to set in the TX status info * @ackssi: ACK signal strength - * @success - True for successfull transmission of the frame + * @success - True for successful transmission of the frame * * This information calls ieee80211_tx_status_irqsafe() if required by the * control information. It copies the control information into the status diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 162330b9d1dc..7e5155e88ac7 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -86,13 +86,11 @@ enum ds_type { struct ds1307 { - u8 reg_addr; u8 regs[11]; enum ds_type type; unsigned long flags; #define HAS_NVRAM 0 /* bit 0 == sysfs file active */ #define HAS_ALARM 1 /* bit 1 == irq claimed */ - struct i2c_msg msg[2]; struct i2c_client *client; struct rtc_device *rtc; struct work_struct work; @@ -204,13 +202,9 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) int tmp; /* read the RTC date and time registers all at once */ - ds1307->reg_addr = 0; - ds1307->msg[1].flags = I2C_M_RD; - ds1307->msg[1].len = 7; - - tmp = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent), - ds1307->msg, 2); - if (tmp != 2) { + tmp = i2c_smbus_read_i2c_block_data(ds1307->client, + DS1307_REG_SECS, 7, ds1307->regs); + if (tmp != 7) { dev_err(dev, "%s error %d\n", "read", tmp); return -EIO; } @@ -257,7 +251,6 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) t->tm_hour, t->tm_mday, t->tm_mon, t->tm_year, t->tm_wday); - *buf++ = 0; /* first register addr */ buf[DS1307_REG_SECS] = bin2bcd(t->tm_sec); buf[DS1307_REG_MIN] = bin2bcd(t->tm_min); buf[DS1307_REG_HOUR] = bin2bcd(t->tm_hour); @@ -282,23 +275,19 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) break; } - ds1307->msg[1].flags = 0; - ds1307->msg[1].len = 8; - dev_dbg(dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n", "write", buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6]); - result = i2c_transfer(to_i2c_adapter(ds1307->client->dev.parent), - &ds1307->msg[1], 1); - if (result != 1) { - dev_err(dev, "%s error %d\n", "write", tmp); - return -EIO; + result = i2c_smbus_write_i2c_block_data(ds1307->client, 0, 7, buf); + if (result < 0) { + dev_err(dev, "%s error %d\n", "write", result); + return result; } return 0; } -static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) +static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t) { struct i2c_client *client = to_i2c_client(dev); struct ds1307 *ds1307 = i2c_get_clientdata(client); @@ -308,13 +297,9 @@ static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) return -EINVAL; /* read all ALARM1, ALARM2, and status registers at once */ - ds1307->reg_addr = DS1339_REG_ALARM1_SECS; - ds1307->msg[1].flags = I2C_M_RD; - ds1307->msg[1].len = 9; - - ret = i2c_transfer(to_i2c_adapter(client->dev.parent), - ds1307->msg, 2); - if (ret != 2) { + ret = i2c_smbus_read_i2c_block_data(client, + DS1339_REG_ALARM1_SECS, 9, ds1307->regs); + if (ret != 9) { dev_err(dev, "%s error %d\n", "alarm read", ret); return -EIO; } @@ -353,7 +338,7 @@ static int ds1307_read_alarm(struct device *dev, struct rtc_wkalrm *t) return 0; } -static int 
ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) +static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct i2c_client *client = to_i2c_client(dev); struct ds1307 *ds1307 = i2c_get_clientdata(client); @@ -371,13 +356,9 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled, t->pending); /* read current status of both alarms and the chip */ - ds1307->reg_addr = DS1339_REG_ALARM1_SECS; - ds1307->msg[1].flags = I2C_M_RD; - ds1307->msg[1].len = 9; - - ret = i2c_transfer(to_i2c_adapter(client->dev.parent), - ds1307->msg, 2); - if (ret != 2) { + ret = i2c_smbus_read_i2c_block_data(client, + DS1339_REG_ALARM1_SECS, 9, buf); + if (ret != 9) { dev_err(dev, "%s error %d\n", "alarm write", ret); return -EIO; } @@ -392,7 +373,6 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) ds1307->regs[6], control, status); /* set ALARM1, using 24 hour and day-of-month modes */ - *buf++ = DS1339_REG_ALARM1_SECS; /* first register addr */ buf[0] = bin2bcd(t->time.tm_sec); buf[1] = bin2bcd(t->time.tm_min); buf[2] = bin2bcd(t->time.tm_hour); @@ -411,14 +391,11 @@ static int ds1307_set_alarm(struct device *dev, struct rtc_wkalrm *t) } buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); - ds1307->msg[1].flags = 0; - ds1307->msg[1].len = 10; - - ret = i2c_transfer(to_i2c_adapter(client->dev.parent), - &ds1307->msg[1], 1); - if (ret != 1) { + ret = i2c_smbus_write_i2c_block_data(client, + DS1339_REG_ALARM1_SECS, 9, buf); + if (ret < 0) { dev_err(dev, "can't set alarm time\n"); - return -EIO; + return ret; } return 0; @@ -475,8 +452,8 @@ static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) static const struct rtc_class_ops ds13xx_rtc_ops = { .read_time = ds1307_get_time, .set_time = ds1307_set_time, - .read_alarm = ds1307_read_alarm, - .set_alarm = ds1307_set_alarm, + .read_alarm = ds1337_read_alarm, + .set_alarm = ds1337_set_alarm, .ioctl = ds1307_ioctl, }; @@ -490,7 +467,6 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr, { struct i2c_client *client; struct ds1307 *ds1307; - struct i2c_msg msg[2]; int result; client = kobj_to_i2c_client(kobj); @@ -503,24 +479,10 @@ ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr, if (unlikely(!count)) return count; - msg[0].addr = client->addr; - msg[0].flags = 0; - msg[0].len = 1; - msg[0].buf = buf; - - buf[0] = 8 + off; - - msg[1].addr = client->addr; - msg[1].flags = I2C_M_RD; - msg[1].len = count; - msg[1].buf = buf; - - result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2); - if (result != 2) { + result = i2c_smbus_read_i2c_block_data(client, 8 + off, count, buf); + if (result < 0) dev_err(&client->dev, "%s error %d\n", "nvram read", result); - return -EIO; - } - return count; + return result; } static ssize_t @@ -528,8 +490,7 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { struct i2c_client *client; - u8 buffer[NVRAM_SIZE + 1]; - int ret; + int result; client = kobj_to_i2c_client(kobj); @@ -540,11 +501,12 @@ ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr, if (unlikely(!count)) return count; - buffer[0] = 8 + off; - memcpy(buffer + 1, buf, count); - - ret = i2c_master_send(client, buffer, count + 1); - return (ret < 0) ? 
ret : (ret - 1); + result = i2c_smbus_write_i2c_block_data(client, 8 + off, count, buf); + if (result < 0) { + dev_err(&client->dev, "%s error %d\n", "nvram write", result); + return result; + } + return count; } static struct bin_attribute nvram = { @@ -571,9 +533,11 @@ static int __devinit ds1307_probe(struct i2c_client *client, const struct chip_desc *chip = &chips[id->driver_data]; struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); int want_irq = false; + unsigned char *buf; if (!i2c_check_functionality(adapter, - I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) + I2C_FUNC_SMBUS_WRITE_BYTE_DATA | + I2C_FUNC_SMBUS_I2C_BLOCK)) return -EIO; if (!(ds1307 = kzalloc(sizeof(struct ds1307), GFP_KERNEL))) @@ -581,18 +545,8 @@ static int __devinit ds1307_probe(struct i2c_client *client, ds1307->client = client; i2c_set_clientdata(client, ds1307); - - ds1307->msg[0].addr = client->addr; - ds1307->msg[0].flags = 0; - ds1307->msg[0].len = 1; - ds1307->msg[0].buf = &ds1307->reg_addr; - - ds1307->msg[1].addr = client->addr; - ds1307->msg[1].flags = I2C_M_RD; - ds1307->msg[1].len = sizeof(ds1307->regs); - ds1307->msg[1].buf = ds1307->regs; - ds1307->type = id->driver_data; + buf = ds1307->regs; switch (ds1307->type) { case ds_1337: @@ -602,21 +556,15 @@ static int __devinit ds1307_probe(struct i2c_client *client, INIT_WORK(&ds1307->work, ds1307_work); want_irq = true; } - - ds1307->reg_addr = DS1337_REG_CONTROL; - ds1307->msg[1].len = 2; - /* get registers that the "rtc" read below won't read... */ - tmp = i2c_transfer(adapter, ds1307->msg, 2); + tmp = i2c_smbus_read_i2c_block_data(ds1307->client, + DS1337_REG_CONTROL, 2, buf); if (tmp != 2) { pr_debug("read error %d\n", tmp); err = -EIO; goto exit_free; } - ds1307->reg_addr = 0; - ds1307->msg[1].len = sizeof(ds1307->regs); - /* oscillator off? turn it on, so clock can tick. 
*/ if (ds1307->regs[0] & DS1337_BIT_nEOSC) ds1307->regs[0] &= ~DS1337_BIT_nEOSC; @@ -647,9 +595,8 @@ static int __devinit ds1307_probe(struct i2c_client *client, read_rtc: /* read RTC registers */ - - tmp = i2c_transfer(adapter, ds1307->msg, 2); - if (tmp != 2) { + tmp = i2c_smbus_read_i2c_block_data(ds1307->client, 0, 8, buf); + if (tmp != 8) { pr_debug("read error %d\n", tmp); err = -EIO; goto exit_free; @@ -707,22 +654,6 @@ read_rtc: break; } - tmp = ds1307->regs[DS1307_REG_SECS]; - tmp = bcd2bin(tmp & 0x7f); - if (tmp > 60) - goto exit_bad; - tmp = bcd2bin(ds1307->regs[DS1307_REG_MIN] & 0x7f); - if (tmp > 60) - goto exit_bad; - - tmp = bcd2bin(ds1307->regs[DS1307_REG_MDAY] & 0x3f); - if (tmp == 0 || tmp > 31) - goto exit_bad; - - tmp = bcd2bin(ds1307->regs[DS1307_REG_MONTH] & 0x1f); - if (tmp == 0 || tmp > 12) - goto exit_bad; - tmp = ds1307->regs[DS1307_REG_HOUR]; switch (ds1307->type) { case ds_1340: @@ -779,13 +710,6 @@ read_rtc: return 0; -exit_bad: - dev_dbg(&client->dev, "%s: %02x %02x %02x %02x %02x %02x %02x\n", - "bogus register", - ds1307->regs[0], ds1307->regs[1], - ds1307->regs[2], ds1307->regs[3], - ds1307->regs[4], ds1307->regs[5], - ds1307->regs[6]); exit_irq: if (ds1307->rtc) rtc_device_unregister(ds1307->rtc); diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index b8f9c00633f3..d82aad5224f0 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -2621,7 +2621,7 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr) } } - /* double-check if current erp/cqr was successfull */ + /* double-check if current erp/cqr was successful */ if ((cqr->irb.scsw.cmd.cstat == 0x00) && (cqr->irb.scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 05a14536c369..4a39084d9c95 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -199,7 +199,7 @@ struct dasd_ccw_req { #define DASD_CQR_ERROR 0x82 /* request is completed with error */ #define DASD_CQR_CLEAR_PENDING 0x83 /* request is clear pending */ #define DASD_CQR_CLEARED 0x84 /* request was cleared */ -#define DASD_CQR_SUCCESS 0x85 /* request was successfull */ +#define DASD_CQR_SUCCESS 0x85 /* request was successful */ /* per dasd_ccw_req flags */ diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index 4005c44a404c..71605a179d65 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c @@ -801,7 +801,7 @@ tape_3590_done(struct tape_device *device, struct tape_request *request) static inline int tape_3590_erp_succeded(struct tape_device *device, struct tape_request *request) { - DBF_EVENT(3, "Error Recovery successfull for %s\n", + DBF_EVENT(3, "Error Recovery successful for %s\n", tape_op_verbose[request->op]); return tape_3590_done(device, request); } diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 06b71823f399..659f8a791656 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -379,7 +379,7 @@ int cio_commit_config(struct subchannel *sch) if (ccode < 0) /* -EIO if msch gets a program check. 
*/ return ccode; switch (ccode) { - case 0: /* successfull */ + case 0: /* successful */ if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) return -ENODEV; diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 744f928a59ea..10cb0f8726e5 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -114,7 +114,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) * @count: count of buffers to examine * @auto_ack: automatically acknowledge buffers * - * Returns the number of successfull extracted equal buffer states. + * Returns the number of successfully extracted equal buffer states. * Stops processing if a state is different from the last buffers state. */ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index b7322976d2b7..256c7bec7bd7 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -884,6 +884,7 @@ config SCSI_IBMVSCSI tristate "IBM Virtual SCSI support" depends on PPC_PSERIES || PPC_ISERIES select SCSI_SRP_ATTRS + select VIOPATH if PPC_ISERIES help This is the IBM POWER Virtual SCSI Client diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 3e34605ee5a8..3e525e38a5d9 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig @@ -1320,13 +1320,30 @@ config SERIAL_NETX_CONSOLE config SERIAL_OF_PLATFORM tristate "Serial port on Open Firmware platform bus" depends on PPC_OF - depends on SERIAL_8250 + depends on SERIAL_8250 || SERIAL_OF_PLATFORM_NWPSERIAL help If you have a PowerPC based system that has serial ports on a platform specific bus, you should enable this option. Currently, only 8250 compatible ports are supported, but others can easily be added. +config SERIAL_OF_PLATFORM_NWPSERIAL + tristate "NWP serial port driver" + depends on PPC_OF && PPC_DCR + select SERIAL_OF_PLATFORM + select SERIAL_CORE_CONSOLE + select SERIAL_CORE + help + This driver supports the cell network processor nwp serial + device. + +config SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE + bool "Console on NWP serial port" + depends on SERIAL_OF_PLATFORM_NWPSERIAL=y + select SERIAL_CORE_CONSOLE + help + Support for Console on the NWP serial ports. + config SERIAL_QE tristate "Freescale QUICC Engine serial port support" depends on QUICC_ENGINE diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index dfe775ac45b2..8844c0a03929 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile @@ -72,6 +72,7 @@ obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o obj-$(CONFIG_SERIAL_NETX) += netx-serial.o obj-$(CONFIG_SERIAL_OF_PLATFORM) += of_serial.o +obj-$(CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL) += nwpserial.o obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o obj-$(CONFIG_SERIAL_QE) += ucc_uart.o diff --git a/drivers/serial/nwpserial.c b/drivers/serial/nwpserial.c new file mode 100644 index 000000000000..32f3eaf0d262 --- /dev/null +++ b/drivers/serial/nwpserial.c @@ -0,0 +1,475 @@ +/* + * Serial Port driver for a NWP uart device + * + * Copyright (C) 2008 IBM Corp., Benjamin Krill <ben@codiert.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ * + */ +#include <linux/init.h> +#include <linux/console.h> +#include <linux/serial.h> +#include <linux/serial_reg.h> +#include <linux/serial_core.h> +#include <linux/tty.h> +#include <linux/irqreturn.h> +#include <linux/mutex.h> +#include <linux/of_platform.h> +#include <linux/of_device.h> +#include <linux/nwpserial.h> +#include <asm/prom.h> +#include <asm/dcr.h> + +#define NWPSERIAL_NR 2 + +#define NWPSERIAL_STATUS_RXVALID 0x1 +#define NWPSERIAL_STATUS_TXFULL 0x2 + +struct nwpserial_port { + struct uart_port port; + dcr_host_t dcr_host; + unsigned int ier; + unsigned int mcr; +}; + +static DEFINE_MUTEX(nwpserial_mutex); +static struct nwpserial_port nwpserial_ports[NWPSERIAL_NR]; + +static void wait_for_bits(struct nwpserial_port *up, int bits) +{ + unsigned int status, tmout = 10000; + + /* Wait up to 10ms for the character(s) to be sent. */ + do { + status = dcr_read(up->dcr_host, UART_LSR); + + if (--tmout == 0) + break; + udelay(1); + } while ((status & bits) != bits); +} + +#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE +static void nwpserial_console_putchar(struct uart_port *port, int c) +{ + struct nwpserial_port *up; + up = container_of(port, struct nwpserial_port, port); + /* check if tx buffer is full */ + wait_for_bits(up, UART_LSR_THRE); + dcr_write(up->dcr_host, UART_TX, c); + up->port.icount.tx++; +} + +static void +nwpserial_console_write(struct console *co, const char *s, unsigned int count) +{ + struct nwpserial_port *up = &nwpserial_ports[co->index]; + unsigned long flags; + int locked = 1; + + if (oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); + else + spin_lock_irqsave(&up->port.lock, flags); + + /* save and disable interrupt */ + up->ier = dcr_read(up->dcr_host, UART_IER); + dcr_write(up->dcr_host, UART_IER, up->ier & ~UART_IER_RDI); + + uart_console_write(&up->port, s, count, nwpserial_console_putchar); + + /* wait for transmitter to become emtpy */ + while ((dcr_read(up->dcr_host, UART_LSR) & UART_LSR_THRE) == 0) + cpu_relax(); + + /* restore interrupt state */ + dcr_write(up->dcr_host, UART_IER, up->ier); + + if (locked) + spin_unlock_irqrestore(&up->port.lock, flags); +} + +static struct uart_driver nwpserial_reg; +static struct console nwpserial_console = { + .name = "ttySQ", + .write = nwpserial_console_write, + .device = uart_console_device, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &nwpserial_reg, +}; +#define NWPSERIAL_CONSOLE (&nwpserial_console) +#else +#define NWPSERIAL_CONSOLE NULL +#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ + +/**************************************************************************/ + +static int nwpserial_request_port(struct uart_port *port) +{ + return 0; +} + +static void nwpserial_release_port(struct uart_port *port) +{ + /* N/A */ +} + +static void nwpserial_config_port(struct uart_port *port, int flags) +{ + port->type = PORT_NWPSERIAL; +} + +static irqreturn_t nwpserial_interrupt(int irq, void *dev_id) +{ + struct nwpserial_port *up = dev_id; + struct tty_struct *tty = up->port.info->port.tty; + irqreturn_t ret; + unsigned int iir; + unsigned char ch; + + spin_lock(&up->port.lock); + + /* check if the uart was the interrupt source. 
*/ + iir = dcr_read(up->dcr_host, UART_IIR); + if (!iir) { + ret = IRQ_NONE; + goto out; + } + + do { + up->port.icount.rx++; + ch = dcr_read(up->dcr_host, UART_RX); + if (up->port.ignore_status_mask != NWPSERIAL_STATUS_RXVALID) + tty_insert_flip_char(tty, ch, TTY_NORMAL); + } while (dcr_read(up->dcr_host, UART_RX) & UART_LSR_DR); + + tty_flip_buffer_push(tty); + ret = IRQ_HANDLED; + +out: + spin_unlock(&up->port.lock); + return ret; +} + +static int nwpserial_startup(struct uart_port *port) +{ + struct nwpserial_port *up; + int err; + + up = container_of(port, struct nwpserial_port, port); + + /* disable flow control by default */ + up->mcr = dcr_read(up->dcr_host, UART_MCR) & ~UART_MCR_AFE; + dcr_write(up->dcr_host, UART_MCR, up->mcr); + + /* register interrupt handler */ + err = request_irq(up->port.irq, nwpserial_interrupt, + IRQF_SHARED, "nwpserial", up); + if (err) + return err; + + /* enable interrupts */ + up->ier = UART_IER_RDI; + dcr_write(up->dcr_host, UART_IER, up->ier); + + /* enable receiving */ + up->port.ignore_status_mask &= ~NWPSERIAL_STATUS_RXVALID; + + return 0; +} + +static void nwpserial_shutdown(struct uart_port *port) +{ + struct nwpserial_port *up; + up = container_of(port, struct nwpserial_port, port); + + /* disable receiving */ + up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; + + /* disable interrupts from this port */ + up->ier = 0; + dcr_write(up->dcr_host, UART_IER, up->ier); + + /* free irq */ + free_irq(up->port.irq, port); +} + +static int nwpserial_verify_port(struct uart_port *port, + struct serial_struct *ser) +{ + return -EINVAL; +} + +static const char *nwpserial_type(struct uart_port *port) +{ + return port->type == PORT_NWPSERIAL ? "nwpserial" : NULL; +} + +static void nwpserial_set_termios(struct uart_port *port, + struct ktermios *termios, struct ktermios *old) +{ + struct nwpserial_port *up; + up = container_of(port, struct nwpserial_port, port); + + up->port.read_status_mask = NWPSERIAL_STATUS_RXVALID + | NWPSERIAL_STATUS_TXFULL; + + up->port.ignore_status_mask = 0; + /* ignore all characters if CREAD is not set */ + if ((termios->c_cflag & CREAD) == 0) + up->port.ignore_status_mask |= NWPSERIAL_STATUS_RXVALID; + + /* Copy back the old hardware settings */ + if (old) + tty_termios_copy_hw(termios, old); +} + +static void nwpserial_break_ctl(struct uart_port *port, int ctl) +{ + /* N/A */ +} + +static void nwpserial_enable_ms(struct uart_port *port) +{ + /* N/A */ +} + +static void nwpserial_stop_rx(struct uart_port *port) +{ + struct nwpserial_port *up; + up = container_of(port, struct nwpserial_port, port); + /* don't forward any more data (like !CREAD) */ + up->port.ignore_status_mask = NWPSERIAL_STATUS_RXVALID; +} + +static void nwpserial_putchar(struct nwpserial_port *up, unsigned char c) +{ + /* check if tx buffer is full */ + wait_for_bits(up, UART_LSR_THRE); + dcr_write(up->dcr_host, UART_TX, c); + up->port.icount.tx++; +} + +static void nwpserial_start_tx(struct uart_port *port) +{ + struct nwpserial_port *up; + struct circ_buf *xmit; + up = container_of(port, struct nwpserial_port, port); + xmit = &up->port.info->xmit; + + if (port->x_char) { + nwpserial_putchar(up, up->port.x_char); + port->x_char = 0; + } + + while (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port))) { + nwpserial_putchar(up, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE-1); + } +} + +static unsigned int nwpserial_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void nwpserial_set_mctrl(struct uart_port *port, 
unsigned int mctrl) +{ + /* N/A */ +} + +static void nwpserial_stop_tx(struct uart_port *port) +{ + /* N/A */ +} + +static unsigned int nwpserial_tx_empty(struct uart_port *port) +{ + struct nwpserial_port *up; + unsigned long flags; + int ret; + up = container_of(port, struct nwpserial_port, port); + + spin_lock_irqsave(&up->port.lock, flags); + ret = dcr_read(up->dcr_host, UART_LSR); + spin_unlock_irqrestore(&up->port.lock, flags); + + return ret & UART_LSR_TEMT ? TIOCSER_TEMT : 0; +} + +static struct uart_ops nwpserial_pops = { + .tx_empty = nwpserial_tx_empty, + .set_mctrl = nwpserial_set_mctrl, + .get_mctrl = nwpserial_get_mctrl, + .stop_tx = nwpserial_stop_tx, + .start_tx = nwpserial_start_tx, + .stop_rx = nwpserial_stop_rx, + .enable_ms = nwpserial_enable_ms, + .break_ctl = nwpserial_break_ctl, + .startup = nwpserial_startup, + .shutdown = nwpserial_shutdown, + .set_termios = nwpserial_set_termios, + .type = nwpserial_type, + .release_port = nwpserial_release_port, + .request_port = nwpserial_request_port, + .config_port = nwpserial_config_port, + .verify_port = nwpserial_verify_port, +}; + +static struct uart_driver nwpserial_reg = { + .owner = THIS_MODULE, + .driver_name = "nwpserial", + .dev_name = "ttySQ", + .major = TTY_MAJOR, + .minor = 68, + .nr = NWPSERIAL_NR, + .cons = NWPSERIAL_CONSOLE, +}; + +int nwpserial_register_port(struct uart_port *port) +{ + struct nwpserial_port *up = NULL; + int ret = -1; + int i; + static int first = 1; + int dcr_len; + int dcr_base; + struct device_node *dn; + + mutex_lock(&nwpserial_mutex); + + dn = to_of_device(port->dev)->node; + if (dn == NULL) + goto out; + + /* get dcr base. */ + dcr_base = dcr_resource_start(dn, 0); + + /* find matching entry */ + for (i = 0; i < NWPSERIAL_NR; i++) + if (nwpserial_ports[i].port.iobase == dcr_base) { + up = &nwpserial_ports[i]; + break; + } + + /* we didn't find a mtching entry, search for a free port */ + if (up == NULL) + for (i = 0; i < NWPSERIAL_NR; i++) + if (nwpserial_ports[i].port.type == PORT_UNKNOWN && + nwpserial_ports[i].port.iobase == 0) { + up = &nwpserial_ports[i]; + break; + } + + if (up == NULL) { + ret = -EBUSY; + goto out; + } + + if (first) + uart_register_driver(&nwpserial_reg); + first = 0; + + up->port.membase = port->membase; + up->port.irq = port->irq; + up->port.uartclk = port->uartclk; + up->port.fifosize = port->fifosize; + up->port.regshift = port->regshift; + up->port.iotype = port->iotype; + up->port.flags = port->flags; + up->port.mapbase = port->mapbase; + up->port.private_data = port->private_data; + + if (port->dev) + up->port.dev = port->dev; + + if (up->port.iobase != dcr_base) { + up->port.ops = &nwpserial_pops; + up->port.fifosize = 16; + + spin_lock_init(&up->port.lock); + + up->port.iobase = dcr_base; + dcr_len = dcr_resource_len(dn, 0); + + up->dcr_host = dcr_map(dn, dcr_base, dcr_len); + if (!DCR_MAP_OK(up->dcr_host)) { + printk(KERN_ERR "Cannot map DCR resources for NWPSERIAL"); + goto out; + } + } + + ret = uart_add_one_port(&nwpserial_reg, &up->port); + if (ret == 0) + ret = up->port.line; + +out: + mutex_unlock(&nwpserial_mutex); + + return ret; +} +EXPORT_SYMBOL(nwpserial_register_port); + +void nwpserial_unregister_port(int line) +{ + struct nwpserial_port *up = &nwpserial_ports[line]; + mutex_lock(&nwpserial_mutex); + uart_remove_one_port(&nwpserial_reg, &up->port); + + up->port.type = PORT_UNKNOWN; + + mutex_unlock(&nwpserial_mutex); +} +EXPORT_SYMBOL(nwpserial_unregister_port); + +#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE +static int __init 
nwpserial_console_init(void) +{ + struct nwpserial_port *up = NULL; + struct device_node *dn; + const char *name; + int dcr_base; + int dcr_len; + int i; + + /* search for a free port */ + for (i = 0; i < NWPSERIAL_NR; i++) + if (nwpserial_ports[i].port.type == PORT_UNKNOWN) { + up = &nwpserial_ports[i]; + break; + } + + if (up == NULL) + return -1; + + name = of_get_property(of_chosen, "linux,stdout-path", NULL); + if (name == NULL) + return -1; + + dn = of_find_node_by_path(name); + if (!dn) + return -1; + + spin_lock_init(&up->port.lock); + up->port.ops = &nwpserial_pops; + up->port.type = PORT_NWPSERIAL; + up->port.fifosize = 16; + + dcr_base = dcr_resource_start(dn, 0); + dcr_len = dcr_resource_len(dn, 0); + up->port.iobase = dcr_base; + + up->dcr_host = dcr_map(dn, dcr_base, dcr_len); + if (!DCR_MAP_OK(up->dcr_host)) { + printk("Cannot map DCR resources for SERIAL"); + return -1; + } + register_console(&nwpserial_console); + return 0; +} +console_initcall(nwpserial_console_init); +#endif /* CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL_CONSOLE */ diff --git a/drivers/serial/of_serial.c b/drivers/serial/of_serial.c index 8fa0ff561e9f..a821e3a3d664 100644 --- a/drivers/serial/of_serial.c +++ b/drivers/serial/of_serial.c @@ -14,6 +14,7 @@ #include <linux/serial_core.h> #include <linux/serial_8250.h> #include <linux/of_platform.h> +#include <linux/nwpserial.h> #include <asm/prom.h> @@ -99,9 +100,16 @@ static int __devinit of_platform_serial_probe(struct of_device *ofdev, goto out; switch (port_type) { +#ifdef CONFIG_SERIAL_8250 case PORT_8250 ... PORT_MAX_8250: ret = serial8250_register_port(&port); break; +#endif +#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL + case PORT_NWPSERIAL: + ret = nwpserial_register_port(&port); + break; +#endif default: /* need to add code for these */ case PORT_UNKNOWN: @@ -129,9 +137,16 @@ static int of_platform_serial_remove(struct of_device *ofdev) { struct of_serial_info *info = ofdev->dev.driver_data; switch (info->type) { +#ifdef CONFIG_SERIAL_8250 case PORT_8250 ... PORT_MAX_8250: serial8250_unregister_port(info->line); break; +#endif +#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL + case PORT_NWPSERIAL: + nwpserial_unregister_port(info->line); + break; +#endif default: /* need to add code for these */ break; @@ -148,6 +163,10 @@ static struct of_device_id __devinitdata of_platform_serial_table[] = { { .type = "serial", .compatible = "ns16450", .data = (void *)PORT_16450, }, { .type = "serial", .compatible = "ns16550", .data = (void *)PORT_16550, }, { .type = "serial", .compatible = "ns16750", .data = (void *)PORT_16750, }, +#ifdef CONFIG_SERIAL_OF_PLATFORM_NWPSERIAL + { .type = "serial", .compatible = "ibm,qpace-nwp-serial", + .data = (void *)PORT_NWPSERIAL, }, +#endif { .type = "serial", .data = (void *)PORT_UNKNOWN, }, { /* end of list */ }, }; diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 90616822cd20..96d2f8e4c275 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig @@ -34,6 +34,12 @@ config W1_MASTER_DS2482 This driver can also be built as a module. If so, the module will be called ds2482. 
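The Freescale MXC bus master added below supplies only the two lowest-level w1 callbacks, reset_bus and touch_bit; the w1 core derives byte, block and (with the w1_touch_block addition later in this series) touch transfers from them. A rough sketch of that registration pattern follows — everything named example_* is a placeholder, only struct w1_bus_master and w1_add_master_device() come from the w1 core headers:

    #include "../w1.h"
    #include "../w1_int.h"

    /* Placeholder callbacks; a real bus master talks to its hardware here. */
    static u8 example_reset_bus(void *data)
    {
            return 0;               /* 0: presence pulse seen on the bus */
    }

    static u8 example_touch_bit(void *data, u8 bit)
    {
            return bit;             /* write 0, or write-1-then-sample */
    }

    static struct w1_bus_master example_master = {
            .reset_bus = example_reset_bus,
            .touch_bit = example_touch_bit,
    };

    static int example_register(void *drvdata)
    {
            example_master.data = drvdata;  /* handed back to the callbacks */
            return w1_add_master_device(&example_master);
    }

w1_remove_master_device(&example_master) undoes the registration, as mxc_w1_remove() does in the driver below.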
+config W1_MASTER_MXC + tristate "Freescale MXC 1-wire busmaster" + depends on W1 && ARCH_MXC + help + Say Y here to enable MXC 1-wire host + config W1_MASTER_DS1WM tristate "Maxim DS1WM 1-wire busmaster" depends on W1 && ARM && HAVE_CLK diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile index bc4714a75f3a..c5a3e96fcbab 100644 --- a/drivers/w1/masters/Makefile +++ b/drivers/w1/masters/Makefile @@ -5,6 +5,8 @@ obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o +obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o + obj-$(CONFIG_W1_MASTER_DS1WM) += ds1wm.o obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c new file mode 100644 index 000000000000..b9d74d0b353e --- /dev/null +++ b/drivers/w1/masters/mxc_w1.c @@ -0,0 +1,211 @@ +/* + * Copyright 2005-2008 Freescale Semiconductor, Inc. All Rights Reserved. + * Copyright 2008 Luotao Fu, kernel@pengutronix.de + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + */ + +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/io.h> + +#include "../w1.h" +#include "../w1_int.h" +#include "../w1_log.h" + +/* According to the mx27 Datasheet the reset procedure should take up to about + * 1350us. We set the timeout to 500*100us = 50ms for sure */ +#define MXC_W1_RESET_TIMEOUT 500 + +/* + * MXC W1 Register offsets + */ +#define MXC_W1_CONTROL 0x00 +#define MXC_W1_TIME_DIVIDER 0x02 +#define MXC_W1_RESET 0x04 +#define MXC_W1_COMMAND 0x06 +#define MXC_W1_TXRX 0x08 +#define MXC_W1_INTERRUPT 0x0A +#define MXC_W1_INTERRUPT_EN 0x0C + +struct mxc_w1_device { + void __iomem *regs; + unsigned int clkdiv; + struct clk *clk; + struct w1_bus_master bus_master; +}; + +/* + * this is the low level routine to + * reset the device on the One Wire interface + * on the hardware + */ +static u8 mxc_w1_ds2_reset_bus(void *data) +{ + u8 reg_val; + unsigned int timeout_cnt = 0; + struct mxc_w1_device *dev = data; + + __raw_writeb(0x80, (dev->regs + MXC_W1_CONTROL)); + + while (1) { + reg_val = __raw_readb(dev->regs + MXC_W1_CONTROL); + + if (((reg_val >> 7) & 0x1) == 0 || + timeout_cnt > MXC_W1_RESET_TIMEOUT) + break; + else + timeout_cnt++; + + udelay(100); + } + return (reg_val >> 7) & 0x1; +} + +/* + * this is the low level routine to read/write a bit on the One Wire + * interface on the hardware. It does write 0 if parameter bit is set + * to 0, otherwise a write 1/read. + */ +static u8 mxc_w1_ds2_touch_bit(void *data, u8 bit) +{ + struct mxc_w1_device *mdev = data; + void __iomem *ctrl_addr = mdev->regs + MXC_W1_CONTROL; + unsigned int timeout_cnt = 400; /* Takes max. 120us according to + * datasheet. 
+ */ + + __raw_writeb((1 << (5 - bit)), ctrl_addr); + + while (timeout_cnt--) { + if (!((__raw_readb(ctrl_addr) >> (5 - bit)) & 0x1)) + break; + + udelay(1); + } + + return ((__raw_readb(ctrl_addr)) >> 3) & 0x1; +} + +static int __init mxc_w1_probe(struct platform_device *pdev) +{ + struct mxc_w1_device *mdev; + struct resource *res; + int err = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + mdev = kzalloc(sizeof(struct mxc_w1_device), GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + mdev->clk = clk_get(&pdev->dev, "owire_clk"); + if (!mdev->clk) { + err = -ENODEV; + goto failed_clk; + } + + mdev->clkdiv = (clk_get_rate(mdev->clk) / 1000000) - 1; + + res = request_mem_region(res->start, resource_size(res), + "mxc_w1"); + if (!res) { + err = -EBUSY; + goto failed_req; + } + + mdev->regs = ioremap(res->start, resource_size(res)); + if (!mdev->regs) { + printk(KERN_ERR "Cannot map frame buffer registers\n"); + goto failed_ioremap; + } + + clk_enable(mdev->clk); + __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER); + + mdev->bus_master.data = mdev; + mdev->bus_master.reset_bus = mxc_w1_ds2_reset_bus; + mdev->bus_master.touch_bit = mxc_w1_ds2_touch_bit; + + err = w1_add_master_device(&mdev->bus_master); + + if (err) + goto failed_add; + + platform_set_drvdata(pdev, mdev); + return 0; + +failed_add: + iounmap(mdev->regs); +failed_ioremap: + release_mem_region(res->start, resource_size(res)); +failed_req: + clk_put(mdev->clk); +failed_clk: + kfree(mdev); + return err; +} + +/* + * disassociate the w1 device from the driver + */ +static int mxc_w1_remove(struct platform_device *pdev) +{ + struct mxc_w1_device *mdev = platform_get_drvdata(pdev); + struct resource *res; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + w1_remove_master_device(&mdev->bus_master); + + iounmap(mdev->regs); + release_mem_region(res->start, resource_size(res)); + clk_disable(mdev->clk); + clk_put(mdev->clk); + + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver mxc_w1_driver = { + .driver = { + .name = "mxc_w1", + }, + .probe = mxc_w1_probe, + .remove = mxc_w1_remove, +}; + +static int __init mxc_w1_init(void) +{ + return platform_driver_register(&mxc_w1_driver); +} + +static void mxc_w1_exit(void) +{ + platform_driver_unregister(&mxc_w1_driver); +} + +module_init(mxc_w1_init); +module_exit(mxc_w1_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Freescale Semiconductors Inc"); +MODULE_DESCRIPTION("Driver for One-Wire on MXC"); diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index 97304bd83ec9..d8a9709f3449 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h @@ -210,6 +210,7 @@ u8 w1_read_8(struct w1_master *); int w1_reset_bus(struct w1_master *); u8 w1_calc_crc8(u8 *, int); void w1_write_block(struct w1_master *, const u8 *, int); +void w1_touch_block(struct w1_master *, u8 *, int); u8 w1_read_block(struct w1_master *, u8 *, int); int w1_reset_select_slave(struct w1_slave *sl); void w1_next_pullup(struct w1_master *, int); diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 5139c25ca962..442bd8bbd4a5 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c @@ -238,7 +238,6 @@ EXPORT_SYMBOL_GPL(w1_read_8); * @param dev the master device * @param buf pointer to the data to write * @param len the number of bytes to write - * @return the byte read */ void w1_write_block(struct w1_master *dev, const u8 *buf, int len) { @@ -256,6 +255,31 @@ void w1_write_block(struct w1_master *dev, const u8 *buf, int len) 
EXPORT_SYMBOL_GPL(w1_write_block); /** + * Touches a series of bytes. + * + * @param dev the master device + * @param buf pointer to the data to write + * @param len the number of bytes to write + */ +void w1_touch_block(struct w1_master *dev, u8 *buf, int len) +{ + int i, j; + u8 tmp; + + for (i = 0; i < len; ++i) { + tmp = 0; + for (j = 0; j < 8; ++j) { + if (j == 7) + w1_pre_write(dev); + tmp |= w1_touch_bit(dev, (buf[i] >> j) & 0x1) << j; + } + + buf[i] = tmp; + } +} +EXPORT_SYMBOL_GPL(w1_touch_block); + +/** * Reads a series of bytes. * * @param dev the master device diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 65c5ebd0787e..fdf72851c574 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c @@ -47,21 +47,56 @@ void w1_netlink_send(struct w1_master *dev, struct w1_netlink_msg *msg) cn_netlink_send(m, 0, GFP_KERNEL); } -static int w1_process_command_master(struct w1_master *dev, struct cn_msg *msg, - struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) +static void w1_send_slave(struct w1_master *dev, u64 rn) +{ + struct cn_msg *msg = dev->priv; + struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); + struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); + int avail; + + avail = dev->priv_size - cmd->len; + + if (avail > 8) { + u64 *data = (void *)(cmd + 1) + cmd->len; + + *data = rn; + cmd->len += 8; + hdr->len += 8; + msg->len += 8; + return; + } + + msg->ack++; + cn_netlink_send(msg, 0, GFP_KERNEL); + + msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd); + hdr->len = sizeof(struct w1_netlink_cmd); + cmd->len = 0; +} + +static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg, + unsigned int avail) { - dev_dbg(&dev->dev, "%s: %s: cmd=%02x, len=%u.\n", - __func__, dev->name, cmd->cmd, cmd->len); + struct w1_netlink_msg *hdr = (struct w1_netlink_msg *)(msg + 1); + struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); + int search_type = (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH; - if (cmd->cmd != W1_CMD_SEARCH && cmd->cmd != W1_CMD_ALARM_SEARCH) - return -EINVAL; + dev->priv = msg; + dev->priv_size = avail; + + w1_search_devices(dev, search_type, w1_send_slave); + + msg->ack = 0; + cn_netlink_send(msg, 0, GFP_KERNEL); + + dev->priv = NULL; + dev->priv_size = 0; - w1_search_process(dev, (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); return 0; } -static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, - struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) +static int w1_send_read_reply(struct cn_msg *msg, struct w1_netlink_msg *hdr, + struct w1_netlink_cmd *cmd) { void *data; struct w1_netlink_msg *h; @@ -85,7 +120,8 @@ static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, memcpy(c, cmd, sizeof(struct w1_netlink_cmd)); cm->ack = msg->seq+1; - cm->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd) + cmd->len; + cm->len = sizeof(struct w1_netlink_msg) + + sizeof(struct w1_netlink_cmd) + cmd->len; h->len = sizeof(struct w1_netlink_cmd) + cmd->len; @@ -98,36 +134,178 @@ static int w1_send_read_reply(struct w1_slave *sl, struct cn_msg *msg, return err; } -static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, +static int w1_process_command_io(struct w1_master *dev, struct cn_msg *msg, struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) { int err = 0; - dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", - __func__, sl->reg_num.family, (unsigned long 
long)sl->reg_num.id, sl->reg_num.crc, - cmd->cmd, cmd->len); + switch (cmd->cmd) { + case W1_CMD_TOUCH: + w1_touch_block(dev, cmd->data, cmd->len); + w1_send_read_reply(msg, hdr, cmd); + break; + case W1_CMD_READ: + w1_read_block(dev, cmd->data, cmd->len); + w1_send_read_reply(msg, hdr, cmd); + break; + case W1_CMD_WRITE: + w1_write_block(dev, cmd->data, cmd->len); + break; + default: + err = -EINVAL; + break; + } + + return err; +} + +static int w1_process_command_master(struct w1_master *dev, struct cn_msg *req_msg, + struct w1_netlink_msg *req_hdr, struct w1_netlink_cmd *req_cmd) +{ + int err = -EINVAL; + struct cn_msg *msg; + struct w1_netlink_msg *hdr; + struct w1_netlink_cmd *cmd; + + msg = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->id = req_msg->id; + msg->seq = req_msg->seq; + msg->ack = 0; + msg->len = sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd); + + hdr = (struct w1_netlink_msg *)(msg + 1); + cmd = (struct w1_netlink_cmd *)(hdr + 1); + + hdr->type = W1_MASTER_CMD; + hdr->id = req_hdr->id; + hdr->len = sizeof(struct w1_netlink_cmd); + + cmd->cmd = req_cmd->cmd; + cmd->len = 0; switch (cmd->cmd) { - case W1_CMD_READ: - w1_read_block(sl->master, cmd->data, cmd->len); - w1_send_read_reply(sl, msg, hdr, cmd); - break; - case W1_CMD_WRITE: - w1_write_block(sl->master, cmd->data, cmd->len); - break; - case W1_CMD_SEARCH: - case W1_CMD_ALARM_SEARCH: - w1_search_process(sl->master, - (cmd->cmd == W1_CMD_ALARM_SEARCH)?W1_ALARM_SEARCH:W1_SEARCH); - break; - default: - err = -1; - break; + case W1_CMD_SEARCH: + case W1_CMD_ALARM_SEARCH: + err = w1_process_search_command(dev, msg, + PAGE_SIZE - msg->len - sizeof(struct cn_msg)); + break; + case W1_CMD_READ: + case W1_CMD_WRITE: + case W1_CMD_TOUCH: + err = w1_process_command_io(dev, req_msg, req_hdr, req_cmd); + break; + case W1_CMD_RESET: + err = w1_reset_bus(dev); + break; + default: + err = -EINVAL; + break; } + kfree(msg); return err; } +static int w1_process_command_slave(struct w1_slave *sl, struct cn_msg *msg, + struct w1_netlink_msg *hdr, struct w1_netlink_cmd *cmd) +{ + dev_dbg(&sl->master->dev, "%s: %02x.%012llx.%02x: cmd=%02x, len=%u.\n", + __func__, sl->reg_num.family, (unsigned long long)sl->reg_num.id, + sl->reg_num.crc, cmd->cmd, cmd->len); + + return w1_process_command_io(sl->master, msg, hdr, cmd); +} + +static int w1_process_command_root(struct cn_msg *msg, struct w1_netlink_msg *mcmd) +{ + struct w1_master *m; + struct cn_msg *cn; + struct w1_netlink_msg *w; + u32 *id; + + if (mcmd->type != W1_LIST_MASTERS) { + printk(KERN_NOTICE "%s: msg: %x.%x, wrong type: %u, len: %u.\n", + __func__, msg->id.idx, msg->id.val, mcmd->type, mcmd->len); + return -EPROTO; + } + + cn = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!cn) + return -ENOMEM; + + cn->id.idx = CN_W1_IDX; + cn->id.val = CN_W1_VAL; + + cn->seq = msg->seq; + cn->ack = 1; + cn->len = sizeof(struct w1_netlink_msg); + w = (struct w1_netlink_msg *)(cn + 1); + + w->type = W1_LIST_MASTERS; + w->status = 0; + w->len = 0; + id = (u32 *)(w + 1); + + mutex_lock(&w1_mlock); + list_for_each_entry(m, &w1_masters, w1_master_entry) { + if (cn->len + sizeof(*id) > PAGE_SIZE - sizeof(struct cn_msg)) { + cn_netlink_send(cn, 0, GFP_KERNEL); + cn->ack++; + cn->len = sizeof(struct w1_netlink_msg); + w->len = 0; + id = (u32 *)(w + 1); + } + + *id = m->id; + w->len += sizeof(*id); + cn->len += sizeof(*id); + id++; + } + cn->ack = 0; + cn_netlink_send(cn, 0, GFP_KERNEL); + mutex_unlock(&w1_mlock); + + kfree(cn); + return 0; +} + +static int 
w1_netlink_send_error(struct cn_msg *rcmsg, struct w1_netlink_msg *rmsg, + struct w1_netlink_cmd *rcmd, int error) +{ + struct cn_msg *cmsg; + struct w1_netlink_msg *msg; + struct w1_netlink_cmd *cmd; + + cmsg = kzalloc(sizeof(*msg) + sizeof(*cmd) + sizeof(*cmsg), GFP_KERNEL); + if (!cmsg) + return -ENOMEM; + + msg = (struct w1_netlink_msg *)(cmsg + 1); + cmd = (struct w1_netlink_cmd *)(msg + 1); + + memcpy(cmsg, rcmsg, sizeof(*cmsg)); + cmsg->len = sizeof(*msg); + + memcpy(msg, rmsg, sizeof(*msg)); + msg->len = 0; + msg->status = (short)-error; + + if (rcmd) { + memcpy(cmd, rcmd, sizeof(*cmd)); + cmd->len = 0; + msg->len += sizeof(*cmd); + cmsg->len += sizeof(*cmd); + } + + error = cn_netlink_send(cmsg, 0, GFP_KERNEL); + kfree(cmsg); + + return error; +} + static void w1_cn_callback(void *data) { struct cn_msg *msg = data; @@ -144,6 +322,7 @@ static void w1_cn_callback(void *data) dev = NULL; sl = NULL; + cmd = NULL; memcpy(&id, m->id.id, sizeof(id)); #if 0 @@ -155,15 +334,15 @@ static void w1_cn_callback(void *data) break; } - if (!mlen) - goto out_cont; - if (m->type == W1_MASTER_CMD) { dev = w1_search_master_id(m->id.mst.id); } else if (m->type == W1_SLAVE_CMD) { sl = w1_search_slave(&id); if (sl) dev = sl->master; + } else { + err = w1_process_command_root(msg, m); + goto out_cont; } if (!dev) { @@ -171,6 +350,10 @@ static void w1_cn_callback(void *data) goto out_cont; } + err = 0; + if (!mlen) + goto out_cont; + mutex_lock(&dev->mutex); if (sl && w1_reset_select_slave(sl)) { @@ -187,9 +370,12 @@ static void w1_cn_callback(void *data) } if (sl) - w1_process_command_slave(sl, msg, m, cmd); + err = w1_process_command_slave(sl, msg, m, cmd); else - w1_process_command_master(dev, msg, m, cmd); + err = w1_process_command_master(dev, msg, m, cmd); + + w1_netlink_send_error(msg, m, cmd, err); + err = 0; cmd_data += cmd->len + sizeof(struct w1_netlink_cmd); mlen -= cmd->len + sizeof(struct w1_netlink_cmd); @@ -200,6 +386,8 @@ out_up: atomic_dec(&sl->refcnt); mutex_unlock(&dev->mutex); out_cont: + if (!cmd || err) + w1_netlink_send_error(msg, m, cmd, err); msg->len -= sizeof(struct w1_netlink_msg) + m->len; m = (struct w1_netlink_msg *)(((u8 *)m) + sizeof(struct w1_netlink_msg) + m->len); @@ -209,11 +397,6 @@ out_cont: if (err == -ENODEV) err = 0; } -#if 0 - if (err) { - printk("%s: malformed message. Dropping.\n", __func__); - } -#endif } int w1_init_netlink(void) diff --git a/drivers/w1/w1_netlink.h b/drivers/w1/w1_netlink.h index 56122b9e9294..27e950f935b1 100644 --- a/drivers/w1/w1_netlink.h +++ b/drivers/w1/w1_netlink.h @@ -34,12 +34,13 @@ enum w1_netlink_message_types { W1_MASTER_REMOVE, W1_MASTER_CMD, W1_SLAVE_CMD, + W1_LIST_MASTERS, }; struct w1_netlink_msg { __u8 type; - __u8 reserved; + __u8 status; __u16 len; union { __u8 id[8]; @@ -51,10 +52,15 @@ struct w1_netlink_msg __u8 data[0]; }; -#define W1_CMD_READ 0x0 -#define W1_CMD_WRITE 0x1 -#define W1_CMD_SEARCH 0x2 -#define W1_CMD_ALARM_SEARCH 0x3 +enum w1_commands { + W1_CMD_READ = 0, + W1_CMD_WRITE, + W1_CMD_SEARCH, + W1_CMD_ALARM_SEARCH, + W1_CMD_TOUCH, + W1_CMD_RESET, + W1_CMD_MAX, +}; struct w1_netlink_cmd { diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 4b75a16de009..526187c8a12d 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -17,3 +17,27 @@ config XEN_SCRUB_PAGES is not accidentally visible to other domains. Is it more secure, but slightly less efficient. If in doubt, say yes. 
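The w1 netlink changes above add bus-level I/O commands, a root-level W1_LIST_MASTERS query and a status byte in every reply. As a hedged illustration of what such a request looks like from userspace — the connector socket setup, nlmsghdr framing and the reply loop are omitted, and only the structures from w1_netlink.h above and linux/connector.h are assumed:

    #include <stddef.h>
    #include <string.h>
    #include <linux/connector.h>
    #include "w1_netlink.h"

    /* Illustrative only: compose the payload for a W1_LIST_MASTERS request. */
    static size_t w1_build_list_masters(void *buf, __u32 seq)
    {
            struct cn_msg *cn = buf;
            struct w1_netlink_msg *m = (struct w1_netlink_msg *)(cn + 1);

            memset(buf, 0, sizeof(*cn) + sizeof(*m));
            cn->id.idx = CN_W1_IDX;
            cn->id.val = CN_W1_VAL;
            cn->seq    = seq;
            cn->len    = sizeof(*m);

            m->type = W1_LIST_MASTERS;
            m->len  = 0;            /* no payload; the reply packs u32 bus ids */

            return sizeof(*cn) + sizeof(*m);
    }

The kernel answers with one or more cn_msg replies whose payload is an array of u32 bus-master ids, with the new status field carrying any error, as implemented in w1_process_command_root() and w1_netlink_send_error() above.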
+ +config XENFS + tristate "Xen filesystem" + depends on XEN + default y + help + The xen filesystem provides a way for domains to share + information with each other and with the hypervisor. + For example, by reading and writing the "xenbus" file, guests + may pass arbitrary information to the initial domain. + If in doubt, say yes. + +config XEN_COMPAT_XENFS + bool "Create compatibility mount point /proc/xen" + depends on XENFS + default y + help + The old xenstore userspace tools expect to find "xenbus" + under /proc/xen, but "xenbus" is now found at the root of the + xenfs filesystem. Selecting this causes the kernel to create + the compatibility mount point /proc/xen if it is running on + a xen platform. + If in doubt, say yes. + diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index d2a8fdf0e191..ff8accc9e103 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -1,5 +1,7 @@ obj-y += grant-table.o features.o events.o manage.o obj-y += xenbus/ + obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o obj-$(CONFIG_XEN_XENCOMM) += xencomm.o obj-$(CONFIG_XEN_BALLOON) += balloon.o +obj-$(CONFIG_XENFS) += xenfs/
\ No newline at end of file diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c index 9678b3e98c63..92a1ef80a288 100644 --- a/drivers/xen/xenbus/xenbus_client.c +++ b/drivers/xen/xenbus/xenbus_client.c @@ -136,7 +136,6 @@ EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); /** * xenbus_switch_state * @dev: xenbus device - * @xbt: transaction handle * @state: new state * * Advertise in the store a change of the given driver to the given new_state. @@ -267,7 +266,7 @@ EXPORT_SYMBOL_GPL(xenbus_dev_error); * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by - * xenbus_switch_state(dev, NULL, XenbusStateClosing) to schedule an orderly + * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index b2a03184a246..773d1cf23283 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ b/drivers/xen/xenbus/xenbus_probe.c @@ -40,6 +40,7 @@ #include <linux/ctype.h> #include <linux/fcntl.h> #include <linux/mm.h> +#include <linux/proc_fs.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/mutex.h> @@ -55,7 +56,10 @@ #include "xenbus_comms.h" #include "xenbus_probe.h" + int xen_store_evtchn; +EXPORT_SYMBOL(xen_store_evtchn); + struct xenstore_domain_interface *xen_store_interface; static unsigned long xen_store_mfn; @@ -166,6 +170,9 @@ static int read_backend_details(struct xenbus_device *xendev) return read_otherend_details(xendev, "backend-id", "backend"); } +static struct device_attribute xenbus_dev_attrs[] = { + __ATTR_NULL +}; /* Bus type for frontend drivers. */ static struct xen_bus_type xenbus_frontend = { @@ -174,12 +181,13 @@ static struct xen_bus_type xenbus_frontend = { .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .bus = { - .name = "xen", - .match = xenbus_match, - .uevent = xenbus_uevent, - .probe = xenbus_dev_probe, - .remove = xenbus_dev_remove, - .shutdown = xenbus_dev_shutdown, + .name = "xen", + .match = xenbus_match, + .uevent = xenbus_uevent, + .probe = xenbus_dev_probe, + .remove = xenbus_dev_remove, + .shutdown = xenbus_dev_shutdown, + .dev_attrs = xenbus_dev_attrs, }, }; @@ -852,6 +860,14 @@ static int __init xenbus_probe_init(void) if (!xen_initial_domain()) xenbus_probe(NULL); +#ifdef CONFIG_XEN_COMPAT_XENFS + /* + * Create xenfs mountpoint in /proc for compatibility with + * utilities that expect to find "xenbus" under "/proc/xen". + */ + proc_mkdir("xen", NULL); +#endif + return 0; out_unreg_back: diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c index 7f2f91c0e11d..e325eab4724d 100644 --- a/drivers/xen/xenbus/xenbus_xs.c +++ b/drivers/xen/xenbus/xenbus_xs.c @@ -184,6 +184,7 @@ void *xenbus_dev_request_and_reply(struct xsd_sockmsg *msg) return ret; } +EXPORT_SYMBOL(xenbus_dev_request_and_reply); /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ static void *xs_talkv(struct xenbus_transaction t, diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile new file mode 100644 index 000000000000..25275c3bbdff --- /dev/null +++ b/drivers/xen/xenfs/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_XENFS) += xenfs.o + +xenfs-objs = super.o xenbus.o
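Exporting xenbus_dev_request_and_reply() is what lets the separately built xenfs module hand a fully formed xsd_sockmsg to xenstore and collect the kmalloc'ed reply; on return the header's len field holds the reply length, which is how xenbus_write_transaction() further down sizes the reply it queues. A minimal kernel-side sketch of the call, assuming the declaration is reachable through xen/xenbus.h as it is for xenfs; the XS_DIRECTORY request and the fixed 32-byte body are illustrative only:

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <xen/xenbus.h>
#include <xen/interface/io/xs_wire.h>

static int example_xs_directory(const char *path)
{
        struct {
                struct xsd_sockmsg hdr;
                char body[32];
        } req = { .hdr = { .type = XS_DIRECTORY } };
        void *reply;

        /* body is the NUL-terminated path; len counts the terminator */
        req.hdr.len = strlcpy(req.body, path, sizeof(req.body)) + 1;

        reply = xenbus_dev_request_and_reply(&req.hdr);
        if (IS_ERR(reply))
                return PTR_ERR(reply);

        /* reply now holds req.hdr.len bytes of NUL-separated child names */
        kfree(reply);
        return 0;
}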
\ No newline at end of file diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c new file mode 100644 index 000000000000..515741a8e6b8 --- /dev/null +++ b/drivers/xen/xenfs/super.c @@ -0,0 +1,64 @@ +/* + * xenfs.c - a filesystem for passing info between the a domain and + * the hypervisor. + * + * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem + * and /proc/xen compatibility mount point. + * Turned xenfs into a loadable module. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/magic.h> + +#include "xenfs.h" + +#include <asm/xen/hypervisor.h> + +MODULE_DESCRIPTION("Xen filesystem"); +MODULE_LICENSE("GPL"); + +static int xenfs_fill_super(struct super_block *sb, void *data, int silent) +{ + static struct tree_descr xenfs_files[] = { + [2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR}, + {""}, + }; + + return simple_fill_super(sb, XENFS_SUPER_MAGIC, xenfs_files); +} + +static int xenfs_get_sb(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data, struct vfsmount *mnt) +{ + return get_sb_single(fs_type, flags, data, xenfs_fill_super, mnt); +} + +static struct file_system_type xenfs_type = { + .owner = THIS_MODULE, + .name = "xenfs", + .get_sb = xenfs_get_sb, + .kill_sb = kill_litter_super, +}; + +static int __init xenfs_init(void) +{ + if (xen_pv_domain()) + return register_filesystem(&xenfs_type); + + printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n"); + return 0; +} + +static void __exit xenfs_exit(void) +{ + if (xen_pv_domain()) + unregister_filesystem(&xenfs_type); +} + +module_init(xenfs_init); +module_exit(xenfs_exit); + diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c new file mode 100644 index 000000000000..875a4c59c594 --- /dev/null +++ b/drivers/xen/xenfs/xenbus.c @@ -0,0 +1,593 @@ +/* + * Driver giving user-space access to the kernel's xenbus connection + * to xenstore. + * + * Copyright (c) 2005, Christian Limpach + * Copyright (c) 2005, Rusty Russell, IBM Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation; or, when distributed + * separately from the Linux kernel or incorporated into other + * software packages, subject to the following license: + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this source file (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, modify, + * merge, publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + * Changes: + * 2008-10-07 Alex Zeffertt Replaced /proc/xen/xenbus with xenfs filesystem + * and /proc/xen compatibility mount point. + * Turned xenfs into a loadable module. + */ + +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/uio.h> +#include <linux/notifier.h> +#include <linux/wait.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/mutex.h> +#include <linux/spinlock.h> +#include <linux/mount.h> +#include <linux/pagemap.h> +#include <linux/uaccess.h> +#include <linux/init.h> +#include <linux/namei.h> +#include <linux/string.h> + +#include "xenfs.h" +#include "../xenbus/xenbus_comms.h" + +#include <xen/xenbus.h> +#include <asm/xen/hypervisor.h> + +/* + * An element of a list of outstanding transactions, for which we're + * still waiting a reply. + */ +struct xenbus_transaction_holder { + struct list_head list; + struct xenbus_transaction handle; +}; + +/* + * A buffer of data on the queue. + */ +struct read_buffer { + struct list_head list; + unsigned int cons; + unsigned int len; + char msg[]; +}; + +struct xenbus_file_priv { + /* + * msgbuffer_mutex is held while partial requests are built up + * and complete requests are acted on. It therefore protects + * the "transactions" and "watches" lists, and the partial + * request length and buffer. + * + * reply_mutex protects the reply being built up to return to + * usermode. It nests inside msgbuffer_mutex but may be held + * alone during a watch callback. + */ + struct mutex msgbuffer_mutex; + + /* In-progress transactions */ + struct list_head transactions; + + /* Active watches. */ + struct list_head watches; + + /* Partial request. */ + unsigned int len; + union { + struct xsd_sockmsg msg; + char buffer[PAGE_SIZE]; + } u; + + /* Response queue. */ + struct mutex reply_mutex; + struct list_head read_buffers; + wait_queue_head_t read_waitq; + +}; + +/* Read out any raw xenbus messages queued up. */ +static ssize_t xenbus_file_read(struct file *filp, + char __user *ubuf, + size_t len, loff_t *ppos) +{ + struct xenbus_file_priv *u = filp->private_data; + struct read_buffer *rb; + unsigned i; + int ret; + + mutex_lock(&u->reply_mutex); + while (list_empty(&u->read_buffers)) { + mutex_unlock(&u->reply_mutex); + ret = wait_event_interruptible(u->read_waitq, + !list_empty(&u->read_buffers)); + if (ret) + return ret; + mutex_lock(&u->reply_mutex); + } + + rb = list_entry(u->read_buffers.next, struct read_buffer, list); + i = 0; + while (i < len) { + unsigned sz = min((unsigned)len - i, rb->len - rb->cons); + + ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); + + i += sz - ret; + rb->cons += sz - ret; + + if (ret != sz) { + if (i == 0) + i = -EFAULT; + goto out; + } + + /* Clear out buffer if it has been consumed */ + if (rb->cons == rb->len) { + list_del(&rb->list); + kfree(rb); + if (list_empty(&u->read_buffers)) + break; + rb = list_entry(u->read_buffers.next, + struct read_buffer, list); + } + } + +out: + mutex_unlock(&u->reply_mutex); + return i; +} + +/* + * Add a buffer to the queue. Caller must hold the appropriate lock + * if the queue is not local. (Commonly the caller will build up + * multiple queued buffers on a temporary local list, and then add it + * to the appropriate list under lock once all the buffers have een + * successfully allocated.) 
+ */ +static int queue_reply(struct list_head *queue, const void *data, size_t len) +{ + struct read_buffer *rb; + + if (len == 0) + return 0; + + rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); + if (rb == NULL) + return -ENOMEM; + + rb->cons = 0; + rb->len = len; + + memcpy(rb->msg, data, len); + + list_add_tail(&rb->list, queue); + return 0; +} + +/* + * Free all the read_buffer s on a list. + * Caller must have sole reference to list. + */ +static void queue_cleanup(struct list_head *list) +{ + struct read_buffer *rb; + + while (!list_empty(list)) { + rb = list_entry(list->next, struct read_buffer, list); + list_del(list->next); + kfree(rb); + } +} + +struct watch_adapter { + struct list_head list; + struct xenbus_watch watch; + struct xenbus_file_priv *dev_data; + char *token; +}; + +static void free_watch_adapter(struct watch_adapter *watch) +{ + kfree(watch->watch.node); + kfree(watch->token); + kfree(watch); +} + +static struct watch_adapter *alloc_watch_adapter(const char *path, + const char *token) +{ + struct watch_adapter *watch; + + watch = kzalloc(sizeof(*watch), GFP_KERNEL); + if (watch == NULL) + goto out_fail; + + watch->watch.node = kstrdup(path, GFP_KERNEL); + if (watch->watch.node == NULL) + goto out_free; + + watch->token = kstrdup(token, GFP_KERNEL); + if (watch->token == NULL) + goto out_free; + + return watch; + +out_free: + free_watch_adapter(watch); + +out_fail: + return NULL; +} + +static void watch_fired(struct xenbus_watch *watch, + const char **vec, + unsigned int len) +{ + struct watch_adapter *adap; + struct xsd_sockmsg hdr; + const char *path, *token; + int path_len, tok_len, body_len, data_len = 0; + int ret; + LIST_HEAD(staging_q); + + adap = container_of(watch, struct watch_adapter, watch); + + path = vec[XS_WATCH_PATH]; + token = adap->token; + + path_len = strlen(path) + 1; + tok_len = strlen(token) + 1; + if (len > 2) + data_len = vec[len] - vec[2] + 1; + body_len = path_len + tok_len + data_len; + + hdr.type = XS_WATCH_EVENT; + hdr.len = body_len; + + mutex_lock(&adap->dev_data->reply_mutex); + + ret = queue_reply(&staging_q, &hdr, sizeof(hdr)); + if (!ret) + ret = queue_reply(&staging_q, path, path_len); + if (!ret) + ret = queue_reply(&staging_q, token, tok_len); + if (!ret && len > 2) + ret = queue_reply(&staging_q, vec[2], data_len); + + if (!ret) { + /* success: pass reply list onto watcher */ + list_splice_tail(&staging_q, &adap->dev_data->read_buffers); + wake_up(&adap->dev_data->read_waitq); + } else + queue_cleanup(&staging_q); + + mutex_unlock(&adap->dev_data->reply_mutex); +} + +static int xenbus_write_transaction(unsigned msg_type, + struct xenbus_file_priv *u) +{ + int rc, ret; + void *reply; + struct xenbus_transaction_holder *trans = NULL; + LIST_HEAD(staging_q); + + if (msg_type == XS_TRANSACTION_START) { + trans = kmalloc(sizeof(*trans), GFP_KERNEL); + if (!trans) { + rc = -ENOMEM; + goto out; + } + } + + reply = xenbus_dev_request_and_reply(&u->u.msg); + if (IS_ERR(reply)) { + kfree(trans); + rc = PTR_ERR(reply); + goto out; + } + + if (msg_type == XS_TRANSACTION_START) { + trans->handle.id = simple_strtoul(reply, NULL, 0); + + list_add(&trans->list, &u->transactions); + } else if (msg_type == XS_TRANSACTION_END) { + list_for_each_entry(trans, &u->transactions, list) + if (trans->handle.id == u->u.msg.tx_id) + break; + BUG_ON(&trans->list == &u->transactions); + list_del(&trans->list); + + kfree(trans); + } + + mutex_lock(&u->reply_mutex); + ret = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg)); + if (!ret) + ret = 
queue_reply(&staging_q, reply, u->u.msg.len); + if (!ret) { + list_splice_tail(&staging_q, &u->read_buffers); + wake_up(&u->read_waitq); + } else { + queue_cleanup(&staging_q); + rc = ret; + } + mutex_unlock(&u->reply_mutex); + + kfree(reply); + +out: + return rc; +} + +static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u) +{ + struct watch_adapter *watch, *tmp_watch; + char *path, *token; + int err, rc; + LIST_HEAD(staging_q); + + path = u->u.buffer + sizeof(u->u.msg); + token = memchr(path, 0, u->u.msg.len); + if (token == NULL) { + rc = -EILSEQ; + goto out; + } + token++; + + if (msg_type == XS_WATCH) { + watch = alloc_watch_adapter(path, token); + if (watch == NULL) { + rc = -ENOMEM; + goto out; + } + + watch->watch.callback = watch_fired; + watch->dev_data = u; + + err = register_xenbus_watch(&watch->watch); + if (err) { + free_watch_adapter(watch); + rc = err; + goto out; + } + list_add(&watch->list, &u->watches); + } else { + list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { + if (!strcmp(watch->token, token) && + !strcmp(watch->watch.node, path)) { + unregister_xenbus_watch(&watch->watch); + list_del(&watch->list); + free_watch_adapter(watch); + break; + } + } + } + + /* Success. Synthesize a reply to say all is OK. */ + { + struct { + struct xsd_sockmsg hdr; + char body[3]; + } __packed reply = { + { + .type = msg_type, + .len = sizeof(reply.body) + }, + "OK" + }; + + mutex_lock(&u->reply_mutex); + rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); + mutex_unlock(&u->reply_mutex); + } + +out: + return rc; +} + +static ssize_t xenbus_file_write(struct file *filp, + const char __user *ubuf, + size_t len, loff_t *ppos) +{ + struct xenbus_file_priv *u = filp->private_data; + uint32_t msg_type; + int rc = len; + int ret; + LIST_HEAD(staging_q); + + /* + * We're expecting usermode to be writing properly formed + * xenbus messages. If they write an incomplete message we + * buffer it up. Once it is complete, we act on it. + */ + + /* + * Make sure concurrent writers can't stomp all over each + * other's messages and make a mess of our partial message + * buffer. We don't make any attemppt to stop multiple + * writers from making a mess of each other's incomplete + * messages; we're just trying to guarantee our own internal + * consistency and make sure that single writes are handled + * atomically. + */ + mutex_lock(&u->msgbuffer_mutex); + + /* Get this out of the way early to avoid confusion */ + if (len == 0) + goto out; + + /* Can't write a xenbus message larger we can buffer */ + if ((len + u->len) > sizeof(u->u.buffer)) { + /* On error, dump existing buffer */ + u->len = 0; + rc = -EINVAL; + goto out; + } + + ret = copy_from_user(u->u.buffer + u->len, ubuf, len); + + if (ret == len) { + rc = -EFAULT; + goto out; + } + + /* Deal with a partial copy. */ + len -= ret; + rc = len; + + u->len += len; + + /* Return if we haven't got a full message yet */ + if (u->len < sizeof(u->u.msg)) + goto out; /* not even the header yet */ + + /* If we're expecting a message that's larger than we can + possibly send, dump what we have and return an error. */ + if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) { + rc = -E2BIG; + u->len = 0; + goto out; + } + + if (u->len < (sizeof(u->u.msg) + u->u.msg.len)) + goto out; /* incomplete data portion */ + + /* + * OK, now we have a complete message. Do something with it. 
+ */ + + msg_type = u->u.msg.type; + + switch (msg_type) { + case XS_TRANSACTION_START: + case XS_TRANSACTION_END: + case XS_DIRECTORY: + case XS_READ: + case XS_GET_PERMS: + case XS_RELEASE: + case XS_GET_DOMAIN_PATH: + case XS_WRITE: + case XS_MKDIR: + case XS_RM: + case XS_SET_PERMS: + /* Send out a transaction */ + ret = xenbus_write_transaction(msg_type, u); + break; + + case XS_WATCH: + case XS_UNWATCH: + /* (Un)Ask for some path to be watched for changes */ + ret = xenbus_write_watch(msg_type, u); + break; + + default: + ret = -EINVAL; + break; + } + if (ret != 0) + rc = ret; + + /* Buffered message consumed */ + u->len = 0; + + out: + mutex_unlock(&u->msgbuffer_mutex); + return rc; +} + +static int xenbus_file_open(struct inode *inode, struct file *filp) +{ + struct xenbus_file_priv *u; + + if (xen_store_evtchn == 0) + return -ENOENT; + + nonseekable_open(inode, filp); + + u = kzalloc(sizeof(*u), GFP_KERNEL); + if (u == NULL) + return -ENOMEM; + + INIT_LIST_HEAD(&u->transactions); + INIT_LIST_HEAD(&u->watches); + INIT_LIST_HEAD(&u->read_buffers); + init_waitqueue_head(&u->read_waitq); + + mutex_init(&u->reply_mutex); + mutex_init(&u->msgbuffer_mutex); + + filp->private_data = u; + + return 0; +} + +static int xenbus_file_release(struct inode *inode, struct file *filp) +{ + struct xenbus_file_priv *u = filp->private_data; + struct xenbus_transaction_holder *trans, *tmp; + struct watch_adapter *watch, *tmp_watch; + + /* + * No need for locking here because there are no other users, + * by definition. + */ + + list_for_each_entry_safe(trans, tmp, &u->transactions, list) { + xenbus_transaction_end(trans->handle, 1); + list_del(&trans->list); + kfree(trans); + } + + list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) { + unregister_xenbus_watch(&watch->watch); + list_del(&watch->list); + free_watch_adapter(watch); + } + + kfree(u); + + return 0; +} + +static unsigned int xenbus_file_poll(struct file *file, poll_table *wait) +{ + struct xenbus_file_priv *u = file->private_data; + + poll_wait(file, &u->read_waitq, wait); + if (!list_empty(&u->read_buffers)) + return POLLIN | POLLRDNORM; + return 0; +} + +const struct file_operations xenbus_file_ops = { + .read = xenbus_file_read, + .write = xenbus_file_write, + .open = xenbus_file_open, + .release = xenbus_file_release, + .poll = xenbus_file_poll, +}; diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h new file mode 100644 index 000000000000..51f08b2d0bf1 --- /dev/null +++ b/drivers/xen/xenfs/xenfs.h @@ -0,0 +1,6 @@ +#ifndef _XENFS_XENBUS_H +#define _XENFS_XENBUS_H + +extern const struct file_operations xenbus_file_ops; + +#endif /* _XENFS_XENBUS_H */ |
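xenbus_file_write() above buffers data until a complete xsd_sockmsg header plus payload has arrived, dispatches it, and queues the reply (header followed by body) for a later read(). A hypothetical userspace round trip under that protocol; the struct layout and XS_READ value mirror xen's xs_wire.h, and "domid" is only an example of a relative xenstore node:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

struct xsd_sockmsg { uint32_t type, req_id, tx_id, len; }; /* mirrors xs_wire.h */
#define XS_READ 2                                          /* ditto */

int main(void)
{
        const char *path = "domid";             /* example relative node */
        char req[sizeof(struct xsd_sockmsg) + 32];
        struct xsd_sockmsg *hdr = (struct xsd_sockmsg *)req;
        char reply[4096];
        ssize_t n;
        int fd = open("/proc/xen/xenbus", O_RDWR);

        if (fd < 0)
                return 1;

        memset(req, 0, sizeof(req));
        hdr->type = XS_READ;
        hdr->len = strlen(path) + 1;            /* body is the NUL-terminated path */
        memcpy(req + sizeof(*hdr), path, hdr->len);

        /* One complete message; a partial write would simply be buffered. */
        if (write(fd, req, sizeof(*hdr) + hdr->len) < 0)
                return 1;

        /* The queued reply is an xsd_sockmsg header followed by the value. */
        n = read(fd, reply, sizeof(reply));
        if (n >= (ssize_t)sizeof(*hdr))
                printf("value: %.*s\n", (int)(n - sizeof(*hdr)),
                       reply + sizeof(*hdr));

        close(fd);
        return 0;
}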