author     Jeff Garzik <jeff@garzik.org>    2006-02-23 21:16:27 -0500
committer  Jeff Garzik <jeff@garzik.org>    2006-02-23 21:16:27 -0500
commit     7b0386921db20add25afd8678ed34a9253e512fc (patch)
tree       8c6b888b76211e38e6c1c3006553dc4b2b24c75e
parent     c5580a7ecb859c6821dd761c95fa150ec7695ae1 (diff)
parent     22fe472cb430ce45c4fb9b6d13060dd724d6dbc8 (diff)
Merge branch 'upstream-fixes'
120 files changed, 1499 insertions, 2190 deletions
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt index e71bc6cbbc5e..57a09f99ecb0 100644 --- a/Documentation/cpu-hotplug.txt +++ b/Documentation/cpu-hotplug.txt @@ -46,10 +46,12 @@ maxcpus=n Restrict boot time cpus to n. Say if you have 4 cpus, using maxcpus=2 will only boot 2. You can choose to bring the other cpus later online, read FAQ's for more info. -additional_cpus=n [x86_64, s390 only] use this to limit hotpluggable cpus. - This option sets +additional_cpus*=n Use this to limit hotpluggable cpus. This option sets cpu_possible_map = cpu_present_map + additional_cpus +(*) Option valid only for following architectures +- x86_64, ia64, s390 + ia64 and x86_64 use the number of disabled local apics in ACPI tables MADT to determine the number of potentially hot-pluggable cpus. The implementation should only rely on this to count the #of cpus, but *MUST* not rely on the @@ -57,6 +59,9 @@ apicid values in those tables for disabled apics. In the event BIOS doesnt mark such hot-pluggable cpus as disabled entries, one could use this parameter "additional_cpus=x" to represent those cpus in the cpu_possible_map. +s390 uses the number of cpus it detects at IPL time to also the number of bits +in cpu_possible_map. If it is desired to add additional cpus at a later time +the number should be specified using this option or the possible_cpus option. possible_cpus=n [s390 only] use this to set hotpluggable cpus. This option sets possible_cpus bits in diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 79f0ed9f60de..975e14e150ae 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt @@ -178,3 +178,12 @@ Why: The ISA interface is faster and should be always available. The I2C probing is also known to cause trouble in at least one case (see bug #5889.) Who: Jean Delvare <khali@linux-fr.org> + +--------------------------- + +What: mount/umount uevents +When: February 2007 +Why: These events are not correct, and do not properly let userspace know + when a file system has been mounted or unmounted. Userspace should + poll the /proc/mounts file instead to detect this properly. +Who: Greg Kroah-Hartman <gregkh@suse.de> diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt index dbe4d87d2615..8a155418c705 100644 --- a/Documentation/filesystems/tmpfs.txt +++ b/Documentation/filesystems/tmpfs.txt @@ -79,15 +79,18 @@ that instance in a system with many cpus making intensive use of it. tmpfs has a mount option to set the NUMA memory allocation policy for -all files in that instance: -mpol=interleave prefers to allocate memory from each node in turn -mpol=default prefers to allocate memory from the local node -mpol=bind prefers to allocate from mpol_nodelist -mpol=preferred prefers to allocate from first node in mpol_nodelist +all files in that instance (if CONFIG_NUMA is enabled) - which can be +adjusted on the fly via 'mount -o remount ...' -The following mount option is used in conjunction with mpol=interleave, -mpol=bind or mpol=preferred: -mpol_nodelist: nodelist suitable for parsing with nodelist_parse. 
+mpol=default prefers to allocate memory from the local node +mpol=prefer:Node prefers to allocate memory from the given Node +mpol=bind:NodeList allocates memory only from nodes in NodeList +mpol=interleave prefers to allocate from each node in turn +mpol=interleave:NodeList allocates from each node of NodeList in turn + +NodeList format is a comma-separated list of decimal numbers and ranges, +a range being two hyphen-separated decimal numbers, the smallest and +largest node numbers in the range. For example, mpol=bind:0-3,5,7,9-15 To specify the initial root directory you can use the following mount @@ -109,4 +112,4 @@ RAM/SWAP in 10240 inodes and it is only accessible by root. Author: Christoph Rohland <cr@sap.com>, 1.12.01 Updated: - Hugh Dickins <hugh@veritas.com>, 13 March 2005 + Hugh Dickins <hugh@veritas.com>, 19 February 2006 diff --git a/Documentation/filesystems/v9fs.txt b/Documentation/filesystems/v9fs.txt index 4e92feb6b507..24c7a9c41f0d 100644 --- a/Documentation/filesystems/v9fs.txt +++ b/Documentation/filesystems/v9fs.txt @@ -57,8 +57,6 @@ OPTIONS port=n port to connect to on the remote server - timeout=n request timeouts (in ms) (default 60000ms) - noextend force legacy mode (no 9P2000.u semantics) uid attempt to mount as a particular uid @@ -74,10 +72,16 @@ OPTIONS RESOURCES ========= -The Linux version of the 9P server, along with some client-side utilities -can be found at http://v9fs.sf.net (along with a CVS repository of the -development branch of this module). There are user and developer mailing -lists here, as well as a bug-tracker. +The Linux version of the 9P server is now maintained under the npfs project +on sourceforge (http://sourceforge.net/projects/npfs). + +There are user and developer mailing lists available through the v9fs project +on sourceforge (http://sourceforge.net/projects/v9fs). + +News and other information is maintained on SWiK (http://swik.net/v9fs). + +Bug reports may be issued through the kernel.org bugzilla +(http://bugzilla.kernel.org) For more information on the Plan 9 Operating System check out http://plan9.bell-labs.com/plan9 diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt index 9f11d36a8c10..b0c7ab93dcb9 100644 --- a/Documentation/sysctl/kernel.txt +++ b/Documentation/sysctl/kernel.txt @@ -16,6 +16,7 @@ before actually making adjustments. Currently, these files might (depending on your configuration) show up in /proc/sys/kernel: +- acpi_video_flags - acct - core_pattern - core_uses_pid @@ -57,6 +58,15 @@ show up in /proc/sys/kernel: ============================================================== +acpi_video_flags: + +flags + +See Doc*/kernel/power/video.txt, it allows mode of video boot to be +set during run time. + +============================================================== + acct: highwater lowwater frequency diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c index 48b1e19b131f..e851d86c212c 100644 --- a/arch/arm/common/rtctime.c +++ b/arch/arm/common/rtctime.c @@ -128,19 +128,27 @@ EXPORT_SYMBOL(rtc_tm_to_time); /* * Calculate the next alarm time given the requested alarm time mask * and the current time. - * - * FIXME: for now, we just copy the alarm time because we're lazy (and - * is therefore buggy - setting a 10am alarm at 8pm will not result in - * the alarm triggering.) 
*/ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc_time *alrm) { + unsigned long next_time; + unsigned long now_time; + next->tm_year = now->tm_year; next->tm_mon = now->tm_mon; next->tm_mday = now->tm_mday; next->tm_hour = alrm->tm_hour; next->tm_min = alrm->tm_min; next->tm_sec = alrm->tm_sec; + + rtc_tm_to_time(now, &now_time); + rtc_tm_to_time(next, &next_time); + + if (next_time < now_time) { + /* Advance one day */ + next_time += 60 * 60 * 24; + rtc_time_to_tm(next_time, next); + } } static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 964cd717506b..ec48d70c6d8b 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -566,7 +566,7 @@ ENTRY(__switch_to) ldr r6, [r2, #TI_CPU_DOMAIN]! #endif #if __LINUX_ARM_ARCH__ >= 6 -#ifdef CONFIG_CPU_MPCORE +#ifdef CONFIG_CPU_32v6K clrex #else strex r5, r4, [ip] @ Clear exclusive monitor diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 10235b01582e..03924bcc6129 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -19,6 +19,7 @@ #include <linux/personality.h> #include <linux/ptrace.h> #include <linux/kallsyms.h> +#include <linux/delay.h> #include <linux/init.h> #include <asm/atomic.h> @@ -231,6 +232,13 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) __die(str, err, thread, regs); bust_spinlocks(0); spin_unlock_irq(&die_lock); + + if (panic_on_oops) { + printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n"); + ssleep(5); + panic("Fatal exception"); + } + do_exit(SIGSEGV); } diff --git a/arch/arm/mach-at91rm9200/devices.c b/arch/arm/mach-at91rm9200/devices.c index 8df3e5245651..57eedd5beaf6 100644 --- a/arch/arm/mach-at91rm9200/devices.c +++ b/arch/arm/mach-at91rm9200/devices.c @@ -100,8 +100,10 @@ void __init at91_add_device_udc(struct at91_udc_data *data) at91_set_gpio_input(data->vbus_pin, 0); at91_set_deglitch(data->vbus_pin, 1); } - if (data->pullup_pin) + if (data->pullup_pin) { at91_set_gpio_output(data->pullup_pin, 0); + at91_set_multi_drive(data->pullup_pin, 1); + } udc_data = *data; platform_device_register(&at91rm9200_udc_device); diff --git a/arch/arm/mach-at91rm9200/gpio.c b/arch/arm/mach-at91rm9200/gpio.c index 2fd2ef583e4d..a9f718bf8ba8 100644 --- a/arch/arm/mach-at91rm9200/gpio.c +++ b/arch/arm/mach-at91rm9200/gpio.c @@ -159,6 +159,23 @@ int __init_or_module at91_set_deglitch(unsigned pin, int is_on) } EXPORT_SYMBOL(at91_set_deglitch); +/* + * enable/disable the multi-driver; This is only valid for output and + * allows the output pin to run as an open collector output. + */ +int __init_or_module at91_set_multi_drive(unsigned pin, int is_on) +{ + void __iomem *pio = pin_to_controller(pin); + unsigned mask = pin_to_mask(pin); + + if (!pio) + return -EINVAL; + + __raw_writel(mask, pio + (is_on ? 
PIO_MDER : PIO_MDDR)); + return 0; +} +EXPORT_SYMBOL(at91_set_multi_drive); + /*--------------------------------------------------------------------------*/ diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 4bdc9d4526cd..fbadf3021b9e 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c @@ -111,24 +111,30 @@ static int ixp4xx_set_irq_type(unsigned int irq, unsigned int type) if (line < 0) return -EINVAL; - if (type & IRQT_BOTHEDGE) { + switch (type){ + case IRQT_BOTHEDGE: int_style = IXP4XX_GPIO_STYLE_TRANSITIONAL; irq_type = IXP4XX_IRQ_EDGE; - } else if (type & IRQT_RISING) { + break; + case IRQT_RISING: int_style = IXP4XX_GPIO_STYLE_RISING_EDGE; irq_type = IXP4XX_IRQ_EDGE; - } else if (type & IRQT_FALLING) { + break; + case IRQT_FALLING: int_style = IXP4XX_GPIO_STYLE_FALLING_EDGE; irq_type = IXP4XX_IRQ_EDGE; - } else if (type & IRQT_HIGH) { + break; + case IRQT_HIGH: int_style = IXP4XX_GPIO_STYLE_ACTIVE_HIGH; irq_type = IXP4XX_IRQ_LEVEL; - } else if (type & IRQT_LOW) { + break; + case IRQT_LOW: int_style = IXP4XX_GPIO_STYLE_ACTIVE_LOW; irq_type = IXP4XX_IRQ_LEVEL; - } else + break; + default: return -EINVAL; - + } ixp4xx_config_irq(irq, irq_type); if (line >= 8) { /* pins 8-15 */ diff --git a/arch/arm/mach-ixp4xx/nslu2-power.c b/arch/arm/mach-ixp4xx/nslu2-power.c index b0ad9e901f6e..d80c362bc539 100644 --- a/arch/arm/mach-ixp4xx/nslu2-power.c +++ b/arch/arm/mach-ixp4xx/nslu2-power.c @@ -77,6 +77,9 @@ static int __init nslu2_power_init(void) static void __exit nslu2_power_exit(void) { + if (!(machine_is_nslu2())) + return; + free_irq(NSLU2_RB_IRQ, NULL); free_irq(NSLU2_PB_IRQ, NULL); } diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index f260a9d34f70..55411f21d838 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c @@ -50,6 +50,12 @@ static struct platform_device nslu2_i2c_controller = { .num_resources = 0, }; +static struct platform_device nslu2_beeper = { + .name = "ixp4xx-beeper", + .id = NSLU2_GPIO_BUZZ, + .num_resources = 0, +}; + static struct resource nslu2_uart_resources[] = { { .start = IXP4XX_UART1_BASE_PHYS, @@ -97,6 +103,7 @@ static struct platform_device *nslu2_devices[] __initdata = { &nslu2_i2c_controller, &nslu2_flash, &nslu2_uart, + &nslu2_beeper, }; static void nslu2_power_off(void) diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index b80d57d51699..722fbabc9cfb 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -240,6 +240,14 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys) int i; int myslot = -1; unsigned long val; + void __iomem *local_pci_cfg_base; + + val = __raw_readl(SYS_PCICTL); + if (!(val & 1)) { + printk("Not plugged into PCI backplane!\n"); + ret = -EIO; + goto out; + } if (nr == 0) { sys->mem_offset = 0; @@ -253,48 +261,45 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys) goto out; } - __raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28,PCI_IMAP0); - __raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28,PCI_IMAP1); - __raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28,PCI_IMAP2); - - __raw_writel(1, SYS_PCICTL); - - val = __raw_readl(SYS_PCICTL); - if (!(val & 1)) { - printk("Not plugged into PCI backplane!\n"); - ret = -EIO; - goto out; - } - /* * We need to discover the PCI core first to configure itself * before the main PCI probing is performed */ - for (i=0; i<32; i++) { + for (i=0; i<32; i++) if ((__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+DEVICE_ID_OFFSET) 
== VP_PCI_DEVICE_ID) && (__raw_readl(VERSATILE_PCI_VIRT_BASE+(i<<11)+CLASS_ID_OFFSET) == VP_PCI_CLASS_ID)) { myslot = i; - - __raw_writel(myslot, PCI_SELFID); - val = __raw_readl(VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET); - val |= (1<<2); - __raw_writel(val, VERSATILE_PCI_CFG_VIRT_BASE+(myslot<<11)+CSR_OFFSET); break; } - } if (myslot == -1) { printk("Cannot find PCI core!\n"); ret = -EIO; - } else { - printk("PCI core found (slot %d)\n",myslot); - /* Do not to map Versatile FPGA PCI device - into memory space as we are short of - mappable memory */ - pci_slot_ignore |= (1 << myslot); - ret = 1; + goto out; } + printk("PCI core found (slot %d)\n",myslot); + + __raw_writel(myslot, PCI_SELFID); + local_pci_cfg_base = (void *) VERSATILE_PCI_CFG_VIRT_BASE + (myslot << 11); + + val = __raw_readl(local_pci_cfg_base + CSR_OFFSET); + val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; + __raw_writel(val, local_pci_cfg_base + CSR_OFFSET); + + /* + * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM + */ + __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0); + __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1); + __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2); + + /* + * Do not to map Versatile FPGA PCI device into memory space + */ + pci_slot_ignore |= (1 << myslot); + ret = 1; + out: return ret; } @@ -305,18 +310,18 @@ struct pci_bus *pci_versatile_scan_bus(int nr, struct pci_sys_data *sys) return pci_scan_bus(sys->busnr, &pci_versatile_ops, sys); } -/* - * V3_LB_BASE? - local bus address - * V3_LB_MAP? - pci bus address - */ void __init pci_versatile_preinit(void) { -} + __raw_writel(VERSATILE_PCI_MEM_BASE0 >> 28, PCI_IMAP0); + __raw_writel(VERSATILE_PCI_MEM_BASE1 >> 28, PCI_IMAP1); + __raw_writel(VERSATILE_PCI_MEM_BASE2 >> 28, PCI_IMAP2); -void __init pci_versatile_postinit(void) -{ -} + __raw_writel(PHYS_OFFSET >> 28, PCI_SMAP0); + __raw_writel(PHYS_OFFSET >> 28, PCI_SMAP1); + __raw_writel(PHYS_OFFSET >> 28, PCI_SMAP2); + __raw_writel(1, SYS_PCICTL); +} /* * map the specified device/slot/pin to an IRQ. Different backplanes may need to modify this. 
@@ -326,16 +331,15 @@ static int __init versatile_map_irq(struct pci_dev *dev, u8 slot, u8 pin) int irq; int devslot = PCI_SLOT(dev->devfn); - /* slot, pin, irq - 24 1 27 - 25 1 28 untested - 26 1 29 - 27 1 30 untested - */ - - irq = 27 + ((slot + pin + 2) % 3); /* Fudged */ + /* slot, pin, irq + * 24 1 27 + * 25 1 28 + * 26 1 29 + * 27 1 30 + */ + irq = 27 + ((slot + pin - 1) & 3); - printk("map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq); + printk("PCI map irq: slot %d, pin %d, devslot %d, irq: %d\n",slot,pin,devslot,irq); return irq; } @@ -347,7 +351,6 @@ static struct hw_pci versatile_pci __initdata = { .setup = pci_versatile_setup, .scan = pci_versatile_scan_bus, .preinit = pci_versatile_preinit, - .postinit = pci_versatile_postinit, }; static int __init versatile_pci_init(void) diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index dbd346033122..8a7f65ba14b7 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -20,7 +20,7 @@ */ .align 5 ENTRY(v6_early_abort) -#ifdef CONFIG_CPU_MPCORE +#ifdef CONFIG_CPU_32v6K clrex #else strex r0, r1, [sp] @ Clear the exclusive monitor diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types index d0f9bb5e9023..8ab5300dcb94 100644 --- a/arch/arm/tools/mach-types +++ b/arch/arm/tools/mach-types @@ -12,7 +12,7 @@ # # http://www.arm.linux.org.uk/developer/machines/?action=new # -# Last update: Mon Jan 9 12:56:42 2006 +# Last update: Mon Feb 20 10:18:02 2006 # # machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number # @@ -904,7 +904,7 @@ wg302v2 MACH_WG302V2 WG302V2 890 eb42x MACH_EB42X EB42X 891 iq331es MACH_IQ331ES IQ331ES 892 cosydsp MACH_COSYDSP COSYDSP 893 -uplat7d MACH_UPLAT7D UPLAT7D 894 +uplat7d_proto MACH_UPLAT7D UPLAT7D 894 ptdavinci MACH_PTDAVINCI PTDAVINCI 895 mbus MACH_MBUS MBUS 896 nadia2vb MACH_NADIA2VB NADIA2VB 897 @@ -938,3 +938,34 @@ auckland MACH_AUCKLAND AUCKLAND 924 ak3220m MACH_AK3320M AK3320M 925 duramax MACH_DURAMAX DURAMAX 926 n35 MACH_N35 N35 927 +pronghorn MACH_PRONGHORN PRONGHORN 928 +fundy MACH_FUNDY FUNDY 929 +logicpd_pxa270 MACH_LOGICPD_PXA270 LOGICPD_PXA270 930 +cpu777 MACH_CPU777 CPU777 931 +simicon9201 MACH_SIMICON9201 SIMICON9201 932 +leap2_hpm MACH_LEAP2_HPM LEAP2_HPM 933 +cm922txa10 MACH_CM922TXA10 CM922TXA10 934 +sandgate MACH_PXA PXA 935 +sandgate2 MACH_SANDGATE2 SANDGATE2 936 +sandgate2g MACH_SANDGATE2G SANDGATE2G 937 +sandgate2p MACH_SANDGATE2P SANDGATE2P 938 +fred_jack MACH_FRED_JACK FRED_JACK 939 +ttg_color1 MACH_TTG_COLOR1 TTG_COLOR1 940 +nxeb500hmi MACH_NXEB500HMI NXEB500HMI 941 +netdcu8 MACH_NETDCU8 NETDCU8 942 +ml675050_cpu_boa MACH_ML675050_CPU_BOA ML675050_CPU_BOA 943 +ng_fvx538 MACH_NG_FVX538 NG_FVX538 944 +ng_fvs338 MACH_NG_FVS338 NG_FVS338 945 +pnx4103 MACH_PNX4103 PNX4103 946 +hesdb MACH_HESDB HESDB 947 +xsilo MACH_XSILO XSILO 948 +espresso MACH_ESPRESSO ESPRESSO 949 +emlc MACH_EMLC EMLC 950 +sisteron MACH_SISTERON SISTERON 951 +rx1950 MACH_RX1950 RX1950 952 +tsc_venus MACH_TSC_VENUS TSC_VENUS 953 +ds101j MACH_DS101J DS101J 954 +mxc300_30ads MACH_MXC30030ADS MXC30030ADS 955 +fujitsu_wimaxsoc MACH_FUJITSU_WIMAXSOC FUJITSU_WIMAXSOC 956 +dualpcmodem MACH_DUALPCMODEM DUALPCMODEM 957 +gesbc9312 MACH_GESBC9312 GESBC9312 958 diff --git a/arch/h8300/Kconfig.debug b/arch/h8300/Kconfig.debug index 55034d08abff..e0e9bcb015a9 100644 --- a/arch/h8300/Kconfig.debug +++ b/arch/h8300/Kconfig.debug @@ -34,7 +34,7 @@ config GDB_DEBUG help gdb stub exception support -config CONFIG_SH_STANDARD_BIOS +config SH_STANDARD_BIOS bool "Use gdb protocol serial console" 
depends on (!H8300H_SIM && !H8S_SIM) help diff --git a/arch/h8300/defconfig b/arch/h8300/defconfig index 9d9b491cfc2c..8f1ec3297150 100644 --- a/arch/h8300/defconfig +++ b/arch/h8300/defconfig @@ -328,7 +328,7 @@ CONFIG_FULLDEBUG=y CONFIG_NO_KERNEL_MSG=y # CONFIG_SYSCALL_PRINT is not set # CONFIG_GDB_DEBUG is not set -# CONFIG_CONFIG_SH_STANDARD_BIOS is not set +# CONFIG_SH_STANDARD_BIOS is not set # CONFIG_DEFAULT_CMDLINE is not set # CONFIG_BLKDEV_RESERVE is not set diff --git a/arch/i386/kernel/mpparse.c b/arch/i386/kernel/mpparse.c index 0102f3d50e57..e7609abf3796 100644 --- a/arch/i386/kernel/mpparse.c +++ b/arch/i386/kernel/mpparse.c @@ -710,7 +710,7 @@ void __init get_smp_config (void) * Read the physical hardware table. Anything here will * override the defaults. */ - if (!smp_read_mpc((void *)mpf->mpf_physptr)) { + if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) { smp_found_config = 0; printk(KERN_ERR "BIOS bug, MP table errors detected!...\n"); printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n"); diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c index fe55b28d3725..670cb49210af 100644 --- a/arch/m32r/kernel/sys_m32r.c +++ b/arch/m32r/kernel/sys_m32r.c @@ -29,28 +29,7 @@ /* * sys_tas() - test-and-set - * linuxthreads testing version */ -#ifndef CONFIG_SMP -asmlinkage int sys_tas(int *addr) -{ - int oldval; - unsigned long flags; - - if (!access_ok(VERIFY_WRITE, addr, sizeof (int))) - return -EFAULT; - local_irq_save(flags); - oldval = *addr; - if (!oldval) - *addr = 1; - local_irq_restore(flags); - return oldval; -} -#else /* CONFIG_SMP */ -#include <linux/spinlock.h> - -static DEFINE_SPINLOCK(tas_lock); - asmlinkage int sys_tas(int *addr) { int oldval; @@ -58,15 +37,43 @@ asmlinkage int sys_tas(int *addr) if (!access_ok(VERIFY_WRITE, addr, sizeof (int))) return -EFAULT; - _raw_spin_lock(&tas_lock); - oldval = *addr; - if (!oldval) - *addr = 1; - _raw_spin_unlock(&tas_lock); + /* atomic operation: + * oldval = *addr; *addr = 1; + */ + __asm__ __volatile__ ( + DCACHE_CLEAR("%0", "r4", "%1") + " .fillinsn\n" + "1:\n" + " lock %0, @%1 -> unlock %2, @%1\n" + "2:\n" + /* NOTE: + * The m32r processor can accept interrupts only + * at the 32-bit instruction boundary. + * So, in the above code, the "unlock" instruction + * can be executed continuously after the "lock" + * instruction execution without any interruptions. 
+ */ + ".section .fixup,\"ax\"\n" + " .balign 4\n" + "3: ldi %0, #%3\n" + " seth r14, #high(2b)\n" + " or3 r14, r14, #low(2b)\n" + " jmp r14\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + " .balign 4\n" + " .long 1b,3b\n" + ".previous\n" + : "=&r" (oldval) + : "r" (addr), "r" (1), "i"(-EFAULT) + : "r14", "memory" +#ifdef CONFIG_CHIP_M32700_TS1 + , "r4" +#endif /* CONFIG_CHIP_M32700_TS1 */ + ); return oldval; } -#endif /* CONFIG_SMP */ /* * sys_pipe() is the normal C calling standard for creating diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c index 3f9cb55d0356..2d8ad0727b6b 100644 --- a/arch/m68k/kernel/process.c +++ b/arch/m68k/kernel/process.c @@ -129,6 +129,9 @@ void machine_power_off(void) for (;;); } +void (*pm_power_off)(void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); + void show_regs(struct pt_regs * regs) { printk("\n"); diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index e17d3adff021..58c22cd344d3 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.15-rc2 -# Thu Nov 24 01:06:21 2005 +# Linux kernel version: 2.6.16-rc4 +# Tue Feb 21 13:44:31 2006 # CONFIG_MIPS=y @@ -144,7 +144,6 @@ CONFIG_PREEMPT_BKL=y # Code maturity level options # CONFIG_EXPERIMENTAL=y -CONFIG_CLEAN_COMPILE=y CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 @@ -250,6 +249,7 @@ CONFIG_NET=y # # Networking options # +# CONFIG_NETDEBUG is not set CONFIG_PACKET=y CONFIG_PACKET_MMAP=y CONFIG_UNIX=y @@ -289,6 +289,7 @@ CONFIG_TCP_CONG_BIC=y # SCTP Configuration (EXPERIMENTAL) # # CONFIG_IP_SCTP is not set + # CONFIG_ATM is not set # CONFIG_BRIDGE is not set # CONFIG_VLAN_8021Q is not set @@ -448,7 +449,7 @@ CONFIG_SCSI_SAS_ATTRS=m # # SCSI low-level drivers # -CONFIG_ISCSI_TCP=m +# CONFIG_ISCSI_TCP is not set # CONFIG_BLK_DEV_3W_XXXX_RAID is not set # CONFIG_SCSI_3W_9XXX is not set # CONFIG_SCSI_ACARD is not set @@ -774,6 +775,10 @@ CONFIG_USB_ARCH_HAS_OHCI=y # # +# EDAC - error detection and reporting (RAS) +# + +# # File systems # CONFIG_EXT2_FS=y diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index d8e2674a1543..4a9f1ecefaf2 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c @@ -103,8 +103,9 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) * one divide. */ u64 nsec = (u64)jiffies * TICK_NSEC; - value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec); - value->tv_usec /= NSEC_PER_USEC; + long rem; + value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); + value->tv_usec = rem / NSEC_PER_USEC; } #define ELF_CORE_EFLAGS EF_MIPS_ABI2 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index cec5f327e360..e31813779895 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c @@ -105,8 +105,9 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value) * one divide. 
*/ u64 nsec = (u64)jiffies * TICK_NSEC; - value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_usec); - value->tv_usec /= NSEC_PER_USEC; + long rem; + value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem); + value->tv_usec = rem / NSEC_PER_USEC; } #undef ELF_CORE_COPY_REGS diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 60353f5acc48..5f68b220c26d 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c @@ -230,6 +230,9 @@ sysn32_waitid(int which, compat_pid_t pid, long ret; mm_segment_t old_fs = get_fs(); + if (!access_ok(VERIFY_WRITE, uinfo, sizeof(*uinfo))) + return -EFAULT; + set_fs (KERNEL_DS); ret = sys_waitid(which, pid, uinfo, options, uru ? (struct rusage __user *) &ru : NULL); @@ -1450,25 +1453,6 @@ sys32_timer_create(u32 clock, struct sigevent32 __user *se32, timer_t __user *ti return sys_timer_create(clock, p, timer_id); } -asmlinkage long -sysn32_rt_sigtimedwait(const sigset_t __user *uthese, - siginfo_t __user *uinfo, - const struct compat_timespec __user *uts32, - size_t sigsetsize) -{ - struct timespec __user *uts = NULL; - - if (uts32) { - struct timespec ts; - uts = compat_alloc_user_space(sizeof(struct timespec)); - if (get_user(ts.tv_sec, &uts32->tv_sec) || - get_user(ts.tv_nsec, &uts32->tv_nsec) || - copy_to_user (uts, &ts, sizeof (ts))) - return -EFAULT; - } - return sys_rt_sigtimedwait(uthese, uinfo, uts, sigsetsize); -} - save_static_function(sys32_clone); __attribute_used__ noinline static int _sys32_clone(nabi_no_regargs struct pt_regs regs) diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index bc4980cefc8b..d87b5446fa13 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -245,9 +245,9 @@ EXPORT(sysn32_call_table) PTR sys_capget PTR sys_capset PTR sys32_rt_sigpending /* 6125 */ - PTR sysn32_rt_sigtimedwait + PTR compat_sys_rt_sigtimedwait PTR sys_rt_sigqueueinfo - PTR sys32_rt_sigsuspend + PTR sysn32_rt_sigsuspend PTR sys32_sigaltstack PTR compat_sys_utime /* 6130 */ PTR sys_mknod diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 8a8b8dd90417..237cd8a2cd32 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -4,7 +4,7 @@ * for more details. * * Copyright (C) 1991, 1992 Linus Torvalds - * Copyright (C) 1994 - 2000 Ralf Baechle + * Copyright (C) 1994 - 2000, 2006 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 
*/ #include <linux/cache.h> @@ -106,8 +106,6 @@ typedef struct compat_siginfo { #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) -extern int do_signal32(sigset_t *oldset, struct pt_regs *regs); - /* 32-bit compatibility types */ #define _NSIG_BPW32 32 @@ -198,7 +196,7 @@ __attribute_used__ noinline static int _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) { compat_sigset_t *uset; - sigset_t newset, saveset; + sigset_t newset; uset = (compat_sigset_t *) regs.regs[4]; if (get_sigset(&newset, uset)) @@ -206,19 +204,15 @@ _sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) sigdelsetmask(&newset, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); - saveset = current->blocked; + current->saved_sigmask = current->blocked; current->blocked = newset; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - regs.regs[2] = EINTR; - regs.regs[7] = 1; - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - if (do_signal32(&saveset, &regs)) - return -EINTR; - } + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; } save_static_function(sys32_rt_sigsuspend); @@ -226,8 +220,8 @@ __attribute_used__ noinline static int _sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) { compat_sigset_t *uset; - sigset_t newset, saveset; - size_t sigsetsize; + sigset_t newset; + size_t sigsetsize; /* XXX Don't preclude handling different sized sigset_t's. */ sigsetsize = regs.regs[5]; @@ -240,19 +234,15 @@ _sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) sigdelsetmask(&newset, ~_BLOCKABLE); spin_lock_irq(&current->sighand->siglock); - saveset = current->blocked; + current->saved_sigmask = current->blocked; current->blocked = newset; recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - regs.regs[2] = EINTR; - regs.regs[7] = 1; - while (1) { - current->state = TASK_INTERRUPTIBLE; - schedule(); - if (do_signal32(&saveset, &regs)) - return -EINTR; - } + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; } asmlinkage int sys32_sigaction(int sig, const struct sigaction32 *act, @@ -783,7 +773,7 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info, regs->regs[2] = EINTR; break; case ERESTARTSYS: - if(!(ka->sa.sa_flags & SA_RESTART)) { + if (!(ka->sa.sa_flags & SA_RESTART)) { regs->regs[2] = EINTR; break; } @@ -810,9 +800,10 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info, return ret; } -int do_signal32(sigset_t *oldset, struct pt_regs *regs) +void do_signal32(struct pt_regs *regs) { struct k_sigaction ka; + sigset_t *oldset; siginfo_t info; int signr; @@ -822,17 +813,30 @@ int do_signal32(sigset_t *oldset, struct pt_regs *regs) * if so. */ if (!user_mode(regs)) - return 1; + return; if (try_to_freeze()) goto no_signal; - if (!oldset) + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + oldset = &current->saved_sigmask; + else oldset = &current->blocked; signr = get_signal_to_deliver(&info, &ka, regs, NULL); - if (signr > 0) - return handle_signal(signr, &info, &ka, oldset, regs); + if (signr > 0) { + /* Whee! Actually deliver the signal. */ + if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { + /* + * A signal was successfully delivered; the saved + * sigmask will have been stored in the signal frame, + * and will be restored by sigreturn, so we can simply + * clear the TIF_RESTORE_SIGMASK flag.
+ */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) + clear_thread_flag(TIF_RESTORE_SIGMASK); + } + } no_signal: /* @@ -853,7 +857,15 @@ no_signal: regs->cp0_epc -= 4; } } - return 0; + + /* + * If there's no signal to deliver, we just put the saved sigmask + * back + */ + if (test_thread_flag(TIF_RESTORE_SIGMASK)) { + clear_thread_flag(TIF_RESTORE_SIGMASK); + sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); + } } asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 *act, diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index 5a3776096f07..3e168c08a3a8 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -81,6 +81,39 @@ struct rt_sigframe_n32 { #endif }; +extern void sigset_from_compat (sigset_t *set, compat_sigset_t *compat); + +save_static_function(sysn32_rt_sigsuspend); +__attribute_used__ noinline static int +_sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) +{ + compat_sigset_t __user *unewset, uset; + size_t sigsetsize; + sigset_t newset; + + /* XXX Don't preclude handling different sized sigset_t's. */ + sigsetsize = regs.regs[5]; + if (sigsetsize != sizeof(sigset_t)) + return -EINVAL; + + unewset = (compat_sigset_t __user *) regs.regs[4]; + if (copy_from_user(&uset, unewset, sizeof(uset))) + return -EFAULT; + sigset_from_compat (&newset, &uset); + sigdelsetmask(&newset, ~_BLOCKABLE); + + spin_lock_irq(&current->sighand->siglock); + current->saved_sigmask = current->blocked; + current->blocked = newset; + recalc_sigpending(); + spin_unlock_irq(&current->sighand->siglock); + + current->state = TASK_INTERRUPTIBLE; + schedule(); + set_thread_flag(TIF_RESTORE_SIGMASK); + return -ERESTARTNOHAND; +} + save_static_function(sysn32_rt_sigreturn); __attribute_used__ noinline static void _sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 25472fcaf715..5e189862e523 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -29,6 +29,7 @@ #include <linux/timex.h> #include <linux/sched.h> #include <linux/cpumask.h> +#include <linux/cpu.h> #include <asm/atomic.h> #include <asm/cpu.h> @@ -424,6 +425,25 @@ void flush_tlb_one(unsigned long vaddr) local_flush_tlb_one(vaddr); } +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +static int __init topology_init(void) +{ + int cpu; + int ret; + + for_each_cpu(cpu) { + ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL); + if (ret) + printk(KERN_WARNING "topology_init: register_cpu %d " + "failed (%d)\n", cpu, ret); + } + + return 0; +} + +subsys_initcall(topology_init); + EXPORT_SYMBOL(flush_tlb_page); EXPORT_SYMBOL(flush_tlb_one); EXPORT_SYMBOL(cpu_data); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index c9d2b5147ca3..005debbfbe84 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -3,7 +3,7 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 1994 - 1999, 2000, 01 Ralf Baechle + * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle * Copyright (C) 1995, 1996 Paul M. Antoine * Copyright (C) 1998 Ulf Carlsson * Copyright (C) 1999 Silicon Graphics, Inc.
@@ -548,6 +548,8 @@ asmlinkage void do_ov(struct pt_regs *regs) { siginfo_t info; + die_if_kernel("Integer overflow", regs); + info.si_code = FPE_INTOVF; info.si_signo = SIGFPE; info.si_errno = 0; diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S index 0e71580774ff..e54a62f2807c 100644 --- a/arch/mips/mm/cex-sb1.S +++ b/arch/mips/mm/cex-sb1.S @@ -64,7 +64,7 @@ LEAF(except_vec2_sb1) sd k0,0x170($0) sd k1,0x178($0) -#if CONFIG_SB1_CEX_ALWAYS_FATAL +#ifdef CONFIG_SB1_CEX_ALWAYS_FATAL j handle_vec2_sb1 nop #else diff --git a/arch/mips/pmc-sierra/yosemite/smp.c b/arch/mips/pmc-sierra/yosemite/smp.c index f17f575f58f0..7f8fda962190 100644 --- a/arch/mips/pmc-sierra/yosemite/smp.c +++ b/arch/mips/pmc-sierra/yosemite/smp.c @@ -94,7 +94,7 @@ void __init prom_prepare_cpus(unsigned int max_cpus) void prom_boot_secondary(int cpu, struct task_struct *idle) { unsigned long gp = (unsigned long) task_thread_info(idle); - unsigned long sp = __KSTK_TOP(idle); + unsigned long sp = __KSTK_TOS(idle); secondary_sp = sp; secondary_gp = gp; diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig index de46f62ac462..816aee7fcd25 100644 --- a/arch/mips/sibyte/Kconfig +++ b/arch/mips/sibyte/Kconfig @@ -102,11 +102,11 @@ config SIMULATION Build a kernel suitable for running under the GDB simulator. Primarily adjusts the kernel's notion of time. -config CONFIG_SB1_CEX_ALWAYS_FATAL +config SB1_CEX_ALWAYS_FATAL bool "All cache exceptions considered fatal (no recovery attempted)" depends on SIBYTE_SB1xxx_SOC -config CONFIG_SB1_CERR_STALL +config SB1_CERR_STALL bool "Stall (rather than panic) on fatal cache error" depends on SIBYTE_SB1xxx_SOC diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c index b2a1ba5d23df..9cf7d713b13c 100644 --- a/arch/mips/sibyte/bcm1480/irq.c +++ b/arch/mips/sibyte/bcm1480/irq.c @@ -139,7 +139,7 @@ void bcm1480_unmask_irq(int cpu, int irq) #ifdef CONFIG_SMP static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) { - int i = 0, old_cpu, cpu, int_on; + int i = 0, old_cpu, cpu, int_on, k; u64 cur_ints; irq_desc_t *desc = irq_desc + irq; unsigned long flags; @@ -165,7 +165,6 @@ static void bcm1480_set_affinity(unsigned int irq, cpumask_t mask) irq_dirty -= BCM1480_NR_IRQS_HALF; } - int k; for (k=0; k<2; k++) { /* Loop through high and low interrupt mask register */ cur_ints = ____raw_readq(IOADDR(A_BCM1480_IMR_MAPPER(old_cpu) + R_BCM1480_IMR_INTERRUPT_MASK_H + (k*BCM1480_IMR_HL_SPACING))); int_on = !(cur_ints & (((u64) 1) << irq_dirty)); @@ -216,6 +215,7 @@ static void ack_bcm1480_irq(unsigned int irq) { u64 pending; unsigned int irq_dirty; + int k; /* * If the interrupt was an HT interrupt, now is the time to @@ -227,7 +227,6 @@ static void ack_bcm1480_irq(unsigned int irq) if ((irq_dirty >= BCM1480_NR_IRQS_HALF) && (irq_dirty <= BCM1480_NR_IRQS)) { irq_dirty -= BCM1480_NR_IRQS_HALF; } - int k; for (k=0; k<2; k++) { /* Loop through high and low LDT interrupts */ pending = __raw_readq(IOADDR(A_BCM1480_IMR_REGISTER(bcm1480_irq_owner[irq], R_BCM1480_IMR_LDT_INTERRUPT_H + (k*BCM1480_IMR_HL_SPACING)))); diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 415659629394..2b03a09fe5e9 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -157,8 +157,7 @@ _GLOBAL(__secondary_hold) SET_REG_IMMEDIATE(r4, .hmt_init) mtctr r4 bctr -#else -#ifdef CONFIG_SMP +#elif defined(CONFIG_SMP) || defined(CONFIG_KEXEC) LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init) mtctr r4 mr r3,r24 @@ -166,7 +165,6 @@ 
_GLOBAL(__secondary_hold) #else BUG_OPCODE #endif -#endif /* This value is used to mark exception frames on the stack. */ .section ".toc","aw" diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 1ae96a8ed7e2..e789fef4eb8a 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c @@ -341,7 +341,7 @@ static int lparcfg_data(struct seq_file *m, void *v) const char *system_id = ""; unsigned int *lp_index_ptr, lp_index = 0; struct device_node *rtas_node; - int *lrdrp; + int *lrdrp = NULL; rootdn = find_path_device("/"); if (rootdn) { @@ -362,7 +362,9 @@ static int lparcfg_data(struct seq_file *m, void *v) seq_printf(m, "partition_id=%d\n", (int)lp_index); rtas_node = find_path_device("/rtas"); - lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL); + if (rtas_node) + lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", + NULL); if (lrdrp == NULL) { partition_potential_processors = vdso_data->processorCount; diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index d9a459c144d8..8a731ea877b7 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c @@ -79,15 +79,8 @@ EXPORT_SYMBOL(sys_sigreturn); EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); EXPORT_SYMBOL(csum_partial); @@ -185,9 +178,6 @@ EXPORT_SYMBOL(adb_try_handler_change); EXPORT_SYMBOL(cuda_request); EXPORT_SYMBOL(cuda_poll); #endif /* CONFIG_ADB_CUDA */ -#ifdef CONFIG_PPC_PMAC -EXPORT_SYMBOL(sys_ctrler); -#endif #ifdef CONFIG_VT EXPORT_SYMBOL(kd_mksound); #endif @@ -205,7 +195,6 @@ EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); @@ -214,7 +203,6 @@ EXPORT_SYMBOL(screen_info); #endif #ifdef CONFIG_PPC32 -EXPORT_SYMBOL(__delay); EXPORT_SYMBOL(timer_interrupt); EXPORT_SYMBOL(irq_desc); EXPORT_SYMBOL(tb_ticks_per_jiffy); @@ -222,10 +210,6 @@ EXPORT_SYMBOL(console_drivers); EXPORT_SYMBOL(cacheable_memcpy); #endif -EXPORT_SYMBOL(__up); -EXPORT_SYMBOL(__down); -EXPORT_SYMBOL(__down_interruptible); - #ifdef CONFIG_8xx EXPORT_SYMBOL(cpm_install_handler); EXPORT_SYMBOL(cpm_free_handler); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index a717dff695ef..f96c49b03ba0 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -311,8 +311,6 @@ void smp_release_cpus(void) DBG(" <- smp_release_cpus()\n"); } -#else -#define smp_release_cpus() #endif /* CONFIG_SMP || CONFIG_KEXEC */ /* @@ -473,10 +471,12 @@ void __init setup_system(void) check_smt_enabled(); smp_setup_cpu_maps(); +#ifdef CONFIG_SMP /* Release secondary cpus out of their spinloops at 0x60 now that * we can map physical -> logical CPU ids */ smp_release_cpus(); +#endif printk("Starting Linux PPC64 %s\n", system_utsname.version); diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index 475249dc2350..cd75ab2908fa 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c @@ -176,7 +176,6 @@ struct timex32 { }; extern int do_adjtimex(struct timex *); -extern void ppc_adjtimex(void); asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) { @@ -209,9 +208,6 @@ asmlinkage long 
compat_sys_adjtimex(struct timex32 __user *utp) ret = do_adjtimex(&txc); - /* adjust the conversion of TB to time of day to track adjtimex */ - ppc_adjtimex(); - if(put_user(txc.modes, &utp->modes) || __put_user(txc.offset, &utp->offset) || __put_user(txc.freq, &utp->freq) || diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 1886045a2fd8..2a7ddc579379 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -50,6 +50,7 @@ #include <linux/security.h> #include <linux/percpu.h> #include <linux/rtc.h> +#include <linux/jiffies.h> #include <asm/io.h> #include <asm/processor.h> @@ -99,7 +100,15 @@ EXPORT_SYMBOL(tb_ticks_per_usec); unsigned long tb_ticks_per_sec; u64 tb_to_xs; unsigned tb_to_us; -unsigned long processor_freq; + +#define TICKLEN_SCALE (SHIFT_SCALE - 10) +u64 last_tick_len; /* units are ns / 2^TICKLEN_SCALE */ +u64 ticklen_to_xs; /* 0.64 fraction */ + +/* If last_tick_len corresponds to about 1/HZ seconds, then + last_tick_len << TICKLEN_SHIFT will be about 2^63. */ +#define TICKLEN_SHIFT (63 - 30 - TICKLEN_SCALE + SHIFT_HZ) + DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL_GPL(rtc_lock); @@ -113,10 +122,6 @@ extern unsigned long wall_jiffies; extern struct timezone sys_tz; static long timezone_offset; -void ppc_adjtimex(void); - -static unsigned adjusting_time = 0; - unsigned long ppc_proc_freq; unsigned long ppc_tb_freq; @@ -178,8 +183,7 @@ static __inline__ void timer_check_rtc(void) */ if (ppc_md.set_rtc_time && ntp_synced() && xtime.tv_sec - last_rtc_update >= 659 && - abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ && - jiffies - wall_jiffies == 1) { + abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ) { struct rtc_time tm; to_tm(xtime.tv_sec + 1 + timezone_offset, &tm); tm.tm_year -= 1900; @@ -226,15 +230,14 @@ void do_gettimeofday(struct timeval *tv) if (__USE_RTC()) { /* do this the old way */ unsigned long flags, seq; - unsigned int sec, nsec, usec, lost; + unsigned int sec, nsec, usec; do { seq = read_seqbegin_irqsave(&xtime_lock, flags); sec = xtime.tv_sec; nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp); - lost = jiffies - wall_jiffies; } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - usec = nsec / 1000 + lost * (1000000 / HZ); + usec = nsec / 1000; while (usec >= 1000000) { usec -= 1000000; ++sec; @@ -248,23 +251,6 @@ void do_gettimeofday(struct timeval *tv) EXPORT_SYMBOL(do_gettimeofday); -/* Synchronize xtime with do_gettimeofday */ - -static inline void timer_sync_xtime(unsigned long cur_tb) -{ -#ifdef CONFIG_PPC64 - /* why do we do this? 
*/ - struct timeval my_tv; - - __do_gettimeofday(&my_tv, cur_tb); - - if (xtime.tv_sec <= my_tv.tv_sec) { - xtime.tv_sec = my_tv.tv_sec; - xtime.tv_nsec = my_tv.tv_usec * 1000; - } -#endif -} - /* * There are two copies of tb_to_xs and stamp_xsec so that no * lock is needed to access and use these values in @@ -323,15 +309,30 @@ static __inline__ void timer_recalc_offset(u64 cur_tb) { unsigned long offset; u64 new_stamp_xsec; + u64 tlen, t2x; if (__USE_RTC()) return; + tlen = current_tick_length(); offset = cur_tb - do_gtod.varp->tb_orig_stamp; - if ((offset & 0x80000000u) == 0) - return; - new_stamp_xsec = do_gtod.varp->stamp_xsec - + mulhdu(offset, do_gtod.varp->tb_to_xs); - update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs); + if (tlen == last_tick_len && offset < 0x80000000u) { + /* check that we're still in sync; if not, resync */ + struct timeval tv; + __do_gettimeofday(&tv, cur_tb); + if (tv.tv_sec <= xtime.tv_sec && + (tv.tv_sec < xtime.tv_sec || + tv.tv_usec * 1000 <= xtime.tv_nsec)) + return; + } + if (tlen != last_tick_len) { + t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs); + last_tick_len = tlen; + } else + t2x = do_gtod.varp->tb_to_xs; + new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC; + do_div(new_stamp_xsec, 1000000000); + new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC; + update_gtod(cur_tb, new_stamp_xsec, t2x); } #ifdef CONFIG_SMP @@ -462,13 +463,10 @@ void timer_interrupt(struct pt_regs * regs) write_seqlock(&xtime_lock); tb_last_jiffy += tb_ticks_per_jiffy; tb_last_stamp = per_cpu(last_jiffy, cpu); - timer_recalc_offset(tb_last_jiffy); do_timer(regs); - timer_sync_xtime(tb_last_jiffy); + timer_recalc_offset(tb_last_jiffy); timer_check_rtc(); write_sequnlock(&xtime_lock); - if (adjusting_time && (time_adjust == 0)) - ppc_adjtimex(); } next_dec = tb_ticks_per_jiffy - ticks; @@ -492,16 +490,18 @@ void timer_interrupt(struct pt_regs * regs) void wakeup_decrementer(void) { - int i; + unsigned long ticks; - set_dec(tb_ticks_per_jiffy); /* - * We don't expect this to be called on a machine with a 601, - * so using get_tbl is fine. + * The timebase gets saved on sleep and restored on wakeup, + * so all we need to do is to reset the decrementer. */ - tb_last_stamp = tb_last_jiffy = get_tb(); - for_each_cpu(i) - per_cpu(last_jiffy, i) = tb_last_stamp; + ticks = tb_ticks_since(__get_cpu_var(last_jiffy)); + if (ticks < tb_ticks_per_jiffy) + ticks = tb_ticks_per_jiffy - ticks; + else + ticks = 1; + set_dec(ticks); } #ifdef CONFIG_SMP @@ -541,8 +541,8 @@ int do_settimeofday(struct timespec *tv) time_t wtm_sec, new_sec = tv->tv_sec; long wtm_nsec, new_nsec = tv->tv_nsec; unsigned long flags; - long int tb_delta; - u64 new_xsec, tb_delta_xs; + u64 new_xsec; + unsigned long tb_delta; if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) return -EINVAL; @@ -563,9 +563,19 @@ int do_settimeofday(struct timespec *tv) first_settimeofday = 0; } #endif + + /* + * Subtract off the number of nanoseconds since the + * beginning of the last tick. + * Note that since we don't increment jiffies_64 anywhere other + * than in do_timer (since we don't have a lost tick problem), + * wall_jiffies will always be the same as jiffies, + * and therefore the (jiffies - wall_jiffies) computation + * has been removed. 
+ */ tb_delta = tb_ticks_since(tb_last_stamp); - tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; - tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); + tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */ + new_nsec -= SCALE_XSEC(tb_delta, 1000000000); wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec); wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec); @@ -580,12 +590,12 @@ int do_settimeofday(struct timespec *tv) ntp_clear(); - new_xsec = 0; - if (new_nsec != 0) { - new_xsec = (u64)new_nsec * XSEC_PER_SEC; + new_xsec = xtime.tv_nsec; + if (new_xsec != 0) { + new_xsec *= XSEC_PER_SEC; do_div(new_xsec, NSEC_PER_SEC); } - new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs; + new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC; update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; @@ -671,7 +681,7 @@ void __init time_init(void) unsigned long flags; unsigned long tm = 0; struct div_result res; - u64 scale; + u64 scale, x; unsigned shift; if (ppc_md.time_init != NULL) @@ -693,11 +703,36 @@ void __init time_init(void) } tb_ticks_per_jiffy = ppc_tb_freq / HZ; - tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; + tb_ticks_per_sec = ppc_tb_freq; tb_ticks_per_usec = ppc_tb_freq / 1000000; tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); - div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); - tb_to_xs = res.result_low; + + /* + * Calculate the length of each tick in ns. It will not be + * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ. + * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq, + * rounded up. + */ + x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1; + do_div(x, ppc_tb_freq); + tick_nsec = x; + last_tick_len = x << TICKLEN_SCALE; + + /* + * Compute ticklen_to_xs, which is a factor which gets multiplied + * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value. + * It is computed as: + * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9) + * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT + * so as to give the result as a 0.64 fixed-point fraction. + */ + div128_by_32(1ULL << (64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT), 0, + tb_ticks_per_jiffy, &res); + div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res); + ticklen_to_xs = res.result_low; + + /* Compute tb_to_xs from tick_nsec */ + tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs); /* * Compute scale factor for sched_clock. 
@@ -724,6 +759,14 @@ void __init time_init(void) tm = get_boot_time(); write_seqlock_irqsave(&xtime_lock, flags); + + /* If platform provided a timezone (pmac), we correct the time */ + if (timezone_offset) { + sys_tz.tz_minuteswest = -timezone_offset / 60; + sys_tz.tz_dsttime = 0; + tm -= timezone_offset; + } + xtime.tv_sec = tm; xtime.tv_nsec = 0; do_gtod.varp = &do_gtod.vars[0]; @@ -738,18 +781,11 @@ void __init time_init(void) vdso_data->tb_orig_stamp = tb_last_jiffy; vdso_data->tb_update_count = 0; vdso_data->tb_ticks_per_sec = tb_ticks_per_sec; - vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC; + vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC; vdso_data->tb_to_xs = tb_to_xs; time_freq = 0; - /* If platform provided a timezone (pmac), we correct the time */ - if (timezone_offset) { - sys_tz.tz_minuteswest = -timezone_offset / 60; - sys_tz.tz_dsttime = 0; - xtime.tv_sec -= timezone_offset; - } - last_rtc_update = xtime.tv_sec; set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); @@ -759,126 +795,6 @@ void __init time_init(void) set_dec(tb_ticks_per_jiffy); } -/* - * After adjtimex is called, adjust the conversion of tb ticks - * to microseconds to keep do_gettimeofday synchronized - * with ntpd. - * - * Use the time_adjust, time_freq and time_offset computed by adjtimex to - * adjust the frequency. - */ - -/* #define DEBUG_PPC_ADJTIMEX 1 */ - -void ppc_adjtimex(void) -{ -#ifdef CONFIG_PPC64 - unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec, - new_tb_to_xs, new_xsec, new_stamp_xsec; - unsigned long tb_ticks_per_sec_delta; - long delta_freq, ltemp; - struct div_result divres; - unsigned long flags; - long singleshot_ppm = 0; - - /* - * Compute parts per million frequency adjustment to - * accomplish the time adjustment implied by time_offset to be - * applied over the elapsed time indicated by time_constant. - * Use SHIFT_USEC to get it into the same units as - * time_freq. - */ - if ( time_offset < 0 ) { - ltemp = -time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - ltemp = -ltemp; - } else { - ltemp = time_offset; - ltemp <<= SHIFT_USEC - SHIFT_UPDATE; - ltemp >>= SHIFT_KG + time_constant; - } - - /* If there is a single shot time adjustment in progress */ - if ( time_adjust ) { -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: "); - if ( adjusting_time == 0 ) - printk("starting "); - printk("single shot time_adjust = %ld\n", time_adjust); -#endif - - adjusting_time = 1; - - /* - * Compute parts per million frequency adjustment - * to match time_adjust - */ - singleshot_ppm = tickadj * HZ; - /* - * The adjustment should be tickadj*HZ to match the code in - * linux/kernel/timer.c, but experiments show that this is too - * large. 
3/4 of tickadj*HZ seems about right - */ - singleshot_ppm -= singleshot_ppm / 4; - /* Use SHIFT_USEC to get it into the same units as time_freq */ - singleshot_ppm <<= SHIFT_USEC; - if ( time_adjust < 0 ) - singleshot_ppm = -singleshot_ppm; - } - else { -#ifdef DEBUG_PPC_ADJTIMEX - if ( adjusting_time ) - printk("ppc_adjtimex: ending single shot time_adjust\n"); -#endif - adjusting_time = 0; - } - - /* Add up all of the frequency adjustments */ - delta_freq = time_freq + ltemp + singleshot_ppm; - - /* - * Compute a new value for tb_ticks_per_sec based on - * the frequency adjustment - */ - den = 1000000 * (1 << (SHIFT_USEC - 8)); - if ( delta_freq < 0 ) { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta; - } - else { - tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den; - new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta; - } - -#ifdef DEBUG_PPC_ADJTIMEX - printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm); - printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec); -#endif - - /* - * Compute a new value of tb_to_xs (used to convert tb to - * microseconds) and a new value of stamp_xsec which is the - * time (in 1/2^20 second units) corresponding to - * tb_orig_stamp. This new value of stamp_xsec compensates - * for the change in frequency (implied by the new tb_to_xs) - * which guarantees that the current time remains the same. - */ - write_seqlock_irqsave( &xtime_lock, flags ); - tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp; - div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres); - new_tb_to_xs = divres.result_low; - new_xsec = mulhdu(tb_ticks, new_tb_to_xs); - - old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs); - new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec; - - update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs); - - write_sequnlock_irqrestore( &xtime_lock, flags ); -#endif /* CONFIG_PPC64 */ -} - #define FEBRUARY 2 #define STARTOFTIME 1970 diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index 558dd0692092..34714d3ea69a 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c @@ -1646,10 +1646,10 @@ static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode) KL0_SCC_CELL_ENABLE); MACIO_BIC(KEYLARGO_FCR1, - /*KL1_USB2_CELL_ENABLE |*/ KL1_I2S0_CELL_ENABLE | KL1_I2S0_CLK_ENABLE_BIT | KL1_I2S0_ENABLE | KL1_I2S1_CELL_ENABLE | - KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE); + KL1_I2S1_CLK_ENABLE_BIT | KL1_I2S1_ENABLE | + KL1_EIDE0_ENABLE); if (pmac_mb.board_flags & PMAC_MB_MOBILE) MACIO_BIC(KEYLARGO_FCR1, KL1_UIDE_RESET_N); @@ -2183,7 +2183,7 @@ static struct pmac_mb_def pmac_mb_defs[] = { }, { "PowerMac10,1", "Mac mini", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, - PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER, + PMAC_MB_MAY_SLEEP, }, { "iMac,1", "iMac (first generation)", PMAC_TYPE_ORIG_IMAC, paddington_features, @@ -2295,11 +2295,11 @@ static struct pmac_mb_def pmac_mb_defs[] = { }, { "PowerBook5,8", "PowerBook G4 15\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, - PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, + PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE, }, { "PowerBook5,9", "PowerBook G4 17\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, - 
PMAC_MB_MAY_SLEEP | PMAC_MB_HAS_FW_POWER | PMAC_MB_MOBILE, + PMAC_MB_MAY_SLEEP | PMAC_MB_MOBILE, }, { "PowerBook6,1", "PowerBook G4 12\"", PMAC_TYPE_UNKNOWN_INTREPID, intrepid_features, diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 8e6b1ed1396e..8d710af50756 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c @@ -292,7 +292,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) if (start_cpu == RTAS_UNKNOWN_SERVICE) return 1; - status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, lcpu); + status = rtas_call(start_cpu, 3, 1, NULL, pcpu, start_here, pcpu); if (status != 0) { printk(KERN_ERR "start-cpu failed: %i\n", status); return 0; diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c index 15bd9b448a48..82adb4601348 100644 --- a/arch/ppc/kernel/ppc_ksyms.c +++ b/arch/ppc/kernel/ppc_ksyms.c @@ -93,15 +93,8 @@ EXPORT_SYMBOL(test_and_change_bit); EXPORT_SYMBOL(strcpy); EXPORT_SYMBOL(strncpy); EXPORT_SYMBOL(strcat); -EXPORT_SYMBOL(strncat); -EXPORT_SYMBOL(strchr); -EXPORT_SYMBOL(strrchr); -EXPORT_SYMBOL(strpbrk); -EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strlen); -EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strcmp); -EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(strcasecmp); EXPORT_SYMBOL(__div64_32); @@ -253,7 +246,6 @@ EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(cacheable_memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); -EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memchr); diff --git a/arch/ppc/xmon/start.c b/arch/ppc/xmon/start.c index 4344cbe9b5c5..484f5bb1aa3e 100644 --- a/arch/ppc/xmon/start.c +++ b/arch/ppc/xmon/start.c @@ -146,19 +146,6 @@ xmon_map_scc(void) static int scc_initialized = 0; void xmon_init_scc(void); -extern void cuda_poll(void); - -static inline void do_poll_adb(void) -{ -#ifdef CONFIG_ADB_PMU - if (sys_ctrler == SYS_CTRLER_PMU) - pmu_poll_adb(); -#endif /* CONFIG_ADB_PMU */ -#ifdef CONFIG_ADB_CUDA - if (sys_ctrler == SYS_CTRLER_CUDA) - cuda_poll(); -#endif /* CONFIG_ADB_CUDA */ -} int xmon_write(void *handle, void *ptr, int nb) @@ -189,7 +176,7 @@ xmon_write(void *handle, void *ptr, int nb) ct = 0; for (i = 0; i < nb; ++i) { while ((*sccc & TXRDY) == 0) - do_poll_adb(); + ; c = p[i]; if (c == '\n' && !ct) { c = '\r'; diff --git a/arch/x86_64/defconfig b/arch/x86_64/defconfig index b337136f28b6..ce4de61ed85d 100644 --- a/arch/x86_64/defconfig +++ b/arch/x86_64/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.16-rc3 -# Mon Feb 13 22:31:24 2006 +# Linux kernel version: 2.6.16-rc3-git9 +# Sat Feb 18 00:27:03 2006 # CONFIG_X86_64=y CONFIG_64BIT=y @@ -1317,7 +1317,7 @@ CONFIG_DETECT_SOFTLOCKUP=y # CONFIG_DEBUG_SPINLOCK is not set # CONFIG_DEBUG_SPINLOCK_SLEEP is not set # CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_INFO=y +# CONFIG_DEBUG_INFO is not set CONFIG_DEBUG_FS=y # CONFIG_DEBUG_VM is not set # CONFIG_FRAME_POINTER is not set diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 93e44d0292ab..bc9b2bcd7dba 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c @@ -58,6 +58,7 @@ #include <linux/suspend.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_ioctl.h> +#include <scsi/scsi.h> #include <asm/uaccess.h> @@ -380,6 +381,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command * memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - 
CDROM_PACKET_SIZE); + rq->cmd_len = COMMAND_SIZE(rq->cmd[0]); rq->ref_count++; rq->flags |= REQ_NOMERGE; @@ -1495,40 +1497,42 @@ static int pkt_set_write_settings(struct pktcdvd_device *pd) } /* - * 0 -- we can write to this track, 1 -- we can't + * 1 -- we can write to this track, 0 -- we can't */ -static int pkt_good_track(track_information *ti) +static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) { - /* - * only good for CD-RW at the moment, not DVD-RW - */ + switch (pd->mmc3_profile) { + case 0x1a: /* DVD+RW */ + case 0x12: /* DVD-RAM */ + /* The track is always writable on DVD+RW/DVD-RAM */ + return 1; + default: + break; + } - /* - * FIXME: only for FP - */ - if (ti->fp == 0) + if (!ti->packet || !ti->fp) return 0; /* * "good" settings as per Mt Fuji. */ - if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1) - return 0; + if (ti->rt == 0 && ti->blank == 0) + return 1; - if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1) - return 0; + if (ti->rt == 0 && ti->blank == 1) + return 1; - if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1) - return 0; + if (ti->rt == 1 && ti->blank == 0) + return 1; printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); - return 1; + return 0; } /* - * 0 -- we can write to this disc, 1 -- we can't + * 1 -- we can write to this disc, 0 -- we can't */ -static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) +static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) { switch (pd->mmc3_profile) { case 0x0a: /* CD-RW */ @@ -1537,10 +1541,10 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) case 0x1a: /* DVD+RW */ case 0x13: /* DVD-RW */ case 0x12: /* DVD-RAM */ - return 0; + return 1; default: VPRINTK("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile); - return 1; + return 0; } /* @@ -1549,25 +1553,25 @@ static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di) */ if (di->disc_type == 0xff) { printk("pktcdvd: Unknown disc. No track?\n"); - return 1; + return 0; } if (di->disc_type != 0x20 && di->disc_type != 0) { printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type); - return 1; + return 0; } if (di->erasable == 0) { printk("pktcdvd: Disc not erasable\n"); - return 1; + return 0; } if (di->border_status == PACKET_SESSION_RESERVED) { printk("pktcdvd: Can't write to last track (reserved)\n"); - return 1; + return 0; } - return 0; + return 1; } static int pkt_probe_settings(struct pktcdvd_device *pd) @@ -1592,23 +1596,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) return ret; } - if (pkt_good_disc(pd, &di)) - return -ENXIO; + if (!pkt_writable_disc(pd, &di)) + return -EROFS; - switch (pd->mmc3_profile) { - case 0x1a: /* DVD+RW */ - printk("pktcdvd: inserted media is DVD+RW\n"); - break; - case 0x13: /* DVD-RW */ - printk("pktcdvd: inserted media is DVD-RW\n"); - break; - case 0x12: /* DVD-RAM */ - printk("pktcdvd: inserted media is DVD-RAM\n"); - break; - default: - printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : ""); - break; - } pd->type = di.erasable ? 
PACKET_CDRW : PACKET_CDR; track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */ @@ -1617,9 +1607,9 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) return ret; } - if (pkt_good_track(&ti)) { + if (!pkt_writable_track(pd, &ti)) { printk("pktcdvd: can't write to this track\n"); - return -ENXIO; + return -EROFS; } /* @@ -1633,7 +1623,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) } if (pd->settings.size > PACKET_MAX_SECTORS) { printk("pktcdvd: packet size is too big\n"); - return -ENXIO; + return -EROFS; } pd->settings.fp = ti.fp; pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); @@ -1675,7 +1665,7 @@ static int pkt_probe_settings(struct pktcdvd_device *pd) break; default: printk("pktcdvd: unknown data mode\n"); - return 1; + return -EROFS; } return 0; } @@ -1886,7 +1876,7 @@ static int pkt_open_write(struct pktcdvd_device *pd) if ((ret = pkt_probe_settings(pd))) { VPRINTK("pktcdvd: %s failed probe\n", pd->name); - return -EROFS; + return ret; } if ((ret = pkt_set_write_settings(pd))) { diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig index 486ed8a11b59..a4d425d2dce2 100644 --- a/drivers/char/agp/Kconfig +++ b/drivers/char/agp/Kconfig @@ -15,22 +15,23 @@ config AGP due to kernel allocation issues), you could use PCI accesses and have up to a couple gigs of texture space. - Note that this is the only means to have XFree4/GLX use + Note that this is the only means to have X/GLX use write-combining with MTRR support on the AGP bus. Without it, OpenGL direct rendering will be a lot slower but still faster than PIO. - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. - To compile this driver as a module, choose M here: the module will be called agpgart. + You should say Y here if you want to use GLX or DRI. + + If unsure, say N. + config AGP_ALI tristate "ALI chipset support" depends on AGP && X86_32 ---help--- This option gives you AGP support for the GLX component of - XFree86 4.x on the following ALi chipsets. The supported chipsets + X on the following ALi chipsets. The supported chipsets include M1541, M1621, M1631, M1632, M1641,M1647,and M1651. For the ALi-chipset question, ALi suggests you refer to <http://www.ali.com.tw/eng/support/index.shtml>. @@ -40,28 +41,19 @@ config AGP_ALI timing issues, this chipset cannot do AGP 2x with the G200. This is a hardware limitation. AGP 1x seems to be fine, though. - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. - config AGP_ATI tristate "ATI chipset support" depends on AGP && X86_32 ---help--- - This option gives you AGP support for the GLX component of - XFree86 4.x on the ATI RadeonIGP family of chipsets. - - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. + This option gives you AGP support for the GLX component of + X on the ATI RadeonIGP family of chipsets. config AGP_AMD tristate "AMD Irongate, 761, and 762 chipset support" depends on AGP && X86_32 help This option gives you AGP support for the GLX component of - XFree86 4.x on AMD Irongate, 761, and 762 chipsets. - - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. + X on AMD Irongate, 761, and 762 chipsets. 
config AGP_AMD64 tristate "AMD Opteron/Athlon64 on-CPU GART support" if !GART_IOMMU @@ -69,45 +61,38 @@ config AGP_AMD64 default y if GART_IOMMU help This option gives you AGP support for the GLX component of - XFree86 4.x using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. + X using the on-CPU northbridge of the AMD Athlon64/Opteron CPUs. You still need an external AGP bridge like the AMD 8151, VIA K8T400M, SiS755. It may also support other AGP bridges when loaded with agp_try_unsupported=1. - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say Y config AGP_INTEL tristate "Intel 440LX/BX/GX, I8xx and E7x05 chipset support" depends on AGP && X86 help - This option gives you AGP support for the GLX component of XFree86 4.x + This option gives you AGP support for the GLX component of X on Intel 440LX/BX/GX, 815, 820, 830, 840, 845, 850, 860, 875, - E7205 and E7505 chipsets and full support for the 810, 815, 830M, 845G, - 852GM, 855GM, 865G and I915 integrated graphics chipsets. + E7205 and E7505 chipsets and full support for the 810, 815, 830M, + 845G, 852GM, 855GM, 865G and I915 integrated graphics chipsets. + - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI, or if you have any Intel integrated graphics - chipsets. If unsure, say Y. config AGP_NVIDIA tristate "NVIDIA nForce/nForce2 chipset support" depends on AGP && X86_32 help This option gives you AGP support for the GLX component of - XFree86 4.x on the following NVIDIA chipsets. The supported chipsets - include nForce and nForce2 + X on NVIDIA chipsets including nForce and nForce2 config AGP_SIS tristate "SiS chipset support" depends on AGP && X86_32 help This option gives you AGP support for the GLX component of - XFree86 4.x on Silicon Integrated Systems [SiS] chipsets. + X on Silicon Integrated Systems [SiS] chipsets. Note that 5591/5592 AGP chipsets are NOT supported. - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. config AGP_SWORKS tristate "Serverworks LE/HE chipset support" @@ -121,10 +106,7 @@ config AGP_VIA depends on AGP && X86_32 help This option gives you AGP support for the GLX component of - XFree86 4.x on VIA MVP3/Apollo Pro chipsets. - - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say N. + X on VIA MVP3/Apollo Pro chipsets. config AGP_I460 tristate "Intel 460GX chipset support" @@ -159,9 +141,6 @@ config AGP_EFFICEON This option gives you AGP support for the Transmeta Efficeon series processors with integrated northbridges. - You should say Y here if you use XFree86 3.3.6 or 4.x and want to - use GLX or DRI. If unsure, say Y. 
- config AGP_SGI_TIOCA tristate "SGI TIO chipset AGP support" depends on AGP && (IA64_SGI_SN2 || IA64_GENERIC) diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 9964c508c111..1251b2515bbe 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c @@ -516,8 +516,10 @@ static int __devinit nforce3_agp_init(struct pci_dev *pdev) pci_read_config_dword (hammers[0], AMD64_GARTAPERTUREBASE, &apbase); /* if x86-64 aperture base is beyond 4G, exit here */ - if ( (apbase & 0x7fff) >> (32 - 25) ) - return -ENODEV; + if ( (apbase & 0x7fff) >> (32 - 25) ) { + printk(KERN_INFO PFX "aperture base > 4G\n"); + return -ENODEV; + } apbase = (apbase & 0x7fff) << 25; diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c index 268f78d926d3..efef9999f1cf 100644 --- a/drivers/char/agp/sworks-agp.c +++ b/drivers/char/agp/sworks-agp.c @@ -468,9 +468,7 @@ static int __devinit agp_serverworks_probe(struct pci_dev *pdev, switch (pdev->device) { case 0x0006: - /* ServerWorks CNB20HE - Fail silently.*/ - printk (KERN_ERR PFX "Detected ServerWorks CNB20HE chipset: No AGP present.\n"); + printk (KERN_ERR PFX "ServerWorks CNB20HE is unsupported due to lack of documentation.\n"); return -ENODEV; case PCI_DEVICE_ID_SERVERWORKS_HE: diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c index a1381c61aa63..d3879ac9970f 100644 --- a/drivers/char/drm/i915_irq.c +++ b/drivers/char/drm/i915_irq.c @@ -202,10 +202,15 @@ void i915_driver_irq_postinstall(drm_device_t * dev) void i915_driver_irq_uninstall(drm_device_t * dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + u16 temp; + if (!dev_priv) return; I915_WRITE16(I915REG_HWSTAM, 0xffff); I915_WRITE16(I915REG_INT_MASK_R, 0xffff); I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); + + temp = I915_READ16(I915REG_INT_IDENTITY_R); + I915_WRITE16(I915REG_INT_IDENTITY_R, temp); } diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c index 291dbf4c8186..c08fa5076f05 100644 --- a/drivers/char/drm/r300_cmdbuf.c +++ b/drivers/char/drm/r300_cmdbuf.c @@ -161,6 +161,7 @@ void r300_init_reg_flags(void) ADD_RANGE(R300_VAP_PVS_CNTL_1, 3); ADD_RANGE(R300_GB_ENABLE, 1); ADD_RANGE(R300_GB_MSPOS0, 5); + ADD_RANGE(R300_TX_CNTL, 1); ADD_RANGE(R300_TX_ENABLE, 1); ADD_RANGE(0x4200, 4); ADD_RANGE(0x4214, 1); @@ -489,6 +490,50 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, return 0; } +static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, + drm_radeon_kcmd_buffer_t *cmdbuf) +{ + u32 *cmd = (u32 *) cmdbuf->buf; + int count, ret; + RING_LOCALS; + + count=(cmd[0]>>16) & 0x3fff; + + if (cmd[0] & 0x8000) { + u32 offset; + + if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL + | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { + offset = cmd[2] << 10; + ret = r300_check_offset(dev_priv, offset); + if (ret) { + DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); + return DRM_ERR(EINVAL); + } + } + + if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && + (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { + offset = cmd[3] << 10; + ret = r300_check_offset(dev_priv, offset); + if (ret) { + DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); + return DRM_ERR(EINVAL); + } + + } + } + + BEGIN_RING(count+2); + OUT_RING(cmd[0]); + OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1); + ADVANCE_RING(); + + cmdbuf->buf += (count+2)*4; + cmdbuf->bufsz -= (count+2)*4; + + return 0; +} static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t 
*dev_priv, drm_radeon_kcmd_buffer_t *cmdbuf) @@ -527,6 +572,9 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv, case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header); + case RADEON_CNTL_BITBLT_MULTI: + return r300_emit_bitblt_multi(dev_priv, cmdbuf); + case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */ case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */ case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */ diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h index a0ed20e25221..d1e19954406b 100644 --- a/drivers/char/drm/r300_reg.h +++ b/drivers/char/drm/r300_reg.h @@ -451,6 +451,9 @@ I am fairly certain that they are correct unless stated otherwise in comments. /* END */ /* gap */ +/* Zero to flush caches. */ +#define R300_TX_CNTL 0x4100 + /* The upper enable bits are guessed, based on fglrx reported limits. */ #define R300_TX_ENABLE 0x4104 # define R300_TX_ENABLE_0 (1 << 0) diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h index 498b19b1d641..1f7d2ab8c4fc 100644 --- a/drivers/char/drm/radeon_drv.h +++ b/drivers/char/drm/radeon_drv.h @@ -90,9 +90,10 @@ * 1.19- Add support for gart table in FB memory and PCIE r300 * 1.20- Add support for r300 texrect * 1.21- Add support for card type getparam + * 1.22- Add support for texture cache flushes (R300_TX_CNTL) */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 21 +#define DRIVER_MINOR 22 #define DRIVER_PATCHLEVEL 0 /* diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 5765f672e853..d58f82318853 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c @@ -243,7 +243,7 @@ static struct sysrq_key_op sysrq_term_op = { static void moom_callback(void *ignored) { - out_of_memory(GFP_KERNEL, 0); + out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL], GFP_KERNEL, 0); } static DECLARE_WORK(moom_work, moom_callback, NULL); diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 64819aa7cac4..0c08c58252be 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -348,10 +348,10 @@ aes_set_key(void *ctx_arg, const uint8_t *in_key, unsigned int key_len, uint32_t break; case 32: - E_KEY[4] = le32_to_cpu(in_key[4]); - E_KEY[5] = le32_to_cpu(in_key[5]); - E_KEY[6] = le32_to_cpu(in_key[6]); - t = E_KEY[7] = le32_to_cpu(in_key[7]); + E_KEY[4] = le32_to_cpu(key[4]); + E_KEY[5] = le32_to_cpu(key[5]); + E_KEY[6] = le32_to_cpu(key[6]); + t = E_KEY[7] = le32_to_cpu(key[7]); for (i = 0; i < 7; ++i) loop8 (i); break; diff --git a/drivers/fc4/fc.c b/drivers/fc4/fc.c index 5c8943509cc1..66d03f242d3c 100644 --- a/drivers/fc4/fc.c +++ b/drivers/fc4/fc.c @@ -1053,7 +1053,7 @@ static int fc_do_els(fc_channel *fc, unsigned int alpa, void *data, int len) int i; fcmd = &_fcmd; - memset(fcmd, 0, sizeof(fcmd)); + memset(fcmd, 0, sizeof(fcp_cmnd)); FCD(("PLOGI SID %d DID %d\n", fc->sid, alpa)) fch = &fcmd->fch; FILL_FCHDR_RCTL_DID(fch, R_CTL_ELS_REQ, alpa); diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index 18d7eda38851..eca92eb475a1 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c @@ -137,15 +137,15 @@ MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)" /* * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on * if your sbp2 device is not properly handling the SCSI inquiry command. 
- * This hack makes the inquiry look more like a typical MS Windows - * inquiry. + * This hack makes the inquiry look more like a typical MS Windows inquiry + * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8. * * If force_inquiry_hack=1 is required for your device to work, * please submit the logged sbp2_firmware_revision value of this device to * the linux1394-devel mailing list. */ static int force_inquiry_hack; -module_param(force_inquiry_hack, int, 0444); +module_param(force_inquiry_hack, int, 0644); MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); /* @@ -264,18 +264,17 @@ static struct hpsb_protocol_driver sbp2_driver = { }, }; - -/* List of device firmware's that require a forced 36 byte inquiry. */ +/* + * List of device firmwares that require the inquiry hack. + * Yields a few false positives but did not break other devices so far. + */ static u32 sbp2_broken_inquiry_list[] = { - 0x00002800, /* Stefan Richter <richtest@bauwesen.tu-cottbus.de> */ + 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */ /* DViCO Momobay CX-1 */ 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */ /* QPS Fire DVDBurner */ }; -#define NUM_BROKEN_INQUIRY_DEVS \ - (sizeof(sbp2_broken_inquiry_list)/sizeof(*sbp2_broken_inquiry_list)) - /************************************** * General utility functions **************************************/ @@ -643,9 +642,15 @@ static int sbp2_remove(struct device *dev) if (!scsi_id) return 0; - /* Trigger shutdown functions in scsi's highlevel. */ - if (scsi_id->scsi_host) + if (scsi_id->scsi_host) { + /* Get rid of enqueued commands if there is no chance to + * send them. */ + if (!sbp2util_node_is_available(scsi_id)) + sbp2scsi_complete_all_commands(scsi_id, DID_NO_CONNECT); + /* scsi_remove_device() will trigger shutdown functions of SCSI + * highlevel drivers which would deadlock if blocked. */ scsi_unblock_requests(scsi_id->scsi_host); + } sdev = scsi_id->sdev; if (sdev) { scsi_id->sdev = NULL; @@ -742,11 +747,6 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud hi->host = ud->ne->host; INIT_LIST_HEAD(&hi->scsi_ids); - /* Register our sbp2 status address space... */ - hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_ops, - SBP2_STATUS_FIFO_ADDRESS, - SBP2_STATUS_FIFO_ADDRESS + - SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(SBP2_MAX_UDS_PER_NODE+1)); #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA /* Handle data movement if physical dma is not * enabled/supportedon host controller */ @@ -759,6 +759,18 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids); + /* Register the status FIFO address range. We could use the same FIFO + * for targets at different nodes. However we need different FIFOs per + * target in order to support multi-unit devices. */ + scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( + &sbp2_highlevel, ud->ne->host, &sbp2_ops, + sizeof(struct sbp2_status_block), sizeof(quadlet_t), + ~0ULL, ~0ULL); + if (!scsi_id->status_fifo_addr) { + SBP2_ERR("failed to allocate status FIFO address range"); + goto failed_alloc; + } + /* Register our host with the SCSI stack. 
*/ scsi_host = scsi_host_alloc(&scsi_driver_template, sizeof(unsigned long)); @@ -997,6 +1009,10 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id) SBP2_DMA_FREE("single query logins data"); } + if (scsi_id->status_fifo_addr) + hpsb_unregister_addrspace(&sbp2_highlevel, hi->host, + scsi_id->status_fifo_addr); + scsi_id->ud->device.driver_data = NULL; SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id); @@ -1075,11 +1091,10 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id) ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response)); SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized"); - scsi_id->query_logins_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO + - SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id); - scsi_id->query_logins_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | - SBP2_STATUS_FIFO_ADDRESS_HI); - SBP2_DEBUG("sbp2_query_logins: status FIFO initialized"); + scsi_id->query_logins_orb->status_fifo_hi = + ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); + scsi_id->query_logins_orb->status_fifo_lo = + ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb)); @@ -1184,11 +1199,10 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id) ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response)); SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized"); - scsi_id->login_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO + - SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id); - scsi_id->login_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | - SBP2_STATUS_FIFO_ADDRESS_HI); - SBP2_DEBUG("sbp2_login_device: status FIFO initialized"); + scsi_id->login_orb->status_fifo_hi = + ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); + scsi_id->login_orb->status_fifo_lo = + ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); /* * Byte swap ORB if necessary @@ -1301,10 +1315,10 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id) scsi_id->logout_orb->login_ID_misc |= ORB_SET_NOTIFY(1); scsi_id->logout_orb->reserved5 = 0x0; - scsi_id->logout_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO + - SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id); - scsi_id->logout_orb->status_FIFO_hi = (ORB_SET_NODE_ID(hi->host->node_id) | - SBP2_STATUS_FIFO_ADDRESS_HI); + scsi_id->logout_orb->status_fifo_hi = + ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); + scsi_id->logout_orb->status_fifo_lo = + ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); /* * Byte swap ORB if necessary @@ -1366,10 +1380,10 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id) scsi_id->reconnect_orb->login_ID_misc |= ORB_SET_NOTIFY(1); scsi_id->reconnect_orb->reserved5 = 0x0; - scsi_id->reconnect_orb->status_FIFO_lo = SBP2_STATUS_FIFO_ADDRESS_LO + - SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(scsi_id->ud->id); - scsi_id->reconnect_orb->status_FIFO_hi = - (ORB_SET_NODE_ID(hi->host->node_id) | SBP2_STATUS_FIFO_ADDRESS_HI); + scsi_id->reconnect_orb->status_fifo_hi = + ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id); + scsi_id->reconnect_orb->status_fifo_lo = + ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr); /* * Byte swap ORB if necessary @@ -1560,7 +1574,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, /* Check for a blacklisted set of devices that require us to force * a 36 
byte host inquiry. This can be overriden as a module param * (to force all hosts). */ - for (i = 0; i < NUM_BROKEN_INQUIRY_DEVS; i++) { + for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) { if ((firmware_revision & 0xffff00) == sbp2_broken_inquiry_list[i]) { SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround", @@ -2007,18 +2021,6 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id, } /* - * The scsi stack sends down a request_bufflen which does not match the - * length field in the scsi cdb. This causes some sbp2 devices to - * reject this inquiry command. Fix the request_bufflen. - */ - if (*cmd == INQUIRY) { - if (force_inquiry_hack || scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) - request_bufflen = cmd[4] = 0x24; - else - request_bufflen = cmd[4]; - } - - /* * Now actually fill in the comamnd orb and sbp2 s/g list */ sbp2_create_command_orb(scsi_id, command, cmd, SCpnt->use_sg, @@ -2082,9 +2084,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, SBP2_DEBUG("sbp2_check_sbp2_response"); - switch (SCpnt->cmnd[0]) { - - case INQUIRY: + if (SCpnt->cmnd[0] == INQUIRY && (SCpnt->cmnd[1] & 3) == 0) { /* * Make sure data length is ok. Minimum length is 36 bytes */ @@ -2097,13 +2097,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id, */ scsi_buf[2] |= 2; scsi_buf[3] = (scsi_buf[3] & 0xf0) | 2; - - break; - - default: - break; } - return; } /* @@ -2114,7 +2108,6 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest { struct sbp2scsi_host_info *hi; struct scsi_id_instance_data *scsi_id = NULL, *scsi_id_tmp; - u32 id; struct scsi_cmnd *SCpnt = NULL; u32 scsi_status = SBP2_SCSI_STATUS_GOOD; struct sbp2_command_info *command; @@ -2137,12 +2130,12 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest } /* - * Find our scsi_id structure by looking at the status fifo address written to by - * the sbp2 device. + * Find our scsi_id structure by looking at the status fifo address + * written to by the sbp2 device. */ - id = SBP2_STATUS_FIFO_OFFSET_TO_ENTRY((u32)(addr - SBP2_STATUS_FIFO_ADDRESS)); list_for_each_entry(scsi_id_tmp, &hi->scsi_ids, scsi_list) { - if (scsi_id_tmp->ne->nodeid == nodeid && scsi_id_tmp->ud->id == id) { + if (scsi_id_tmp->ne->nodeid == nodeid && + scsi_id_tmp->status_fifo_addr == addr) { scsi_id = scsi_id_tmp; break; } @@ -2483,7 +2476,16 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id, static int sbp2scsi_slave_alloc(struct scsi_device *sdev) { - ((struct scsi_id_instance_data *)sdev->host->hostdata[0])->sdev = sdev; + struct scsi_id_instance_data *scsi_id = + (struct scsi_id_instance_data *)sdev->host->hostdata[0]; + + scsi_id->sdev = sdev; + + if (force_inquiry_hack || + scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) { + sdev->inquiry_len = 36; + sdev->skip_ms_page_8 = 1; + } return 0; } diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index 900ea1d25e71..e2d357a9ea3a 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h @@ -33,15 +33,17 @@ #define ORB_DIRECTION_NO_DATA_TRANSFER 0x2 #define ORB_SET_NULL_PTR(value) ((value & 0x1) << 31) -#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31) -#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? */ +#define ORB_SET_NOTIFY(value) ((value & 0x1) << 31) +#define ORB_SET_RQ_FMT(value) ((value & 0x3) << 29) /* unused ? 
*/ #define ORB_SET_NODE_ID(value) ((value & 0xffff) << 16) -#define ORB_SET_DATA_SIZE(value) (value & 0xffff) -#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16) -#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19) -#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20) -#define ORB_SET_SPEED(value) ((value & 0x7) << 24) -#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27) +#define ORB_SET_STATUS_FIFO_HI(value, id) (value >> 32 | ORB_SET_NODE_ID(id)) +#define ORB_SET_STATUS_FIFO_LO(value) (value & 0xffffffff) +#define ORB_SET_DATA_SIZE(value) (value & 0xffff) +#define ORB_SET_PAGE_SIZE(value) ((value & 0x7) << 16) +#define ORB_SET_PAGE_TABLE_PRESENT(value) ((value & 0x1) << 19) +#define ORB_SET_MAX_PAYLOAD(value) ((value & 0xf) << 20) +#define ORB_SET_SPEED(value) ((value & 0x7) << 24) +#define ORB_SET_DIRECTION(value) ((value & 0x1) << 27) struct sbp2_command_orb { volatile u32 next_ORB_hi; @@ -76,8 +78,8 @@ struct sbp2_login_orb { u32 login_response_lo; u32 lun_misc; u32 passwd_resp_lengths; - u32 status_FIFO_hi; - u32 status_FIFO_lo; + u32 status_fifo_hi; + u32 status_fifo_lo; }; #define RESPONSE_GET_LOGIN_ID(value) (value & 0xffff) @@ -102,8 +104,8 @@ struct sbp2_query_logins_orb { u32 query_response_lo; u32 lun_misc; u32 reserved_resp_length; - u32 status_FIFO_hi; - u32 status_FIFO_lo; + u32 status_fifo_hi; + u32 status_fifo_lo; }; #define RESPONSE_GET_MAX_LOGINS(value) (value & 0xffff) @@ -123,8 +125,8 @@ struct sbp2_reconnect_orb { u32 reserved4; u32 login_ID_misc; u32 reserved5; - u32 status_FIFO_hi; - u32 status_FIFO_lo; + u32 status_fifo_hi; + u32 status_fifo_lo; }; struct sbp2_logout_orb { @@ -134,8 +136,8 @@ struct sbp2_logout_orb { u32 reserved4; u32 login_ID_misc; u32 reserved5; - u32 status_FIFO_hi; - u32 status_FIFO_lo; + u32 status_fifo_hi; + u32 status_fifo_lo; }; #define PAGE_TABLE_SET_SEGMENT_BASE_HI(value) (value & 0xffff) @@ -195,30 +197,6 @@ struct sbp2_status_block { * Miscellaneous SBP2 related config rom defines */ -/* The status fifo address definition below is used as a base for each - * node, which a chunk seperately assigned to each unit directory in the - * node. For example, 0xfffe00000000ULL is used for the first sbp2 device - * detected on node 0, 0xfffe00000020ULL for the next sbp2 device on node - * 0, and so on. - * - * Note: We could use a single status fifo address for all sbp2 devices, - * and figure out which sbp2 device the status belongs to by looking at - * the source node id of the status write... but, using separate addresses - * for each sbp2 unit directory allows for better code and the ability to - * support multiple luns within a single 1394 node. - * - * Also note that we choose the address range below as it is a region - * specified for write posting, where the ohci controller will - * automatically send an ack_complete when the status is written by the - * sbp2 device... saving a split transaction. 
=) - */ -#define SBP2_STATUS_FIFO_ADDRESS 0xfffe00000000ULL -#define SBP2_STATUS_FIFO_ADDRESS_HI 0xfffe -#define SBP2_STATUS_FIFO_ADDRESS_LO 0x0 - -#define SBP2_STATUS_FIFO_ENTRY_TO_OFFSET(entry) ((entry) << 5) -#define SBP2_STATUS_FIFO_OFFSET_TO_ENTRY(offset) ((offset) >> 5) - #define SBP2_UNIT_DIRECTORY_OFFSET_KEY 0xd1 #define SBP2_CSR_OFFSET_KEY 0x54 #define SBP2_UNIT_SPEC_ID_KEY 0x12 @@ -258,7 +236,6 @@ struct sbp2_status_block { */ #define SBP2_MAX_SG_ELEMENT_LENGTH 0xf000 -#define SBP2_MAX_UDS_PER_NODE 16 /* Maximum scsi devices per node */ #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ #define SBP2_MAX_CMDS 8 /* This should be safe */ @@ -338,6 +315,11 @@ struct scsi_id_instance_data { u32 sbp2_firmware_revision; /* + * Address for the device to write status blocks to + */ + u64 status_fifo_addr; + + /* * Variable used for logins, reconnects, logouts, query logins */ atomic_t sbp2_login_complete; diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index 69c04945591f..ded2c33f5b85 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c @@ -1019,8 +1019,8 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, #define XIP_INVAL_CACHED_RANGE(map, from, size) \ INVALIDATE_CACHED_RANGE(map, from, size) -#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ - UDELAY(map, chip, adr, usec) +#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \ + UDELAY(map, chip, cmd_adr, usec) /* * Extra notes: @@ -1052,7 +1052,7 @@ do { \ spin_lock(chip->mutex); \ } while (0) -#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \ +#define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \ do { \ spin_unlock(chip->mutex); \ INVALIDATE_CACHED_RANGE(map, adr, len); \ @@ -1284,7 +1284,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, map_write(map, datum, adr); chip->state = mode; - INVALIDATE_CACHE_UDELAY(map, chip, + INVALIDATE_CACHE_UDELAY(map, chip, adr, adr, map_bankwidth(map), chip->word_write_time); @@ -1572,8 +1572,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, map_write(map, CMD(0xd0), cmd_adr); chip->state = FL_WRITING; - INVALIDATE_CACHE_UDELAY(map, chip, - cmd_adr, len, + INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, + adr, len, chip->buffer_write_time); timeo = jiffies + (HZ/2); @@ -1744,7 +1744,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, chip->state = FL_ERASING; chip->erase_suspended = 0; - INVALIDATE_CACHE_UDELAY(map, chip, + INVALIDATE_CACHE_UDELAY(map, chip, adr, adr, len, chip->erase_time*1000/2); diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c index fa176ffb4ad5..8936058a3cce 100644 --- a/drivers/net/irda/irda-usb.c +++ b/drivers/net/irda/irda-usb.c @@ -108,6 +108,7 @@ static void irda_usb_close(struct irda_usb_cb *self); static void speed_bulk_callback(struct urb *urb, struct pt_regs *regs); static void write_bulk_callback(struct urb *urb, struct pt_regs *regs); static void irda_usb_receive(struct urb *urb, struct pt_regs *regs); +static void irda_usb_rx_defer_expired(unsigned long data); static int irda_usb_net_open(struct net_device *dev); static int irda_usb_net_close(struct net_device *dev); static int irda_usb_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); @@ -677,6 +678,12 @@ static void irda_usb_net_timeout(struct net_device *netdev) * on the interrupt pipe and hang the Rx URB only when an 
interrupt is * received. * Jean II + * + * Note : don't read the above as what we are currently doing, but as + * something we could do with KC dongle. Also don't forget that the + * interrupt pipe is not part of the original standard, so this would + * need to be optional... + * Jean II */ /*------------------------------------------------------------------*/ @@ -704,10 +711,8 @@ static void irda_usb_submit(struct irda_usb_cb *self, struct sk_buff *skb, struc /* Reinitialize URB */ usb_fill_bulk_urb(urb, self->usbdev, usb_rcvbulkpipe(self->usbdev, self->bulk_in_ep), - skb->data, skb->truesize, + skb->data, IRDA_SKB_MAX_MTU, irda_usb_receive, skb); - /* Note : unlink *must* be synchronous because of the code in - * irda_usb_net_close() -> free the skb - Jean II */ urb->status = 0; /* Can be called from irda_usb_receive (irq handler) -> GFP_ATOMIC */ @@ -734,6 +739,7 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) struct irda_skb_cb *cb; struct sk_buff *newskb; struct sk_buff *dataskb; + struct urb *next_urb; int docopy; IRDA_DEBUG(2, "%s(), len=%d\n", __FUNCTION__, urb->actual_length); @@ -755,20 +761,37 @@ static void irda_usb_receive(struct urb *urb, struct pt_regs *regs) if (urb->status != 0) { switch (urb->status) { case -EILSEQ: - self->stats.rx_errors++; self->stats.rx_crc_errors++; - break; + /* Also precursor to a hot-unplug on UHCI. */ + /* Fallthrough... */ case -ECONNRESET: /* -104 */ - IRDA_DEBUG(0, "%s(), Connection Reset (-104), transfer_flags 0x%04X \n", __FUNCTION__, urb->transfer_flags); + /* Random error, if I remember correctly */ /* uhci_cleanup_unlink() is going to kill the Rx * URB just after we return. No problem, at this * point the URB will be idle ;-) - Jean II */ - break; + case -ESHUTDOWN: /* -108 */ + /* That's usually a hot-unplug. Submit will fail... */ + case -ETIMEDOUT: /* -110 */ + /* Usually precursor to a hot-unplug on OHCI. */ default: - IRDA_DEBUG(0, "%s(), RX status %d,transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); + self->stats.rx_errors++; + IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __FUNCTION__, urb->status, urb->transfer_flags); break; } - goto done; + /* If we received an error, we don't want to resubmit the + * Rx URB straight away but to give the USB layer a little + * bit of breathing room. + * We are in the USB thread context, therefore there is a + * danger of recursion (new URB we submit fails, we come + * back here). + * With recent USB stack (2.6.15+), I'm seeing that on + * hot unplug of the dongle... + * Lowest effective timer is 10ms... + * Jean II */ + self->rx_defer_timer.function = &irda_usb_rx_defer_expired; + self->rx_defer_timer.data = (unsigned long) urb; + mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000)); + return; } /* Check for empty frames */ @@ -845,13 +868,45 @@ done: * idle slot.... * Jean II */ /* Note : with this scheme, we could submit the idle URB before - * processing the Rx URB. Another time... Jean II */ + * processing the Rx URB. I don't think it would buy us anything as + * we are running in the USB thread context. Jean II */ + next_urb = self->idle_rx_urb; - /* Submit the idle URB to replace the URB we've just received */ - irda_usb_submit(self, skb, self->idle_rx_urb); /* Recycle Rx URB : Now, the idle URB is the present one */ urb->context = NULL; self->idle_rx_urb = urb; + + /* Submit the idle URB to replace the URB we've just received. + * Do it last to avoid race conditions... 
Jean II */ + irda_usb_submit(self, skb, next_urb); +} + +/*------------------------------------------------------------------*/ +/* + * In case of errors, we want the USB layer to have time to recover. + * Now, it is time to resubmit ouur Rx URB... + */ +static void irda_usb_rx_defer_expired(unsigned long data) +{ + struct urb *urb = (struct urb *) data; + struct sk_buff *skb = (struct sk_buff *) urb->context; + struct irda_usb_cb *self; + struct irda_skb_cb *cb; + struct urb *next_urb; + + IRDA_DEBUG(2, "%s()\n", __FUNCTION__); + + /* Find ourselves */ + cb = (struct irda_skb_cb *) skb->cb; + IRDA_ASSERT(cb != NULL, return;); + self = (struct irda_usb_cb *) cb->context; + IRDA_ASSERT(self != NULL, return;); + + /* Same stuff as when Rx is done, see above... */ + next_urb = self->idle_rx_urb; + urb->context = NULL; + self->idle_rx_urb = urb; + irda_usb_submit(self, skb, next_urb); } /*------------------------------------------------------------------*/ @@ -990,6 +1045,9 @@ static int irda_usb_net_close(struct net_device *netdev) /* Stop network Tx queue */ netif_stop_queue(netdev); + /* Kill defered Rx URB */ + del_timer(&self->rx_defer_timer); + /* Deallocate all the Rx path buffers (URBs and skb) */ for (i = 0; i < IU_MAX_RX_URBS; i++) { struct urb *urb = self->rx_urb[i]; @@ -1365,6 +1423,7 @@ static int irda_usb_probe(struct usb_interface *intf, self = net->priv; self->netdev = net; spin_lock_init(&self->lock); + init_timer(&self->rx_defer_timer); /* Create all of the needed urbs */ for (i = 0; i < IU_MAX_RX_URBS; i++) { @@ -1498,6 +1557,9 @@ static void irda_usb_disconnect(struct usb_interface *intf) * This will stop/desactivate the Tx path. - Jean II */ self->present = 0; + /* Kill defered Rx URB */ + del_timer(&self->rx_defer_timer); + /* We need to have irq enabled to unlink the URBs. That's OK, * at this point the Tx path is gone - Jean II */ spin_unlock_irqrestore(&self->lock, flags); @@ -1507,11 +1569,11 @@ static void irda_usb_disconnect(struct usb_interface *intf) /* Accept no more transmissions */ /*netif_device_detach(self->netdev);*/ netif_stop_queue(self->netdev); - /* Stop all the receive URBs */ + /* Stop all the receive URBs. Must be synchronous. */ for (i = 0; i < IU_MAX_RX_URBS; i++) usb_kill_urb(self->rx_urb[i]); /* Cancel Tx and speed URB. - * Toggle flags to make sure it's synchronous. */ + * Make sure it's synchronous to avoid races. 
*/ usb_kill_urb(self->tx_urb); usb_kill_urb(self->speed_urb); } diff --git a/drivers/net/irda/irda-usb.h b/drivers/net/irda/irda-usb.h index bd8f66542322..4026af42dd47 100644 --- a/drivers/net/irda/irda-usb.h +++ b/drivers/net/irda/irda-usb.h @@ -136,8 +136,6 @@ struct irda_usb_cb { __u16 bulk_out_mtu; /* Max Tx packet size in bytes */ __u8 bulk_int_ep; /* Interrupt Endpoint assignments */ - wait_queue_head_t wait_q; /* for timeouts */ - struct urb *rx_urb[IU_MAX_RX_URBS]; /* URBs used to receive data frames */ struct urb *idle_rx_urb; /* Pointer to idle URB in Rx path */ struct urb *tx_urb; /* URB used to send data frames */ @@ -147,17 +145,18 @@ struct irda_usb_cb { struct net_device_stats stats; struct irlap_cb *irlap; /* The link layer we are binded to */ struct qos_info qos; - hashbin_t *tx_list; /* Queued transmit skb's */ char *speed_buff; /* Buffer for speed changes */ struct timeval stamp; struct timeval now; - spinlock_t lock; /* For serializing operations */ + spinlock_t lock; /* For serializing Tx operations */ __u16 xbofs; /* Current xbofs setting */ __s16 new_xbofs; /* xbofs we need to set */ __u32 speed; /* Current speed */ __s32 new_speed; /* speed we need to set */ + + struct timer_list rx_defer_timer; /* Wait for Rx error to clear */ }; diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 6e1018448eea..8cc0d0bbdf50 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -287,6 +287,20 @@ enum RTL8169_register_content { TxInterFrameGapShift = 24, TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + /* Config1 register p.24 */ + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + /* TBICSR p.28 */ TBIReset = 0x80000000, TBILoopback = 0x40000000, @@ -433,6 +447,7 @@ struct rtl8169_private { unsigned int (*phy_reset_pending)(void __iomem *); unsigned int (*link_ok)(void __iomem *); struct work_struct task; + unsigned wol_enabled : 1; }; MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); @@ -607,6 +622,80 @@ static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex) *duplex = p->duplex; } +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + + wol->wolopts = 0; + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + wol->supported = WAKE_ANY; + + spin_lock_irq(&tp->lock); + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + goto out_unlock; + + options = RTL_R8(Config3); + if (options & LinkUp) + wol->wolopts |= WAKE_PHY; + if (options & MagicPacket) + wol->wolopts |= WAKE_MAGIC; + + options = RTL_R8(Config5); + if (options & UWF) + wol->wolopts |= WAKE_UCAST; + if (options & BWF) + wol->wolopts |= WAKE_BCAST; + if (options & MWF) + wol->wolopts |= WAKE_MCAST; + +out_unlock: + spin_unlock_irq(&tp->lock); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void 
__iomem *ioaddr = tp->mmio_addr; + int i; + static struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_ANY, Config1, PMEnable }, + { WAKE_PHY, Config3, LinkUp }, + { WAKE_MAGIC, Config3, MagicPacket }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake } + }; + + spin_lock_irq(&tp->lock); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + for (i = 0; i < ARRAY_SIZE(cfg); i++) { + u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wol->wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + tp->wol_enabled = (wol->wolopts) ? 1 : 0; + + spin_unlock_irq(&tp->lock); + + return 0; +} + static void rtl8169_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -1025,6 +1114,8 @@ static struct ethtool_ops rtl8169_ethtool_ops = { .get_tso = ethtool_op_get_tso, .set_tso = ethtool_op_set_tso, .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, .get_strings = rtl8169_get_strings, .get_stats_count = rtl8169_get_stats_count, .get_ethtool_stats = rtl8169_get_ethtool_stats, @@ -1442,6 +1533,11 @@ rtl8169_init_board(struct pci_dev *pdev, struct net_device **dev_out, } tp->chipset = i; + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); + RTL_W8(Cfg9346, Cfg9346_Lock); + *ioaddr_out = ioaddr; *dev_out = dev; out: @@ -1612,49 +1708,6 @@ rtl8169_remove_one(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); } -#ifdef CONFIG_PM - -static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct rtl8169_private *tp = netdev_priv(dev); - void __iomem *ioaddr = tp->mmio_addr; - unsigned long flags; - - if (!netif_running(dev)) - return 0; - - netif_device_detach(dev); - netif_stop_queue(dev); - spin_lock_irqsave(&tp->lock, flags); - - /* Disable interrupts, stop Rx and Tx */ - RTL_W16(IntrMask, 0); - RTL_W8(ChipCmd, 0); - - /* Update the error counts. 
*/ - tp->stats.rx_missed_errors += RTL_R32(RxMissed); - RTL_W32(RxMissed, 0); - spin_unlock_irqrestore(&tp->lock, flags); - - return 0; -} - -static int rtl8169_resume(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - - if (!netif_running(dev)) - return 0; - - netif_device_attach(dev); - rtl8169_hw_start(dev); - - return 0; -} - -#endif /* CONFIG_PM */ - static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, struct net_device *dev) { @@ -2700,6 +2753,56 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev) return &tp->stats; } +#ifdef CONFIG_PM + +static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + if (!netif_running(dev)) + goto out; + + netif_device_detach(dev); + netif_stop_queue(dev); + + spin_lock_irq(&tp->lock); + + rtl8169_asic_down(ioaddr); + + tp->stats.rx_missed_errors += RTL_R32(RxMissed); + RTL_W32(RxMissed, 0); + + spin_unlock_irq(&tp->lock); + + pci_save_state(pdev); + pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); +out: + return 0; +} + +static int rtl8169_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + + if (!netif_running(dev)) + goto out; + + netif_device_attach(dev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D0, 0); + + rtl8169_schedule_work(dev, rtl8169_reset_task); +out: + return 0; +} + +#endif /* CONFIG_PM */ + static struct pci_driver rtl8169_pci_driver = { .name = MODULENAME, .id_table = rtl8169_pci_tbl, diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 67fb19b8fde9..25e028b7ce48 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -879,13 +879,12 @@ static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) int i; xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); - xm_read16(hw, port, XM_PHY_DATA); + *val = xm_read16(hw, port, XM_PHY_DATA); - /* Need to wait for external PHY */ for (i = 0; i < PHY_RETRIES; i++) { - udelay(1); if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) goto ready; + udelay(1); } return -ETIMEDOUT; @@ -918,7 +917,12 @@ static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) ready: xm_write16(hw, port, XM_PHY_DATA, val); - return 0; + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; } static void genesis_init(struct skge_hw *hw) @@ -1168,13 +1172,17 @@ static void genesis_mac_init(struct skge_hw *hw, int port) u32 r; const u8 zero[6] = { 0 }; - /* Clear MIB counters */ - xm_write16(hw, port, XM_STAT_CMD, - XM_SC_CLR_RXC | XM_SC_CLR_TXC); - /* Clear two times according to Errata #3 */ - xm_write16(hw, port, XM_STAT_CMD, - XM_SC_CLR_RXC | XM_SC_CLR_TXC); + for (i = 0; i < 10; i++) { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_SET_MAC_RST); + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) + goto reset_ok; + udelay(1); + } + + printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name); + reset_ok: /* Unreset the XMAC. 
*/ skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); @@ -1191,7 +1199,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port) r |= GP_DIR_2|GP_IO_2; skge_write32(hw, B2_GP_IO, r); - skge_read32(hw, B2_GP_IO); + /* Enable GMII interface */ xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); @@ -1205,6 +1213,13 @@ static void genesis_mac_init(struct skge_hw *hw, int port) for (i = 1; i < 16; i++) xm_outaddr(hw, port, XM_EXM(i), zero); + /* Clear MIB counters */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* configure Rx High Water Mark (XM_RX_HI_WM) */ xm_write16(hw, port, XM_RX_HI_WM, 1450); @@ -2170,8 +2185,10 @@ static int skge_up(struct net_device *dev) skge->tx_avail = skge->tx_ring.count - 1; /* Enable IRQ from port */ + spin_lock_irq(&hw->hw_lock); hw->intr_mask |= portirqmask[port]; skge_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); /* Initialize MAC */ spin_lock_bh(&hw->phy_lock); @@ -2229,8 +2246,10 @@ static int skge_down(struct net_device *dev) else yukon_stop(skge); + spin_lock_irq(&hw->hw_lock); hw->intr_mask &= ~portirqmask[skge->port]; skge_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); /* Stop transmitter */ skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); @@ -2678,8 +2697,7 @@ static int skge_poll(struct net_device *dev, int *budget) /* restart receiver */ wmb(); - skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), - CSR_START | CSR_IRQ_CL_F); + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); *budget -= work_done; dev->quota -= work_done; @@ -2687,10 +2705,11 @@ static int skge_poll(struct net_device *dev, int *budget) if (work_done >= to_do) return 1; /* not done */ - netif_rx_complete(dev); - hw->intr_mask |= portirqmask[skge->port]; - skge_write32(hw, B0_IMSK, hw->intr_mask); - skge_read32(hw, B0_IMSK); + spin_lock_irq(&hw->hw_lock); + __netif_rx_complete(dev); + hw->intr_mask |= portirqmask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); return 0; } @@ -2850,18 +2869,10 @@ static void skge_extirq(unsigned long data) } spin_unlock(&hw->phy_lock); - local_irq_disable(); + spin_lock_irq(&hw->hw_lock); hw->intr_mask |= IS_EXT_REG; skge_write32(hw, B0_IMSK, hw->intr_mask); - local_irq_enable(); -} - -static inline void skge_wakeup(struct net_device *dev) -{ - struct skge_port *skge = netdev_priv(dev); - - prefetch(skge->rx_ring.to_clean); - netif_rx_schedule(dev); + spin_unlock_irq(&hw->hw_lock); } static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) @@ -2872,15 +2883,17 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) if (status == 0 || status == ~0) /* hotplug or shared irq */ return IRQ_NONE; - status &= hw->intr_mask; + spin_lock(&hw->hw_lock); if (status & IS_R1_F) { + skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F); hw->intr_mask &= ~IS_R1_F; - skge_wakeup(hw->dev[0]); + netif_rx_schedule(hw->dev[0]); } if (status & IS_R2_F) { + skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F); hw->intr_mask &= ~IS_R2_F; - skge_wakeup(hw->dev[1]); + netif_rx_schedule(hw->dev[1]); } if (status & IS_XA1_F) @@ -2922,6 +2935,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs) } skge_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock(&hw->hw_lock); return IRQ_HANDLED; } @@ -3290,6 +3304,7 @@ static int __devinit skge_probe(struct pci_dev 
*pdev, hw->pdev = pdev; spin_lock_init(&hw->phy_lock); + spin_lock_init(&hw->hw_lock); tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); diff --git a/drivers/net/skge.h b/drivers/net/skge.h index 2efdacc290e5..941f12a333b6 100644 --- a/drivers/net/skge.h +++ b/drivers/net/skge.h @@ -2402,6 +2402,7 @@ struct skge_hw { struct tasklet_struct ext_tasklet; spinlock_t phy_lock; + spinlock_t hw_lock; }; enum { diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index bfeba5b9cd7a..ca8160d68229 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -195,11 +195,11 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) pr_debug("sky2_set_power_state %d\n", state); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_PMC, &power_control); + power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_PMC); vaux = (sky2_read16(hw, B0_CTST) & Y2_VAUX_AVAIL) && (power_control & PCI_PM_CAP_PME_D3cold); - pci_read_config_word(hw->pdev, hw->pm_cap + PCI_PM_CTRL, &power_control); + power_control = sky2_pci_read16(hw, hw->pm_cap + PCI_PM_CTRL); power_control |= PCI_PM_CTRL_PME_STATUS; power_control &= ~(PCI_PM_CTRL_STATE_MASK); @@ -223,7 +223,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) sky2_write8(hw, B2_Y2_CLK_GATE, 0); /* Turn off phy power saving */ - pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); /* looks like this XL is back asswards .. */ @@ -232,18 +232,28 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) if (hw->ports > 1) reg1 |= PCI_Y2_PHY2_COMA; } - pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); + + if (hw->chip_id == CHIP_ID_YUKON_EC_U) { + sky2_pci_write32(hw, PCI_DEV_REG3, 0); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); + reg1 &= P_ASPM_CONTROL_MSK; + sky2_pci_write32(hw, PCI_DEV_REG4, reg1); + sky2_pci_write32(hw, PCI_DEV_REG5, 0); + } + + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); + break; case PCI_D3hot: case PCI_D3cold: /* Turn on phy power saving */ - pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®1); + reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); else reg1 |= (PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD); - pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg1); + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) sky2_write8(hw, B2_Y2_CLK_GATE, 0); @@ -265,7 +275,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) ret = -1; } - pci_write_config_byte(hw->pdev, hw->pm_cap + PCI_PM_CTRL, power_control); + sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); return ret; } @@ -463,16 +473,31 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); } - gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); + if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { + /* apply fixes in PHY AFE */ + gm_phy_write(hw, port, 22, 255); + /* increase differential signal amplitude in 10BASE-T */ + gm_phy_write(hw, port, 24, 0xaa99); + gm_phy_write(hw, port, 23, 0x2011); - if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { - /* turn on 100 Mbps LED (LED_LINK100) */ - ledover |= PHY_M_LED_MO_100(MO_LED_ON); - } + /* 
fix for IEEE A/B Symmetry failure in 1000BASE-T */ + gm_phy_write(hw, port, 24, 0xa204); + gm_phy_write(hw, port, 23, 0x2002); - if (ledover) - gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); + /* set page register to 0 */ + gm_phy_write(hw, port, 22, 0); + } else { + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); + if (sky2->autoneg == AUTONEG_DISABLE || sky2->speed == SPEED_100) { + /* turn on 100 Mbps LED (LED_LINK100) */ + ledover |= PHY_M_LED_MO_100(MO_LED_ON); + } + + if (ledover) + gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); + + } /* Enable phy interrupt on auto-negotiation complete (or link up) */ if (sky2->autoneg == AUTONEG_ENABLE) gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); @@ -953,6 +978,12 @@ static int sky2_rx_start(struct sky2_port *sky2) sky2->rx_put = sky2->rx_next = 0; sky2_qset(hw, rxq); + + if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev >= 2) { + /* MAC Rx RAM Read is controlled by hardware */ + sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS); + } + sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); rx_set_checksum(sky2); @@ -1035,9 +1066,10 @@ static int sky2_up(struct net_device *dev) RB_RST_SET); sky2_qset(hw, txqaddr[port]); - if (hw->chip_id == CHIP_ID_YUKON_EC_U) - sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); + /* Set almost empty threshold */ + if (hw->chip_id == CHIP_ID_YUKON_EC_U && hw->chip_rev == 1) + sky2_write16(hw, Q_ADDR(txqaddr[port], Q_AL), 0x1a0); sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, TX_RING_SIZE - 1); @@ -1047,8 +1079,10 @@ static int sky2_up(struct net_device *dev) goto err_out; /* Enable interrupts from phy/mac for port */ + spin_lock_irq(&hw->hw_lock); hw->intr_mask |= (port == 0) ? Y2_IS_PORT_1 : Y2_IS_PORT_2; sky2_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); return 0; err_out: @@ -1348,10 +1382,10 @@ static int sky2_down(struct net_device *dev) netif_stop_queue(dev); /* Disable port IRQ */ - local_irq_disable(); + spin_lock_irq(&hw->hw_lock); hw->intr_mask &= ~((sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); sky2_write32(hw, B0_IMSK, hw->intr_mask); - local_irq_enable(); + spin_unlock_irq(&hw->hw_lock); flush_scheduled_work(); @@ -1633,10 +1667,10 @@ static void sky2_phy_task(void *arg) out: up(&sky2->phy_sema); - local_irq_disable(); + spin_lock_irq(&hw->hw_lock); hw->intr_mask |= (sky2->port == 0) ? Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2; sky2_write32(hw, B0_IMSK, hw->intr_mask); - local_irq_enable(); + spin_unlock_irq(&hw->hw_lock); } @@ -1863,6 +1897,17 @@ static int sky2_poll(struct net_device *dev0, int *budget) sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); + /* + * Kick the STAT_LEV_TIMER_CTRL timer. + * This fixes my hangs on Yukon-EC (0xb6) rev 1. + * The if clause is there to start the timer only if it has been + * configured correctly and not been disabled via ethtool. 
+ */ + if (sky2_read8(hw, STAT_LEV_TIMER_CTRL) == TIM_START) { + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_STOP); + sky2_write8(hw, STAT_LEV_TIMER_CTRL, TIM_START); + } + hwidx = sky2_read16(hw, STAT_PUT_IDX); BUG_ON(hwidx >= STATUS_RING_SIZE); rmb(); @@ -1945,16 +1990,19 @@ exit_loop: sky2_tx_check(hw, 0, tx_done[0]); sky2_tx_check(hw, 1, tx_done[1]); + if (sky2_read8(hw, STAT_TX_TIMER_CTRL) == TIM_START) { + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); + sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); + } + if (likely(work_done < to_do)) { - /* need to restart TX timer */ - if (is_ec_a1(hw)) { - sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP); - sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START); - } + spin_lock_irq(&hw->hw_lock); + __netif_rx_complete(dev0); - netif_rx_complete(dev0); hw->intr_mask |= Y2_IS_STAT_BMU; sky2_write32(hw, B0_IMSK, hw->intr_mask); + spin_unlock_irq(&hw->hw_lock); + return 0; } else { *budget -= work_done; @@ -2017,13 +2065,13 @@ static void sky2_hw_intr(struct sky2_hw *hw) if (status & (Y2_IS_MST_ERR | Y2_IS_IRQ_STAT)) { u16 pci_err; - pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); + pci_err = sky2_pci_read16(hw, PCI_STATUS); if (net_ratelimit()) printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", pci_name(hw->pdev), pci_err); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - pci_write_config_word(hw->pdev, PCI_STATUS, + sky2_pci_write16(hw, PCI_STATUS, pci_err | PCI_STATUS_ERROR_BITS); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); } @@ -2032,7 +2080,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) /* PCI-Express uncorrectable Error occurred */ u32 pex_err; - pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); + pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT); if (net_ratelimit()) printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", @@ -2040,7 +2088,7 @@ static void sky2_hw_intr(struct sky2_hw *hw) /* clear the interrupt */ sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, + sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); @@ -2086,6 +2134,7 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port) hw->intr_mask &= ~(port == 0 ? 
Y2_IS_IRQ_PHY1 : Y2_IS_IRQ_PHY2); sky2_write32(hw, B0_IMSK, hw->intr_mask); + schedule_work(&sky2->phy_task); } @@ -2099,6 +2148,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) if (status == 0 || status == ~0) return IRQ_NONE; + spin_lock(&hw->hw_lock); if (status & Y2_IS_HW_ERR) sky2_hw_intr(hw); @@ -2127,7 +2177,7 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs) sky2_write32(hw, B0_Y2_SP_ICR, 2); - sky2_read32(hw, B0_IMSK); + spin_unlock(&hw->hw_lock); return IRQ_HANDLED; } @@ -2170,7 +2220,7 @@ static int sky2_reset(struct sky2_hw *hw) { u16 status; u8 t8, pmd_type; - int i, err; + int i; sky2_write8(hw, B0_CTST, CS_RST_CLR); @@ -2192,25 +2242,18 @@ static int sky2_reset(struct sky2_hw *hw) sky2_write8(hw, B0_CTST, CS_RST_CLR); /* clear PCI errors, if any */ - err = pci_read_config_word(hw->pdev, PCI_STATUS, &status); - if (err) - goto pci_err; + status = sky2_pci_read16(hw, PCI_STATUS); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); - err = pci_write_config_word(hw->pdev, PCI_STATUS, - status | PCI_STATUS_ERROR_BITS); - if (err) - goto pci_err; + sky2_pci_write16(hw, PCI_STATUS, status | PCI_STATUS_ERROR_BITS); + sky2_write8(hw, B0_CTST, CS_MRST_CLR); /* clear any PEX errors */ - if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) { - err = pci_write_config_dword(hw->pdev, PEX_UNC_ERR_STAT, - 0xffffffffUL); - if (err) - goto pci_err; - } + if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) + sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL); + pmd_type = sky2_read8(hw, B2_PMD_TYP); hw->copper = !(pmd_type == 'L' || pmd_type == 'S'); @@ -2309,8 +2352,7 @@ static int sky2_reset(struct sky2_hw *hw) sky2_write8(hw, STAT_FIFO_ISR_WM, 16); sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000)); - sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100)); - sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20)); + sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 7)); } /* enable status unit */ @@ -2321,14 +2363,6 @@ static int sky2_reset(struct sky2_hw *hw) sky2_write8(hw, STAT_ISR_TIMER_CTRL, TIM_START); return 0; - -pci_err: - /* This is to catch a BIOS bug workaround where - * mmconfig table doesn't have other buses. 
- */ - printk(KERN_ERR PFX "%s: can't access PCI config space\n", - pci_name(hw->pdev)); - return err; } static u32 sky2_supported_modes(const struct sky2_hw *hw) @@ -2852,11 +2886,11 @@ static int sky2_set_coalesce(struct net_device *dev, (ecmd->rx_coalesce_usecs_irq < tmin || ecmd->rx_coalesce_usecs_irq > tmax)) return -EINVAL; - if (ecmd->tx_max_coalesced_frames > 0xffff) + if (ecmd->tx_max_coalesced_frames >= TX_RING_SIZE-1) return -EINVAL; - if (ecmd->rx_max_coalesced_frames > 0xff) + if (ecmd->rx_max_coalesced_frames > RX_MAX_PENDING) return -EINVAL; - if (ecmd->rx_max_coalesced_frames_irq > 0xff) + if (ecmd->rx_max_coalesced_frames_irq >RX_MAX_PENDING) return -EINVAL; if (ecmd->tx_coalesce_usecs == 0) @@ -3198,17 +3232,6 @@ static int __devinit sky2_probe(struct pci_dev *pdev, } } -#ifdef __BIG_ENDIAN - /* byte swap descriptors in hardware */ - { - u32 reg; - - pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); - reg |= PCI_REV_DESC; - pci_write_config_dword(pdev, PCI_DEV_REG2, reg); - } -#endif - err = -ENOMEM; hw = kzalloc(sizeof(*hw), GFP_KERNEL); if (!hw) { @@ -3226,6 +3249,18 @@ static int __devinit sky2_probe(struct pci_dev *pdev, goto err_out_free_hw; } hw->pm_cap = pm_cap; + spin_lock_init(&hw->hw_lock); + +#ifdef __BIG_ENDIAN + /* byte swap descriptors in hardware */ + { + u32 reg; + + reg = sky2_pci_read32(hw, PCI_DEV_REG2); + reg |= PCI_REV_DESC; + sky2_pci_write32(hw, PCI_DEV_REG2, reg); + } +#endif /* ring for status responses */ hw->st_le = pci_alloc_consistent(hw->pdev, STATUS_LE_BYTES, diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index fd12c289a238..3edb98075e0a 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h @@ -5,14 +5,22 @@ #define _SKY2_H /* PCI config registers */ -#define PCI_DEV_REG1 0x40 -#define PCI_DEV_REG2 0x44 -#define PCI_DEV_STATUS 0x7c -#define PCI_OS_PCI_X (1<<26) +enum { + PCI_DEV_REG1 = 0x40, + PCI_DEV_REG2 = 0x44, + PCI_DEV_STATUS = 0x7c, + PCI_DEV_REG3 = 0x80, + PCI_DEV_REG4 = 0x84, + PCI_DEV_REG5 = 0x88, +}; -#define PEX_LNK_STAT 0xf2 -#define PEX_UNC_ERR_STAT 0x104 -#define PEX_DEV_CTRL 0xe8 +enum { + PEX_DEV_CAP = 0xe4, + PEX_DEV_CTRL = 0xe8, + PEX_DEV_STA = 0xea, + PEX_LNK_STAT = 0xf2, + PEX_UNC_ERR_STAT= 0x104, +}; /* Yukon-2 */ enum pci_dev_reg_1 { @@ -37,6 +45,25 @@ enum pci_dev_reg_2 { PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ }; +/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ +enum pci_dev_reg_4 { + /* (Link Training & Status State Machine) */ + P_TIMER_VALUE_MSK = 0xffL<<16, /* Bit 23..16: Timer Value Mask */ + /* (Active State Power Management) */ + P_FORCE_ASPM_REQUEST = 1<<15, /* Force ASPM Request (A1 only) */ + P_ASPM_GPHY_LINK_DOWN = 1<<14, /* GPHY Link Down (A1 only) */ + P_ASPM_INT_FIFO_EMPTY = 1<<13, /* Internal FIFO Empty (A1 only) */ + P_ASPM_CLKRUN_REQUEST = 1<<12, /* CLKRUN Request (A1 only) */ + + P_ASPM_FORCE_CLKREQ_ENA = 1<<4, /* Force CLKREQ Enable (A1b only) */ + P_ASPM_CLKREQ_PAD_CTL = 1<<3, /* CLKREQ PAD Control (A1 only) */ + P_ASPM_A1_MODE_SELECT = 1<<2, /* A1 Mode Select (A1 only) */ + P_CLK_GATE_PEX_UNIT_ENA = 1<<1, /* Enable Gate PEX Unit Clock */ + P_CLK_GATE_ROOT_COR_ENA = 1<<0, /* Enable Gate Root Core Clock */ + P_ASPM_CONTROL_MSK = P_FORCE_ASPM_REQUEST | P_ASPM_GPHY_LINK_DOWN + | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, +}; + #define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ PCI_STATUS_SIG_SYSTEM_ERROR | \ @@ -507,6 +534,16 @@ enum { }; #define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) +/* Q_F 32 bit Flag Register */ +enum { + F_ALM_FULL = 1<<27, /* Rx 
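/*
 * Editor's note -- illustrative sketch, not part of the patch: the sky2.c
 * hunks above replace pci_read_config_*()/pci_write_config_*() with the new
 * sky2_pci_* inlines (added to sky2.h below), which reach the chip's own
 * copy of its PCI configuration space through the Y2_CFG_SPC window in the
 * memory-mapped register space.  That is why the accessor return-code
 * checks and the deleted pci_err: path in sky2_reset() are no longer
 * needed.  A typical read-modify-write then looks like this (function name
 * is made up for illustration):
 */
static void sky2_phy_power_up_both(struct sky2_hw *hw)
{
	u32 reg1;

	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);   /* unlock config writes */
	reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
	reg1 &= ~(PCI_Y2_PHY1_POWD | PCI_Y2_PHY2_POWD);    /* clear PHY power-down bits */
	sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
}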
FIFO: almost full */ + F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ + F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ + F_WM_REACHED = 1<<25, /* Watermark reached */ + F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */ + F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ + F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */ +}; /* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ enum { @@ -909,10 +946,12 @@ enum { PHY_BCOM_ID1_C0 = 0x6044, PHY_BCOM_ID1_C5 = 0x6047, - PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ - PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ - PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ + PHY_MARV_ID1_FE = 0x0C83, /* Yukon-FE (PHY 88E3082 Rev.A1) */ + PHY_MARV_ID1_ECU= 0x0CB0, /* Yukon-ECU (PHY 88E1149 Rev.B2?) */ }; /* Advertisement register bits */ @@ -1837,8 +1876,9 @@ struct sky2_port { struct sky2_hw { void __iomem *regs; struct pci_dev *pdev; - u32 intr_mask; struct net_device *dev[2]; + spinlock_t hw_lock; + u32 intr_mask; int pm_cap; int msi; @@ -1912,4 +1952,25 @@ static inline void gma_set_addr(struct sky2_hw *hw, unsigned port, unsigned reg, gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); } + +/* PCI config space access */ +static inline u32 sky2_pci_read32(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read32(hw, Y2_CFG_SPC + reg); +} + +static inline u16 sky2_pci_read16(const struct sky2_hw *hw, unsigned reg) +{ + return sky2_read16(hw, Y2_CFG_SPC + reg); +} + +static inline void sky2_pci_write32(struct sky2_hw *hw, unsigned reg, u32 val) +{ + sky2_write32(hw, Y2_CFG_SPC + reg, val); +} + +static inline void sky2_pci_write16(struct sky2_hw *hw, unsigned reg, u16 val) +{ + sky2_write16(hw, Y2_CFG_SPC + reg, val); +} #endif diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c index 0702f0eeb784..44024c76d187 100644 --- a/drivers/net/wireless/ipw2200.c +++ b/drivers/net/wireless/ipw2200.c @@ -4636,9 +4636,9 @@ static void ipw_rx_notification(struct ipw_priv *priv, } default: - IPW_ERROR("Unknown notification: " - "subtype=%d,flags=0x%2x,size=%d\n", - notif->subtype, notif->flags, notif->size); + IPW_DEBUG_NOTIF("Unknown notification: " + "subtype=%d,flags=0x%2x,size=%d\n", + notif->subtype, notif->flags, notif->size); } } diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig index 6912399d0937..6f50cc9323d9 100644 --- a/drivers/s390/block/Kconfig +++ b/drivers/s390/block/Kconfig @@ -55,21 +55,13 @@ config DASD_DIAG Disks under VM. If you are not running under VM or unsure what it is, say "N". -config DASD_EER - tristate "Extended error reporting (EER)" - depends on DASD - help - This driver provides a character device interface to the - DASD extended error reporting. This is only needed if you want to - use applications written for the EER facility. - config DASD_CMB tristate "Compatibility interface for DASD channel measurement blocks" depends on DASD help - This driver provides an additional interface to the channel - measurement facility, which is normally accessed though sysfs, with - a set of ioctl functions specific to the dasd driver. 
+ This driver provides an additional interface to the channel measurement + facility, which is normally accessed though sysfs, with a set of + ioctl functions specific to the dasd driver. This is only needed if you want to use applications written for linux-2.4 dasd channel measurement facility interface. diff --git a/drivers/s390/block/Makefile b/drivers/s390/block/Makefile index 0c0d871e8f51..58c6780134f7 100644 --- a/drivers/s390/block/Makefile +++ b/drivers/s390/block/Makefile @@ -5,7 +5,6 @@ dasd_eckd_mod-objs := dasd_eckd.o dasd_3990_erp.o dasd_9343_erp.o dasd_fba_mod-objs := dasd_fba.o dasd_3370_erp.o dasd_9336_erp.o dasd_diag_mod-objs := dasd_diag.o -dasd_eer_mod-objs := dasd_eer.o dasd_mod-objs := dasd.o dasd_ioctl.o dasd_proc.o dasd_devmap.o \ dasd_genhd.o dasd_erp.o @@ -14,6 +13,5 @@ obj-$(CONFIG_DASD_DIAG) += dasd_diag_mod.o obj-$(CONFIG_DASD_ECKD) += dasd_eckd_mod.o obj-$(CONFIG_DASD_FBA) += dasd_fba_mod.o obj-$(CONFIG_DASD_CMB) += dasd_cmb.o -obj-$(CONFIG_DASD_EER) += dasd_eer.o obj-$(CONFIG_BLK_DEV_XPRAM) += xpram.o obj-$(CONFIG_DCSSBLK) += dcssblk.o diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 08c88fcd8963..af1d5b404cee 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -18,7 +18,6 @@ #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/hdreg.h> -#include <linux/notifier.h> #include <asm/ccwdev.h> #include <asm/ebcdic.h> @@ -58,7 +57,6 @@ static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); static void dasd_flush_ccw_queue(struct dasd_device *, int); static void dasd_tasklet(struct dasd_device *); static void do_kick_device(void *data); -static void dasd_disable_eer(struct dasd_device *device); /* * SECTION: Operations on the device structure. @@ -153,10 +151,13 @@ dasd_state_new_to_known(struct dasd_device *device) static inline void dasd_state_known_to_new(struct dasd_device * device) { - /* disable extended error reporting for this device */ - dasd_disable_eer(device); /* Forget the discipline information. */ + if (device->discipline) + module_put(device->discipline->owner); device->discipline = NULL; + if (device->base_discipline) + module_put(device->base_discipline->owner); + device->base_discipline = NULL; device->state = DASD_STATE_NEW; dasd_free_queue(device); @@ -871,9 +872,6 @@ dasd_handle_state_change_pending(struct dasd_device *device) struct dasd_ccw_req *cqr; struct list_head *l, *n; - /* first of all call extended error reporting */ - dasd_write_eer_trigger(DASD_EER_STATECHANGE, device, NULL); - device->stopped &= ~DASD_STOPPED_PENDING; /* restart all 'running' IO on queue */ @@ -1093,19 +1091,6 @@ restart: } goto restart; } - - /* first of all call extended error reporting */ - if (device->eer && cqr->status == DASD_CQR_FAILED) { - dasd_write_eer_trigger(DASD_EER_FATALERROR, - device, cqr); - - /* restart request */ - cqr->status = DASD_CQR_QUEUED; - cqr->retries = 255; - device->stopped |= DASD_STOPPED_QUIESCE; - goto restart; - } - /* Process finished ERP request. 
*/ if (cqr->refers) { __dasd_process_erp(device, cqr); @@ -1243,8 +1228,7 @@ __dasd_start_head(struct dasd_device * device) cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); /* check FAILFAST */ if (device->stopped & ~DASD_STOPPED_PENDING && - test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && - (!device->eer)) { + test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags)) { cqr->status = DASD_CQR_FAILED; dasd_schedule_bh(device); } @@ -1880,9 +1864,10 @@ dasd_generic_remove (struct ccw_device *cdev) */ int dasd_generic_set_online (struct ccw_device *cdev, - struct dasd_discipline *discipline) + struct dasd_discipline *base_discipline) { + struct dasd_discipline *discipline; struct dasd_device *device; int rc; @@ -1890,6 +1875,7 @@ dasd_generic_set_online (struct ccw_device *cdev, if (IS_ERR(device)) return PTR_ERR(device); + discipline = base_discipline; if (device->features & DASD_FEATURE_USEDIAG) { if (!dasd_diag_discipline_pointer) { printk (KERN_WARNING @@ -1901,6 +1887,16 @@ dasd_generic_set_online (struct ccw_device *cdev, } discipline = dasd_diag_discipline_pointer; } + if (!try_module_get(base_discipline->owner)) { + dasd_delete_device(device); + return -EINVAL; + } + if (!try_module_get(discipline->owner)) { + module_put(base_discipline->owner); + dasd_delete_device(device); + return -EINVAL; + } + device->base_discipline = base_discipline; device->discipline = discipline; rc = discipline->check_device(device); @@ -1909,6 +1905,8 @@ dasd_generic_set_online (struct ccw_device *cdev, "dasd_generic couldn't online device %s " "with discipline %s rc=%i\n", cdev->dev.bus_id, discipline->name, rc); + module_put(discipline->owner); + module_put(base_discipline->owner); dasd_delete_device(device); return rc; } @@ -1986,9 +1984,6 @@ dasd_generic_notify(struct ccw_device *cdev, int event) switch (event) { case CIO_GONE: case CIO_NO_PATH: - /* first of all call extended error reporting */ - dasd_write_eer_trigger(DASD_EER_NOPATH, device, NULL); - if (device->state < DASD_STATE_BASIC) break; /* Device is active. We want to keep it. */ @@ -2046,51 +2041,6 @@ dasd_generic_auto_online (struct ccw_driver *dasd_discipline_driver) put_driver(drv); } -/* - * notifications for extended error reports - */ -static struct notifier_block *dasd_eer_chain; - -int -dasd_register_eer_notifier(struct notifier_block *nb) -{ - return notifier_chain_register(&dasd_eer_chain, nb); -} - -int -dasd_unregister_eer_notifier(struct notifier_block *nb) -{ - return notifier_chain_unregister(&dasd_eer_chain, nb); -} - -/* - * Notify the registered error reporting module of a problem - */ -void -dasd_write_eer_trigger(unsigned int id, struct dasd_device *device, - struct dasd_ccw_req *cqr) -{ - if (device->eer) { - struct dasd_eer_trigger temp; - temp.id = id; - temp.device = device; - temp.cqr = cqr; - notifier_call_chain(&dasd_eer_chain, DASD_EER_TRIGGER, - (void *)&temp); - } -} - -/* - * Tell the registered error reporting module to disable error reporting for - * a given device and to cleanup any private data structures on that device. 
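/*
 * Editor's note -- illustrative sketch, not part of the patch: the
 * dasd_generic_set_online() change above now pins two module references per
 * device -- one on the base discipline (ECKD/FBA) and one on the discipline
 * actually used, which may be the DIAG discipline when DASD_FEATURE_USEDIAG
 * is set.  dasd_state_known_to_new() drops both again.  The acquire and
 * unwind order, with a made-up helper name:
 */
static int dasd_pin_disciplines(struct dasd_device *device,
				struct dasd_discipline *base,
				struct dasd_discipline *used)
{
	if (!try_module_get(base->owner))
		return -EINVAL;
	if (!try_module_get(used->owner)) {
		module_put(base->owner);	/* undo the first reference */
		return -EINVAL;
	}
	device->base_discipline = base;
	device->discipline = used;
	return 0;
}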
- */ -static void -dasd_disable_eer(struct dasd_device *device) -{ - notifier_call_chain(&dasd_eer_chain, DASD_EER_DISABLE, (void *)device); -} - - static int __init dasd_init(void) { @@ -2172,11 +2122,6 @@ EXPORT_SYMBOL_GPL(dasd_generic_set_online); EXPORT_SYMBOL_GPL(dasd_generic_set_offline); EXPORT_SYMBOL_GPL(dasd_generic_auto_online); -EXPORT_SYMBOL(dasd_register_eer_notifier); -EXPORT_SYMBOL(dasd_unregister_eer_notifier); -EXPORT_SYMBOL(dasd_write_eer_trigger); - - /* * Overrides for Emacs so that we follow Linus's tabbing style. * Emacs will notice this stuff at the end of the file and automatically diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index c811380b9079..4ee0f934e325 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c @@ -1108,9 +1108,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense) case 0x0B: DEV_MESSAGE(KERN_WARNING, device, "%s", "FORMAT F - Volume is suspended duplex"); - /* call extended error reporting (EER) */ - dasd_write_eer_trigger(DASD_EER_PPRCSUSPEND, device, - erp->refers); break; case 0x0C: DEV_MESSAGE(KERN_WARNING, device, "%s", diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h index e15dd7978050..bc3823d35223 100644 --- a/drivers/s390/block/dasd_eckd.h +++ b/drivers/s390/block/dasd_eckd.h @@ -29,7 +29,6 @@ #define DASD_ECKD_CCW_PSF 0x27 #define DASD_ECKD_CCW_RSSD 0x3e #define DASD_ECKD_CCW_LOCATE_RECORD 0x47 -#define DASD_ECKD_CCW_SNSS 0x54 #define DASD_ECKD_CCW_DEFINE_EXTENT 0x63 #define DASD_ECKD_CCW_WRITE_MT 0x85 #define DASD_ECKD_CCW_READ_MT 0x86 diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c deleted file mode 100644 index f70cd7716b24..000000000000 --- a/drivers/s390/block/dasd_eer.c +++ /dev/null @@ -1,1090 +0,0 @@ -/* - * character device driver for extended error reporting - * - * - * Copyright (C) 2005 IBM Corporation - * extended error reporting for DASD ECKD devices - * Author(s): Stefan Weinhuber <wein@de.ibm.com> - * - */ - -#include <linux/init.h> -#include <linux/fs.h> -#include <linux/kernel.h> -#include <linux/miscdevice.h> -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/device.h> -#include <linux/workqueue.h> -#include <linux/poll.h> -#include <linux/notifier.h> - -#include <asm/uaccess.h> -#include <asm/semaphore.h> -#include <asm/atomic.h> -#include <asm/ebcdic.h> - -#include "dasd_int.h" -#include "dasd_eckd.h" - - -MODULE_LICENSE("GPL"); - -MODULE_AUTHOR("Stefan Weinhuber <wein@de.ibm.com>"); -MODULE_DESCRIPTION("DASD extended error reporting module"); - - -#ifdef PRINTK_HEADER -#undef PRINTK_HEADER -#endif /* PRINTK_HEADER */ -#define PRINTK_HEADER "dasd(eer):" - - - - - -/*****************************************************************************/ -/* the internal buffer */ -/*****************************************************************************/ - -/* - * The internal buffer is meant to store obaque blobs of data, so it doesn't - * know of higher level concepts like triggers. - * It consists of a number of pages that are used as a ringbuffer. Each data - * blob is stored in a simple record that consists of an integer, which - * contains the size of the following data, and the data bytes themselfes. - * - * To allow for multiple independent readers we create one internal buffer - * each time the device is opened and destroy the buffer when the file is - * closed again. 
- * - * One record can be written to a buffer by using the functions - * - dasd_eer_start_record (one time per record to write the size to the buffer - * and reserve the space for the data) - * - dasd_eer_write_buffer (one or more times per record to write the data) - * The data can be written in several steps but you will have to compute - * the total size up front for the invocation of dasd_eer_start_record. - * If the ringbuffer is full, dasd_eer_start_record will remove the required - * number of old records. - * - * A record is typically read in two steps, first read the integer that - * specifies the size of the following data, then read the data. - * Both can be done by - * - dasd_eer_read_buffer - * - * For all mentioned functions you need to get the bufferlock first and keep it - * until a complete record is written or read. - */ - - -/* - * Alle information necessary to keep track of an internal buffer is kept in - * a struct eerbuffer. The buffer specific to a file pointer is strored in - * the private_data field of that file. To be able to write data to all - * existing buffers, each buffer is also added to the bufferlist. - * If the user doesn't want to read a complete record in one go, we have to - * keep track of the rest of the record. residual stores the number of bytes - * that are still to deliver. If the rest of the record is invalidated between - * two reads then residual will be set to -1 so that the next read will fail. - * All entries in the eerbuffer structure are protected with the bufferlock. - * To avoid races between writing to a buffer on the one side and creating - * and destroying buffers on the other side, the bufferlock must also be used - * to protect the bufferlist. - */ - -struct eerbuffer { - struct list_head list; - char **buffer; - int buffersize; - int buffer_page_count; - int head; - int tail; - int residual; -}; - -LIST_HEAD(bufferlist); - -static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED; - -DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue); - -/* - * How many free bytes are available on the buffer. - * needs to be called with bufferlock held - */ -static int -dasd_eer_get_free_bytes(struct eerbuffer *eerb) -{ - if (eerb->head < eerb->tail) { - return eerb->tail - eerb->head - 1; - } else - return eerb->buffersize - eerb->head + eerb->tail -1; -} - -/* - * How many bytes of buffer space are used. - * needs to be called with bufferlock held - */ -static int -dasd_eer_get_filled_bytes(struct eerbuffer *eerb) -{ - - if (eerb->head >= eerb->tail) { - return eerb->head - eerb->tail; - } else - return eerb->buffersize - eerb->tail + eerb->head; -} - -/* - * The dasd_eer_write_buffer function just copies count bytes of data - * to the buffer. Make sure to call dasd_eer_start_record first, to - * make sure that enough free space is available. 
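/*
 * Editor's note -- illustrative standalone sketch, not part of the patch:
 * the ring buffer being deleted here keeps one byte unused so that
 * head == tail always means "empty" and never "full".  The free/used
 * arithmetic of dasd_eer_get_free_bytes()/dasd_eer_get_filled_bytes()
 * above, as plain C:
 */
#include <assert.h>

struct ringbuf { int head, tail, buffersize; };

static int ring_free_bytes(const struct ringbuf *rb)
{
	if (rb->head < rb->tail)
		return rb->tail - rb->head - 1;
	return rb->buffersize - rb->head + rb->tail - 1;
}

static int ring_filled_bytes(const struct ringbuf *rb)
{
	if (rb->head >= rb->tail)
		return rb->head - rb->tail;
	return rb->buffersize - rb->tail + rb->head;
}

/* An empty one-page (4096 byte) buffer reports 4095 free and 0 used bytes. */
static void ring_sanity_check(void)
{
	struct ringbuf rb = { .head = 0, .tail = 0, .buffersize = 4096 };
	assert(ring_free_bytes(&rb) == 4095);
	assert(ring_filled_bytes(&rb) == 0);
}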
- * needs to be called with bufferlock held - */ -static void -dasd_eer_write_buffer(struct eerbuffer *eerb, int count, char *data) -{ - - unsigned long headindex,localhead; - unsigned long rest, len; - char *nextdata; - - nextdata = data; - rest = count; - while (rest > 0) { - headindex = eerb->head / PAGE_SIZE; - localhead = eerb->head % PAGE_SIZE; - len = min(rest, (PAGE_SIZE - localhead)); - memcpy(eerb->buffer[headindex]+localhead, nextdata, len); - nextdata += len; - rest -= len; - eerb->head += len; - if ( eerb->head == eerb->buffersize ) - eerb->head = 0; /* wrap around */ - if (eerb->head > eerb->buffersize) { - MESSAGE(KERN_ERR, "%s", "runaway buffer head."); - BUG(); - } - } -} - -/* - * needs to be called with bufferlock held - */ -static int -dasd_eer_read_buffer(struct eerbuffer *eerb, int count, char *data) -{ - - unsigned long tailindex,localtail; - unsigned long rest, len, finalcount; - char *nextdata; - - finalcount = min(count, dasd_eer_get_filled_bytes(eerb)); - nextdata = data; - rest = finalcount; - while (rest > 0) { - tailindex = eerb->tail / PAGE_SIZE; - localtail = eerb->tail % PAGE_SIZE; - len = min(rest, (PAGE_SIZE - localtail)); - memcpy(nextdata, eerb->buffer[tailindex]+localtail, len); - nextdata += len; - rest -= len; - eerb->tail += len; - if ( eerb->tail == eerb->buffersize ) - eerb->tail = 0; /* wrap around */ - if (eerb->tail > eerb->buffersize) { - MESSAGE(KERN_ERR, "%s", "runaway buffer tail."); - BUG(); - } - } - return finalcount; -} - -/* - * Whenever you want to write a blob of data to the internal buffer you - * have to start by using this function first. It will write the number - * of bytes that will be written to the buffer. If necessary it will remove - * old records to make room for the new one. - * needs to be called with bufferlock held - */ -static int -dasd_eer_start_record(struct eerbuffer *eerb, int count) -{ - int tailcount; - if (count + sizeof(count) > eerb->buffersize) - return -ENOMEM; - while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) { - if (eerb->residual > 0) { - eerb->tail += eerb->residual; - if (eerb->tail >= eerb->buffersize) - eerb->tail -= eerb->buffersize; - eerb->residual = -1; - } - dasd_eer_read_buffer(eerb, sizeof(tailcount), - (char*)(&tailcount)); - eerb->tail += tailcount; - if (eerb->tail >= eerb->buffersize) - eerb->tail -= eerb->buffersize; - } - dasd_eer_write_buffer(eerb, sizeof(count), (char*)(&count)); - - return 0; -}; - -/* - * release pages that are not used anymore - */ -static void -dasd_eer_free_buffer_pages(char **buf, int no_pages) -{ - int i; - - for (i = 0; i < no_pages; ++i) { - free_page((unsigned long)buf[i]); - } -} - -/* - * allocate a new set of memory pages - */ -static int -dasd_eer_allocate_buffer_pages(char **buf, int no_pages) -{ - int i; - - for (i = 0; i < no_pages; ++i) { - buf[i] = (char *) get_zeroed_page(GFP_KERNEL); - if (!buf[i]) { - dasd_eer_free_buffer_pages(buf, i); - return -ENOMEM; - } - } - return 0; -} - -/* - * empty the buffer by resetting head and tail - * In case there is a half read data blob in the buffer, we set residual - * to -1 to indicate that the remainder of the blob is lost. 
- */ -static void -dasd_eer_purge_buffer(struct eerbuffer *eerb) -{ - unsigned long flags; - - spin_lock_irqsave(&bufferlock, flags); - if (eerb->residual > 0) - eerb->residual = -1; - eerb->tail=0; - eerb->head=0; - spin_unlock_irqrestore(&bufferlock, flags); -} - -/* - * set the size of the buffer, newsize is the new number of pages to be used - * we don't try to copy any data back an forth, so any resize will also purge - * the buffer - */ -static int -dasd_eer_resize_buffer(struct eerbuffer *eerb, int newsize) -{ - int i, oldcount, reuse; - char **new; - char **old; - unsigned long flags; - - if (newsize < 1) - return -EINVAL; - if (eerb->buffer_page_count == newsize) { - /* documented behaviour is that any successfull invocation - * will purge all records */ - dasd_eer_purge_buffer(eerb); - return 0; - } - new = kmalloc(newsize*sizeof(char*), GFP_KERNEL); - if (!new) - return -ENOMEM; - - reuse=min(eerb->buffer_page_count, newsize); - for (i = 0; i < reuse; ++i) { - new[i] = eerb->buffer[i]; - } - if (eerb->buffer_page_count < newsize) { - if (dasd_eer_allocate_buffer_pages( - &new[eerb->buffer_page_count], - newsize - eerb->buffer_page_count)) { - kfree(new); - return -ENOMEM; - } - } - - spin_lock_irqsave(&bufferlock, flags); - old = eerb->buffer; - eerb->buffer = new; - if (eerb->residual > 0) - eerb->residual = -1; - eerb->tail = 0; - eerb->head = 0; - oldcount = eerb->buffer_page_count; - eerb->buffer_page_count = newsize; - spin_unlock_irqrestore(&bufferlock, flags); - - if (oldcount > newsize) { - for (i = newsize; i < oldcount; ++i) { - free_page((unsigned long)old[i]); - } - } - kfree(old); - - return 0; -} - - -/*****************************************************************************/ -/* The extended error reporting functionality */ -/*****************************************************************************/ - -/* - * When a DASD device driver wants to report an error, it calls the - * function dasd_eer_write_trigger (via a notifier mechanism) and gives the - * respective trigger ID as parameter. - * Currently there are four kinds of triggers: - * - * DASD_EER_FATALERROR: all kinds of unrecoverable I/O problems - * DASD_EER_PPRCSUSPEND: PPRC was suspended - * DASD_EER_NOPATH: There is no path to the device left. - * DASD_EER_STATECHANGE: The state of the device has changed. - * - * For the first three triggers all required information can be supplied by - * the caller. For these triggers a record is written by the function - * dasd_eer_write_standard_trigger. - * - * When dasd_eer_write_trigger is called to write a DASD_EER_STATECHANGE - * trigger, we have to gather the necessary sense data first. We cannot queue - * the necessary SNSS (sense subsystem status) request immediatly, since we - * are likely to run in a deadlock situation. Instead, we schedule a - * work_struct that calls the function dasd_eer_sense_subsystem_status to - * create and start an SNSS request asynchronously. - * - * To avoid memory allocations at runtime, the necessary memory is allocated - * when the extended error reporting is enabled for a device (by - * dasd_eer_probe). There is one private eer data structure for each eer - * enabled DASD device. It contains memory for the work_struct, one SNSS cqr - * and a flags field that is used to coordinate the use of the cqr. The call - * to write a state change trigger can come in at any time, so we have one flag - * CQR_IN_USE that protects the cqr itself. 
When this flag indicates that the - * cqr is currently in use, dasd_eer_sense_subsystem_status cannot start a - * second request but sets the SNSS_REQUESTED flag instead. - * - * When the request is finished, the callback function dasd_eer_SNSS_cb - * is called. This function will invoke the function - * dasd_eer_write_SNSS_trigger to finally write the trigger. It will also - * check the SNSS_REQUESTED flag and if it is set it will call - * dasd_eer_sense_subsystem_status again. - * - * To avoid race conditions during the handling of the lock, the flags must - * be protected by the snsslock. - */ - -struct dasd_eer_private { - struct dasd_ccw_req *cqr; - unsigned long flags; - struct work_struct worker; -}; - -static void dasd_eer_destroy(struct dasd_device *device, - struct dasd_eer_private *eer); -static int -dasd_eer_write_trigger(struct dasd_eer_trigger *trigger); -static void dasd_eer_sense_subsystem_status(void *data); -static int dasd_eer_notify(struct notifier_block *self, - unsigned long action, void *data); - -struct workqueue_struct *dasd_eer_workqueue; - -#define SNSS_DATA_SIZE 44 -static spinlock_t snsslock = SPIN_LOCK_UNLOCKED; - -#define DASD_EER_BUSID_SIZE 10 -struct dasd_eer_header { - __u32 total_size; - __u32 trigger; - __u64 tv_sec; - __u64 tv_usec; - char busid[DASD_EER_BUSID_SIZE]; -} __attribute__ ((packed)); - -static struct notifier_block dasd_eer_nb = { - .notifier_call = dasd_eer_notify, -}; - -/* - * flags for use with dasd_eer_private - */ -#define CQR_IN_USE 0 -#define SNSS_REQUESTED 1 - -/* - * This function checks if extended error reporting is available for a given - * dasd_device. If yes, then it creates and returns a struct dasd_eer, - * otherwise it returns an -EPERM error pointer. - */ -struct dasd_eer_private * -dasd_eer_probe(struct dasd_device *device) -{ - struct dasd_eer_private *private; - - if (!(device && device->discipline - && !strcmp(device->discipline->name, "ECKD"))) { - return ERR_PTR(-EPERM); - } - /* allocate the private data structure */ - private = (struct dasd_eer_private *)kmalloc( - sizeof(struct dasd_eer_private), GFP_KERNEL); - if (!private) { - return ERR_PTR(-ENOMEM); - } - INIT_WORK(&private->worker, dasd_eer_sense_subsystem_status, - (void *)device); - private->cqr = dasd_kmalloc_request("ECKD", - 1 /* SNSS */ , - SNSS_DATA_SIZE , - device); - if (!private->cqr) { - kfree(private); - return ERR_PTR(-ENOMEM); - } - private->flags = 0; - return private; -}; - -/* - * If our private SNSS request is queued, remove it from the - * dasd ccw queue so we can free the requests memory. 
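/*
 * Editor's note -- illustrative sketch, not part of the patch: the deleted
 * SNSS handling above serializes one in-flight sense-subsystem-status
 * request and coalesces further requests that arrive while it runs.  Two
 * bits do the work: CQR_IN_USE marks the single cqr busy, SNSS_REQUESTED
 * remembers that at least one more run is wanted.  Schematic with made-up
 * helper names (the real code additionally guards the bits with snsslock):
 */
static void snss_kick(struct dasd_eer_private *priv)
{
	if (test_and_set_bit(CQR_IN_USE, &priv->flags)) {
		set_bit(SNSS_REQUESTED, &priv->flags);	/* busy: remember to rerun */
		return;
	}
	/* ... build and start the SNSS ccw request here ... */
}

static void snss_done(struct dasd_eer_private *priv)
{
	int rerun = test_and_clear_bit(SNSS_REQUESTED, &priv->flags);

	clear_bit(CQR_IN_USE, &priv->flags);
	if (rerun)
		snss_kick(priv);	/* one deferred rerun covers all coalesced requests */
}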
- */ -static void -dasd_eer_dequeue_SNSS_request(struct dasd_device *device, - struct dasd_eer_private *eer) -{ - struct list_head *lst, *nxt; - struct dasd_ccw_req *cqr, *erpcqr; - dasd_erp_fn_t erp_fn; - - spin_lock_irq(get_ccwdev_lock(device->cdev)); - list_for_each_safe(lst, nxt, &device->ccw_queue) { - cqr = list_entry(lst, struct dasd_ccw_req, list); - /* we are looking for two kinds or requests */ - /* first kind: our SNSS request: */ - if (cqr == eer->cqr) { - if (cqr->status == DASD_CQR_IN_IO) - device->discipline->term_IO(cqr); - list_del(&cqr->list); - break; - } - /* second kind: ERP requests for our SNSS request */ - if (cqr->refers) { - /* If this erp request chain ends in our cqr, then */ - /* cal the erp_postaction to clean it up */ - erpcqr = cqr; - while (erpcqr->refers) { - erpcqr = erpcqr->refers; - } - if (erpcqr == eer->cqr) { - erp_fn = device->discipline->erp_postaction( - cqr); - erp_fn(cqr); - } - continue; - } - } - spin_unlock_irq(get_ccwdev_lock(device->cdev)); -} - -/* - * This function dismantles a struct dasd_eer that was created by - * dasd_eer_probe. Since we want to free our private data structure, - * we must make sure that the memory is not in use anymore. - * We have to flush the work queue and remove a possible SNSS request - * from the dasd queue. - */ -static void -dasd_eer_destroy(struct dasd_device *device, struct dasd_eer_private *eer) -{ - flush_workqueue(dasd_eer_workqueue); - dasd_eer_dequeue_SNSS_request(device, eer); - dasd_kfree_request(eer->cqr, device); - kfree(eer); -}; - -/* - * enable the extended error reporting for a particular device - */ -static int -dasd_eer_enable_on_device(struct dasd_device *device) -{ - void *eer; - if (!device) - return -ENODEV; - if (device->eer) - return 0; - if (!try_module_get(THIS_MODULE)) { - return -EINVAL; - } - eer = (void *)dasd_eer_probe(device); - if (IS_ERR(eer)) { - module_put(THIS_MODULE); - return PTR_ERR(eer); - } - device->eer = eer; - return 0; -} - -/* - * enable the extended error reporting for a particular device - */ -static int -dasd_eer_disable_on_device(struct dasd_device *device) -{ - struct dasd_eer_private *eer = device->eer; - - if (!device) - return -ENODEV; - if (!device->eer) - return 0; - device->eer = NULL; - dasd_eer_destroy(device,eer); - module_put(THIS_MODULE); - - return 0; -} - -/* - * Set extended error reporting (eer) - * Note: This will be registered as a DASD ioctl, to be called on DASD devices. - */ -static int -dasd_ioctl_set_eer(struct block_device *bdev, int no, long args) -{ - struct dasd_device *device; - int intval; - - if (!capable(CAP_SYS_ADMIN)) - return -EACCES; - if (bdev != bdev->bd_contains) - /* Error-reporting is not allowed for partitions */ - return -EINVAL; - if (get_user(intval, (int __user *) args)) - return -EFAULT; - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - - intval = (intval != 0); - DEV_MESSAGE (KERN_DEBUG, device, - "set eer on device to %d", intval); - if (intval) - return dasd_eer_enable_on_device(device); - else - return dasd_eer_disable_on_device(device); -} - -/* - * Get value of extended error reporting. - * Note: This will be registered as a DASD ioctl, to be called on DASD devices. 
- */ -static int -dasd_ioctl_get_eer(struct block_device *bdev, int no, long args) -{ - struct dasd_device *device; - - device = bdev->bd_disk->private_data; - if (device == NULL) - return -ENODEV; - return put_user((device->eer != NULL), (int __user *) args); -} - -/* - * The following function can be used for those triggers that have - * all necessary data available when the function is called. - * If the parameter cqr is not NULL, the chain of requests will be searched - * for valid sense data, and all valid sense data sets will be added to - * the triggers data. - */ -static int -dasd_eer_write_standard_trigger(int trigger, struct dasd_device *device, - struct dasd_ccw_req *cqr) -{ - struct dasd_ccw_req *temp_cqr; - int data_size; - struct timeval tv; - struct dasd_eer_header header; - unsigned long flags; - struct eerbuffer *eerb; - - /* go through cqr chain and count the valid sense data sets */ - temp_cqr = cqr; - data_size = 0; - while (temp_cqr) { - if (temp_cqr->irb.esw.esw0.erw.cons) - data_size += 32; - temp_cqr = temp_cqr->refers; - } - - header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ - header.trigger = trigger; - do_gettimeofday(&tv); - header.tv_sec = tv.tv_sec; - header.tv_usec = tv.tv_usec; - strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); - - spin_lock_irqsave(&bufferlock, flags); - list_for_each_entry(eerb, &bufferlist, list) { - dasd_eer_start_record(eerb, header.total_size); - dasd_eer_write_buffer(eerb, sizeof(header), (char*)(&header)); - temp_cqr = cqr; - while (temp_cqr) { - if (temp_cqr->irb.esw.esw0.erw.cons) - dasd_eer_write_buffer(eerb, 32, cqr->irb.ecw); - temp_cqr = temp_cqr->refers; - } - dasd_eer_write_buffer(eerb, 4,"EOR"); - } - spin_unlock_irqrestore(&bufferlock, flags); - - wake_up_interruptible(&dasd_eer_read_wait_queue); - - return 0; -} - -/* - * This function writes a DASD_EER_STATECHANGE trigger. - */ -static void -dasd_eer_write_SNSS_trigger(struct dasd_device *device, - struct dasd_ccw_req *cqr) -{ - int data_size; - int snss_rc; - struct timeval tv; - struct dasd_eer_header header; - unsigned long flags; - struct eerbuffer *eerb; - - snss_rc = (cqr->status == DASD_CQR_FAILED) ? 
-EIO : 0; - if (snss_rc) - data_size = 0; - else - data_size = SNSS_DATA_SIZE; - - header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ - header.trigger = DASD_EER_STATECHANGE; - do_gettimeofday(&tv); - header.tv_sec = tv.tv_sec; - header.tv_usec = tv.tv_usec; - strncpy(header.busid, device->cdev->dev.bus_id, DASD_EER_BUSID_SIZE); - - spin_lock_irqsave(&bufferlock, flags); - list_for_each_entry(eerb, &bufferlist, list) { - dasd_eer_start_record(eerb, header.total_size); - dasd_eer_write_buffer(eerb, sizeof(header),(char*)(&header)); - if (!snss_rc) - dasd_eer_write_buffer(eerb, SNSS_DATA_SIZE, cqr->data); - dasd_eer_write_buffer(eerb, 4,"EOR"); - } - spin_unlock_irqrestore(&bufferlock, flags); - - wake_up_interruptible(&dasd_eer_read_wait_queue); -} - -/* - * callback function for use with SNSS request - */ -static void -dasd_eer_SNSS_cb(struct dasd_ccw_req *cqr, void *data) -{ - struct dasd_device *device; - struct dasd_eer_private *private; - unsigned long irqflags; - - device = (struct dasd_device *)data; - private = (struct dasd_eer_private *)device->eer; - dasd_eer_write_SNSS_trigger(device, cqr); - spin_lock_irqsave(&snsslock, irqflags); - if(!test_and_clear_bit(SNSS_REQUESTED, &private->flags)) { - clear_bit(CQR_IN_USE, &private->flags); - spin_unlock_irqrestore(&snsslock, irqflags); - return; - }; - clear_bit(CQR_IN_USE, &private->flags); - spin_unlock_irqrestore(&snsslock, irqflags); - dasd_eer_sense_subsystem_status(device); - return; -} - -/* - * clean a used cqr before using it again - */ -static void -dasd_eer_clean_SNSS_request(struct dasd_ccw_req *cqr) -{ - struct ccw1 *cpaddr = cqr->cpaddr; - void *data = cqr->data; - - memset(cqr, 0, sizeof(struct dasd_ccw_req)); - memset(cpaddr, 0, sizeof(struct ccw1)); - memset(data, 0, SNSS_DATA_SIZE); - cqr->cpaddr = cpaddr; - cqr->data = data; - strncpy((char *) &cqr->magic, "ECKD", 4); - ASCEBC((char *) &cqr->magic, 4); - set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); -} - -/* - * build and start an SNSS request - * This function is called from a work queue so we have to - * pass the dasd_device pointer as a void pointer. - */ -static void -dasd_eer_sense_subsystem_status(void *data) -{ - struct dasd_device *device; - struct dasd_eer_private *private; - struct dasd_ccw_req *cqr; - struct ccw1 *ccw; - unsigned long irqflags; - - device = (struct dasd_device *)data; - private = (struct dasd_eer_private *)device->eer; - if (!private) /* device not eer enabled any more */ - return; - cqr = private->cqr; - spin_lock_irqsave(&snsslock, irqflags); - if(test_and_set_bit(CQR_IN_USE, &private->flags)) { - set_bit(SNSS_REQUESTED, &private->flags); - spin_unlock_irqrestore(&snsslock, irqflags); - return; - }; - spin_unlock_irqrestore(&snsslock, irqflags); - dasd_eer_clean_SNSS_request(cqr); - cqr->device = device; - cqr->retries = 255; - cqr->expires = 10 * HZ; - - ccw = cqr->cpaddr; - ccw->cmd_code = DASD_ECKD_CCW_SNSS; - ccw->count = SNSS_DATA_SIZE; - ccw->flags = 0; - ccw->cda = (__u32)(addr_t)cqr->data; - - cqr->buildclk = get_clock(); - cqr->status = DASD_CQR_FILLED; - cqr->callback = dasd_eer_SNSS_cb; - cqr->callback_data = (void *)device; - dasd_add_request_head(cqr); - - return; -} - -/* - * This function is called for all triggers. It calls the appropriate - * function that writes the actual trigger records. 
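/*
 * Editor's note -- illustrative sketch, not part of the patch: every record
 * the deleted trigger writers above put into the ring buffer is a packed
 * struct dasd_eer_header, followed by the payload (one 32-byte block per
 * request in the chain that carries valid sense data, or SNSS_DATA_SIZE
 * bytes of SNSS data), and a literal 4-byte "EOR" end marker.  The
 * total_size field covers all of it:
 */
static unsigned int eer_record_size(unsigned int sense_sets)
{
	return sizeof(struct dasd_eer_header)	/* fixed, packed header */
	     + 32 * sense_sets			/* valid sense data sets */
	     + 4;				/* trailing "EOR" marker */
}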
- */ -static int -dasd_eer_write_trigger(struct dasd_eer_trigger *trigger) -{ - int rc; - struct dasd_eer_private *private = trigger->device->eer; - - switch (trigger->id) { - case DASD_EER_FATALERROR: - case DASD_EER_PPRCSUSPEND: - rc = dasd_eer_write_standard_trigger( - trigger->id, trigger->device, trigger->cqr); - break; - case DASD_EER_NOPATH: - rc = dasd_eer_write_standard_trigger( - trigger->id, trigger->device, NULL); - break; - case DASD_EER_STATECHANGE: - if (queue_work(dasd_eer_workqueue, &private->worker)) { - rc=0; - } else { - /* If the work_struct was already queued, it can't - * be queued again. But this is OK since we don't - * need to have it queued twice. - */ - rc = -EBUSY; - } - break; - default: /* unknown trigger, so we write it without any sense data */ - rc = dasd_eer_write_standard_trigger( - trigger->id, trigger->device, NULL); - break; - } - return rc; -} - -/* - * This function is registered with the dasd device driver and gets called - * for all dasd eer notifications. - */ -static int dasd_eer_notify(struct notifier_block *self, - unsigned long action, void *data) -{ - switch (action) { - case DASD_EER_DISABLE: - dasd_eer_disable_on_device((struct dasd_device *)data); - break; - case DASD_EER_TRIGGER: - dasd_eer_write_trigger((struct dasd_eer_trigger *)data); - break; - } - return NOTIFY_OK; -} - - -/*****************************************************************************/ -/* the device operations */ -/*****************************************************************************/ - -/* - * On the one side we need a lock to access our internal buffer, on the - * other side a copy_to_user can sleep. So we need to copy the data we have - * to transfer in a readbuffer, which is protected by the readbuffer_mutex. - */ -static char readbuffer[PAGE_SIZE]; -DECLARE_MUTEX(readbuffer_mutex); - - -static int -dasd_eer_open(struct inode *inp, struct file *filp) -{ - struct eerbuffer *eerb; - unsigned long flags; - - eerb = kmalloc(sizeof(struct eerbuffer), GFP_KERNEL); - eerb->head = 0; - eerb->tail = 0; - eerb->residual = 0; - eerb->buffer_page_count = 1; - eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE; - eerb->buffer = kmalloc(eerb->buffer_page_count*sizeof(char*), - GFP_KERNEL); - if (!eerb->buffer) - return -ENOMEM; - if (dasd_eer_allocate_buffer_pages(eerb->buffer, - eerb->buffer_page_count)) { - kfree(eerb->buffer); - return -ENOMEM; - } - filp->private_data = eerb; - spin_lock_irqsave(&bufferlock, flags); - list_add(&eerb->list, &bufferlist); - spin_unlock_irqrestore(&bufferlock, flags); - - return nonseekable_open(inp,filp); -} - -static int -dasd_eer_close(struct inode *inp, struct file *filp) -{ - struct eerbuffer *eerb; - unsigned long flags; - - eerb = (struct eerbuffer *)filp->private_data; - spin_lock_irqsave(&bufferlock, flags); - list_del(&eerb->list); - spin_unlock_irqrestore(&bufferlock, flags); - dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count); - kfree(eerb->buffer); - kfree(eerb); - - return 0; -} - -static long -dasd_eer_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) -{ - int intval; - struct eerbuffer *eerb; - - eerb = (struct eerbuffer *)filp->private_data; - switch (cmd) { - case DASD_EER_PURGE: - dasd_eer_purge_buffer(eerb); - return 0; - case DASD_EER_SETBUFSIZE: - if (get_user(intval, (int __user *)arg)) - return -EFAULT; - return dasd_eer_resize_buffer(eerb, intval); - default: - return -ENOIOCTLCMD; - } -} - -static ssize_t -dasd_eer_read(struct file *filp, char __user *buf, size_t count, loff_t 
*ppos) -{ - int tc,rc; - int tailcount,effective_count; - unsigned long flags; - struct eerbuffer *eerb; - - eerb = (struct eerbuffer *)filp->private_data; - if(down_interruptible(&readbuffer_mutex)) - return -ERESTARTSYS; - - spin_lock_irqsave(&bufferlock, flags); - - if (eerb->residual < 0) { /* the remainder of this record */ - /* has been deleted */ - eerb->residual = 0; - spin_unlock_irqrestore(&bufferlock, flags); - up(&readbuffer_mutex); - return -EIO; - } else if (eerb->residual > 0) { - /* OK we still have a second half of a record to deliver */ - effective_count = min(eerb->residual, (int)count); - eerb->residual -= effective_count; - } else { - tc = 0; - while (!tc) { - tc = dasd_eer_read_buffer(eerb, - sizeof(tailcount), (char*)(&tailcount)); - if (!tc) { - /* no data available */ - spin_unlock_irqrestore(&bufferlock, flags); - up(&readbuffer_mutex); - if (filp->f_flags & O_NONBLOCK) - return -EAGAIN; - rc = wait_event_interruptible( - dasd_eer_read_wait_queue, - eerb->head != eerb->tail); - if (rc) { - return rc; - } - if(down_interruptible(&readbuffer_mutex)) - return -ERESTARTSYS; - spin_lock_irqsave(&bufferlock, flags); - } - } - WARN_ON(tc != sizeof(tailcount)); - effective_count = min(tailcount,(int)count); - eerb->residual = tailcount - effective_count; - } - - tc = dasd_eer_read_buffer(eerb, effective_count, readbuffer); - WARN_ON(tc != effective_count); - - spin_unlock_irqrestore(&bufferlock, flags); - - if (copy_to_user(buf, readbuffer, effective_count)) { - up(&readbuffer_mutex); - return -EFAULT; - } - - up(&readbuffer_mutex); - return effective_count; -} - -static unsigned int -dasd_eer_poll (struct file *filp, poll_table *ptable) -{ - unsigned int mask; - unsigned long flags; - struct eerbuffer *eerb; - - eerb = (struct eerbuffer *)filp->private_data; - poll_wait(filp, &dasd_eer_read_wait_queue, ptable); - spin_lock_irqsave(&bufferlock, flags); - if (eerb->head != eerb->tail) - mask = POLLIN | POLLRDNORM ; - else - mask = 0; - spin_unlock_irqrestore(&bufferlock, flags); - return mask; -} - -static struct file_operations dasd_eer_fops = { - .open = &dasd_eer_open, - .release = &dasd_eer_close, - .unlocked_ioctl = &dasd_eer_ioctl, - .compat_ioctl = &dasd_eer_ioctl, - .read = &dasd_eer_read, - .poll = &dasd_eer_poll, - .owner = THIS_MODULE, -}; - -static struct miscdevice dasd_eer_dev = { - .minor = MISC_DYNAMIC_MINOR, - .name = "dasd_eer", - .fops = &dasd_eer_fops, -}; - - -/*****************************************************************************/ -/* Init and exit */ -/*****************************************************************************/ - -static int -__init dasd_eer_init(void) -{ - int rc; - - dasd_eer_workqueue = create_singlethread_workqueue("dasd_eer"); - if (!dasd_eer_workqueue) { - MESSAGE(KERN_ERR , "%s", "dasd_eer_init could not " - "create workqueue \n"); - rc = -ENOMEM; - goto out; - } - - rc = dasd_register_eer_notifier(&dasd_eer_nb); - if (rc) { - MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " - "register error reporting"); - goto queue; - } - - dasd_ioctl_no_register(THIS_MODULE, BIODASDEERSET, dasd_ioctl_set_eer); - dasd_ioctl_no_register(THIS_MODULE, BIODASDEERGET, dasd_ioctl_get_eer); - - /* we don't need our own character device, - * so we just register as misc device */ - rc = misc_register(&dasd_eer_dev); - if (rc) { - MESSAGE(KERN_ERR, "%s", "dasd_eer_init could not " - "register misc device"); - goto unregister; - } - - return 0; - -unregister: - dasd_unregister_eer_notifier(&dasd_eer_nb); - 
dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET, - dasd_ioctl_set_eer); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET, - dasd_ioctl_get_eer); -queue: - destroy_workqueue(dasd_eer_workqueue); -out: - return rc; - -} -module_init(dasd_eer_init); - -static void -__exit dasd_eer_exit(void) -{ - dasd_unregister_eer_notifier(&dasd_eer_nb); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERSET, - dasd_ioctl_set_eer); - dasd_ioctl_no_unregister(THIS_MODULE, BIODASDEERGET, - dasd_ioctl_get_eer); - destroy_workqueue(dasd_eer_workqueue); - - WARN_ON(misc_deregister(&dasd_eer_dev) != 0); -} -module_exit(dasd_eer_exit); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index d1b08fa13fd2..0592354cc604 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -275,34 +275,6 @@ struct dasd_discipline { extern struct dasd_discipline *dasd_diag_discipline_pointer; - -/* - * Notification numbers for extended error reporting notifications: - * The DASD_EER_DISABLE notification is sent before a dasd_device (and it's - * eer pointer) is freed. The error reporting module needs to do all necessary - * cleanup steps. - * The DASD_EER_TRIGGER notification sends the actual error reports (triggers). - */ -#define DASD_EER_DISABLE 0 -#define DASD_EER_TRIGGER 1 - -/* Trigger IDs for extended error reporting DASD_EER_TRIGGER notification */ -#define DASD_EER_FATALERROR 1 -#define DASD_EER_NOPATH 2 -#define DASD_EER_STATECHANGE 3 -#define DASD_EER_PPRCSUSPEND 4 - -/* - * The dasd_eer_trigger structure contains all data that we need to send - * along with an DASD_EER_TRIGGER notification. - */ -struct dasd_eer_trigger { - unsigned int id; - struct dasd_device *device; - struct dasd_ccw_req *cqr; -}; - - struct dasd_device { /* Block device stuff. */ struct gendisk *gdp; @@ -316,11 +288,9 @@ struct dasd_device { unsigned long flags; /* per device flags */ unsigned short features; /* copy of devmap-features (read-only!) */ - /* extended error reporting stuff (eer) */ - void *eer; - /* Device discipline stuff. */ struct dasd_discipline *discipline; + struct dasd_discipline *base_discipline; char *private; /* Device state and target state. 
*/ @@ -519,12 +489,6 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); int dasd_generic_set_offline (struct ccw_device *cdev); int dasd_generic_notify(struct ccw_device *, int); void dasd_generic_auto_online (struct ccw_driver *); -int dasd_register_eer_notifier(struct notifier_block *); -int dasd_unregister_eer_notifier(struct notifier_block *); -void dasd_write_eer_trigger(unsigned int , struct dasd_device *, - struct dasd_ccw_req *); - - /* externals in dasd_devmap.c */ extern int dasd_max_devindex; diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 45ce032772f4..9ed37dc9a1b0 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c @@ -165,8 +165,13 @@ qdio_do_eqbs(struct qdio_q *q, unsigned char *state, q_no = q->q_no; if(!q->is_input_q) q_no += irq->no_input_qs; +again: ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt); rc = qdio_check_ccq(q, ccq); + if (rc == 1) { + QDIO_DBF_TEXT5(1,trace,"eqAGAIN"); + goto again; + } if (rc < 0) { QDIO_DBF_TEXT2(1,trace,"eqberr"); sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no); @@ -195,8 +200,13 @@ qdio_do_sqbs(struct qdio_q *q, unsigned char state, q_no = q->q_no; if(!q->is_input_q) q_no += irq->no_input_qs; +again: ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt); rc = qdio_check_ccq(q, ccq); + if (rc == 1) { + QDIO_DBF_TEXT5(1,trace,"sqAGAIN"); + goto again; + } if (rc < 0) { QDIO_DBF_TEXT3(1,trace,"sqberr"); sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no); @@ -1187,8 +1197,7 @@ tiqdio_is_inbound_q_done(struct qdio_q *q) if (!no_used) return 1; - - if (!q->siga_sync) + if (!q->siga_sync && !irq->is_qebsm) /* we'll check for more primed buffers in qeth_stop_polling */ return 0; if (irq->is_qebsm) { diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c index f6900538be90..87a8c3d2072c 100644 --- a/drivers/scsi/esp.c +++ b/drivers/scsi/esp.c @@ -2068,14 +2068,12 @@ static int esp_reset(struct scsi_cmnd *SCptr) { struct esp *esp = (struct esp *) SCptr->device->host->hostdata; + spin_lock_irq(esp->ehost->host_lock); (void) esp_do_resetbus(esp); - spin_unlock_irq(esp->ehost->host_lock); wait_event(esp->reset_queue, (esp->resetting_bus == 0)); - spin_lock_irq(esp->ehost->host_lock); - return SUCCESS; } diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 7ddd5a69352a..5f1d7580218d 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c @@ -2514,7 +2514,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) assert(sg != NULL); if (qc->flags & ATA_QCFLAG_SINGLE) - assert(qc->n_elem == 1); + assert(qc->n_elem <= 1); VPRINTK("unmapping %u sg elements\n", qc->n_elem); @@ -2537,7 +2537,7 @@ static void ata_sg_clean(struct ata_queued_cmd *qc) kunmap_atomic(addr, KM_IRQ0); } } else { - if (sg_dma_len(&sg[0]) > 0) + if (qc->n_elem) dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), dir); @@ -2570,7 +2570,7 @@ static void ata_fill_sg(struct ata_queued_cmd *qc) unsigned int idx; assert(qc->__sg != NULL); - assert(qc->n_elem > 0); + assert(qc->n_elem > 0 || qc->pad_len > 0); idx = 0; ata_for_each_sg(sg, qc) { @@ -2715,6 +2715,7 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) int dir = qc->dma_dir; struct scatterlist *sg = qc->__sg; dma_addr_t dma_address; + int trim_sg = 0; /* we must lengthen transfers to end on a 32-bit boundary */ qc->pad_len = sg->length & 3; @@ -2734,13 +2735,15 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) sg_dma_len(psg) = ATA_DMA_PAD_SZ; /* trim sg */ sg->length -= 
qc->pad_len; + if (sg->length == 0) + trim_sg = 1; DPRINTK("padding done, sg->length=%u pad_len=%u\n", sg->length, qc->pad_len); } - if (!sg->length) { - sg_dma_address(sg) = 0; + if (trim_sg) { + qc->n_elem--; goto skip_map; } @@ -2753,9 +2756,9 @@ static int ata_sg_setup_one(struct ata_queued_cmd *qc) } sg_dma_address(sg) = dma_address; -skip_map: sg_dma_len(sg) = sg->length; +skip_map: DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c index de05e2883f9c..80480f0fb2b8 100644 --- a/drivers/scsi/sata_qstor.c +++ b/drivers/scsi/sata_qstor.c @@ -277,7 +277,7 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc) u8 *prd = pp->pkt + QS_CPB_BYTES; assert(qc->__sg != NULL); - assert(qc->n_elem > 0); + assert(qc->n_elem > 0 || qc->pad_len > 0); nelem = 0; ata_for_each_sg(sg, qc) { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 791c4dc550ae..94f5e8ed83a7 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -90,7 +90,7 @@ static int spi_suspend(struct device *dev, pm_message_t message) int value; struct spi_driver *drv = to_spi_driver(dev->driver); - if (!drv->suspend) + if (!drv || !drv->suspend) return 0; /* suspend will stop irqs and dma; no more i/o */ @@ -105,7 +105,7 @@ static int spi_resume(struct device *dev) int value; struct spi_driver *drv = to_spi_driver(dev->driver); - if (!drv->resume) + if (!drv || !drv->resume) return 0; /* resume may restart the i/o queue */ @@ -449,7 +449,6 @@ void spi_unregister_master(struct spi_master *master) { (void) device_for_each_child(master->cdev.dev, NULL, __unregister); class_device_unregister(&master->cdev); - master->cdev.dev = NULL; } EXPORT_SYMBOL_GPL(spi_unregister_master); diff --git a/drivers/video/aty/radeon_pm.c b/drivers/video/aty/radeon_pm.c index 556895e99645..1f8d805c61e5 100644 --- a/drivers/video/aty/radeon_pm.c +++ b/drivers/video/aty/radeon_pm.c @@ -1321,8 +1321,6 @@ static void radeon_pm_full_reset_sdram(struct radeonfb_info *rinfo) mdelay( 15); } -#ifdef CONFIG_PPC_OF - static void radeon_pm_reset_pad_ctlr_strength(struct radeonfb_info *rinfo) { u32 tmp, tmp2; @@ -1836,6 +1834,8 @@ static void radeon_reinitialize_M10(struct radeonfb_info *rinfo) radeon_pm_m10_enable_lvds_spread_spectrum(rinfo); } +#ifdef CONFIG_PPC_OF + static void radeon_pm_m9p_reconfigure_mc(struct radeonfb_info *rinfo) { OUTREG(MC_CNTL, rinfo->save_regs[46]); @@ -2728,13 +2728,23 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk) printk("radeonfb: Dynamic Clock Power Management disabled\n"); } +#if defined(CONFIG_PM) /* Check if we can power manage on suspend/resume. We can do * D2 on M6, M7 and M9, and we can resume from D3 cold a few other * "Mac" cards, but that's all. We need more infos about what the * BIOS does tho. Right now, all this PM stuff is pmac-only for that * reason. 
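/*
 * Editor's note -- illustrative sketch, not part of the patch: the libata
 * ata_sg_setup_one() hunks above cover the case where padding a transfer to
 * a 32-bit boundary consumes the whole (single) scatterlist entry.  The
 * 1..3 tail bytes are bounced into the DMA pad buffer; if nothing is left,
 * the entry is dropped from qc->n_elem and never DMA-mapped, which is why
 * the asserts now accept n_elem == 0 as long as pad_len > 0.  The split,
 * in plain C:
 */
struct sg_split { unsigned int mapped_len, pad_len, drop_entry; };

static struct sg_split ata_pad_split(unsigned int sg_len)
{
	struct sg_split s;

	s.pad_len = sg_len & 3;			/* bytes moved to the pad buffer */
	s.mapped_len = sg_len - s.pad_len;	/* what remains for the real mapping */
	s.drop_entry = (s.pad_len && s.mapped_len == 0);
	return s;
}
/* Example: a 2-byte ATAPI transfer gives mapped_len 0, pad_len 2, entry dropped. */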
--BenH */ -#if defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) + /* Special case for Samsung P35 laptops + */ + if ((rinfo->pdev->vendor == PCI_VENDOR_ID_ATI) && + (rinfo->pdev->device == PCI_CHIP_RV350_NP) && + (rinfo->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG) && + (rinfo->pdev->subsystem_device == 0xc00c)) { + rinfo->reinit_func = radeon_reinitialize_M10; + rinfo->pm_mode |= radeon_pm_off; + } +#if defined(CONFIG_PPC_PMAC) if (_machine == _MACH_Pmac && rinfo->of_node) { if (rinfo->is_mobility && rinfo->pm_reg && rinfo->family <= CHIP_FAMILY_RV250) @@ -2778,7 +2788,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk) OUTREG(TV_DAC_CNTL, INREG(TV_DAC_CNTL) | 0x07000000); #endif } -#endif /* defined(CONFIG_PM) && defined(CONFIG_PPC_PMAC) */ +#endif /* defined(CONFIG_PPC_PMAC) */ +#endif /* defined(CONFIG_PM) */ } void radeonfb_pm_exit(struct radeonfb_info *rinfo) diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 5250c428fc1f..ef3386549140 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -66,7 +66,7 @@ static match_table_t tokens = { {Opt_afid, "afid=%u"}, {Opt_rfdno, "rfdno=%u"}, {Opt_wfdno, "wfdno=%u"}, - {Opt_debug, "debug=%u"}, + {Opt_debug, "debug=%x"}, {Opt_name, "name=%s"}, {Opt_remotename, "aname=%s"}, {Opt_unix, "proto=unix"}, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 217323b0c896..b41e8b379652 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1048,13 +1048,14 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, cifs_small_buf_release(iov[0].iov_base); else if(resp_buf_type == CIFS_LARGE_BUFFER) cifs_buf_release(iov[0].iov_base); - } else /* return buffer to caller to free */ /* BB FIXME how do we tell caller if it is not a large buffer */ { - *buf = iov[0].iov_base; + } else if(resp_buf_type != CIFS_NO_BUFFER) { + /* return buffer to caller to free */ + *buf = iov[0].iov_base; if(resp_buf_type == CIFS_SMALL_BUFFER) *pbuf_type = CIFS_SMALL_BUFFER; else if(resp_buf_type == CIFS_LARGE_BUFFER) *pbuf_type = CIFS_LARGE_BUFFER; - } + } /* else no valid buffer on return - leave as null */ /* Note: On -EAGAIN error only caller can retry on handle based calls since file handle passed in no longer valid */ diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index e488603fb1e7..ef5ae6f93c75 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1795,10 +1795,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, conjunction with 52K kvec constraint on arch with 4K page size */ - if(cifs_sb->rsize < PAGE_CACHE_SIZE) { - cifs_sb->rsize = PAGE_CACHE_SIZE; - /* Windows ME does this */ - cFYI(1,("Attempt to set readsize for mount to less than one page (4096)")); + if(cifs_sb->rsize < 2048) { + cifs_sb->rsize = 2048; + /* Windows ME may prefer this */ + cFYI(1,("readsize set to minimum 2048")); } cifs_sb->mnt_uid = volume_info.linux_uid; cifs_sb->mnt_gid = volume_info.linux_gid; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 6573f31f1fd9..075d3e945602 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -204,10 +204,6 @@ int proc_fill_super(struct super_block *s, void *data, int silent) root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); if (!root_inode) goto out_no_root; - /* - * Fixup the root inode's nlink value - */ - root_inode->i_nlink += nr_processes(); root_inode->i_uid = 0; root_inode->i_gid = 0; s->s_root = d_alloc_root(root_inode); diff --git a/fs/proc/root.c b/fs/proc/root.c index 68896283c8ae..c3fd3611112f 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -80,16 +80,16 @@ void __init proc_root_init(void) 
proc_bus = proc_mkdir("bus", NULL); } -static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) +static int proc_root_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat +) { - /* - * nr_threads is actually protected by the tasklist_lock; - * however, it's conventional to do reads, especially for - * reporting, without any locking whatsoever. - */ - if (dir->i_ino == PROC_ROOT_INO) /* check for safety... */ - dir->i_nlink = proc_root.nlink + nr_threads; + generic_fillattr(dentry->d_inode, stat); + stat->nlink = proc_root.nlink + nr_processes(); + return 0; +} +static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd) +{ if (!proc_lookup(dir, dentry, nd)) { return NULL; } @@ -134,6 +134,7 @@ static struct file_operations proc_root_operations = { */ static struct inode_operations proc_root_inode_operations = { .lookup = proc_root_lookup, + .getattr = proc_root_getattr, }; /* diff --git a/fs/super.c b/fs/super.c index 30294218fa63..e20b5580afd5 100644 --- a/fs/super.c +++ b/fs/super.c @@ -666,6 +666,16 @@ static int test_bdev_super(struct super_block *s, void *data) return (void *)s->s_bdev == data; } +static void bdev_uevent(struct block_device *bdev, enum kobject_action action) +{ + if (bdev->bd_disk) { + if (bdev->bd_part) + kobject_uevent(&bdev->bd_part->kobj, action); + else + kobject_uevent(&bdev->bd_disk->kobj, action); + } +} + struct super_block *get_sb_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)) @@ -707,8 +717,10 @@ struct super_block *get_sb_bdev(struct file_system_type *fs_type, up_write(&s->s_umount); deactivate_super(s); s = ERR_PTR(error); - } else + } else { s->s_flags |= MS_ACTIVE; + bdev_uevent(bdev, KOBJ_MOUNT); + } } return s; @@ -724,6 +736,7 @@ void kill_block_super(struct super_block *sb) { struct block_device *bdev = sb->s_bdev; + bdev_uevent(bdev, KOBJ_UMOUNT); generic_shutdown_super(sb); sync_blockdev(bdev); close_bdev_excl(bdev); diff --git a/include/asm-arm/arch-at91rm9200/gpio.h b/include/asm-arm/arch-at91rm9200/gpio.h index 0f0a61e2f129..6176ab2dc417 100644 --- a/include/asm-arm/arch-at91rm9200/gpio.h +++ b/include/asm-arm/arch-at91rm9200/gpio.h @@ -183,6 +183,7 @@ extern int at91_set_B_periph(unsigned pin, int use_pullup); extern int at91_set_gpio_input(unsigned pin, int use_pullup); extern int at91_set_gpio_output(unsigned pin, int value); extern int at91_set_deglitch(unsigned pin, int is_on); +extern int at91_set_multi_drive(unsigned pin, int is_on); /* callable at any time */ extern int at91_set_gpio_value(unsigned pin, int value); diff --git a/include/asm-arm/arch-ixp4xx/nas100d.h b/include/asm-arm/arch-ixp4xx/nas100d.h index 51ac0180427c..84467a5190d0 100644 --- a/include/asm-arm/arch-ixp4xx/nas100d.h +++ b/include/asm-arm/arch-ixp4xx/nas100d.h @@ -19,8 +19,8 @@ #error "Do not include this directly, instead #include <asm/hardware.h>" #endif -#define NAS100D_SDA_PIN 6 -#define NAS100D_SCL_PIN 5 +#define NAS100D_SDA_PIN 5 +#define NAS100D_SCL_PIN 6 /* * NAS100D PCI IRQs diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 06c12a037cba..d6a2c613be68 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h @@ -239,7 +239,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) " bra 2f; \n" " .fillinsn \n" "1:" - M32R_UNLOCK" %2, @%1; \n" + M32R_UNLOCK" %0, @%1; \n" " .fillinsn \n" "2:" : "=&r" 
(retval) diff --git a/include/asm-m68k/irq.h b/include/asm-m68k/irq.h index 325c86f8512d..9ac047c400c4 100644 --- a/include/asm-m68k/irq.h +++ b/include/asm-m68k/irq.h @@ -79,7 +79,7 @@ static __inline__ int irq_canonicalize(int irq) extern void (*enable_irq)(unsigned int); extern void (*disable_irq)(unsigned int); -#define enable_irq_nosync enable_irq +#define disable_irq_nosync disable_irq struct pt_regs; diff --git a/include/asm-m68k/raw_io.h b/include/asm-m68k/raw_io.h index 5439bcaa57c6..811ccd25d4a6 100644 --- a/include/asm-m68k/raw_io.h +++ b/include/asm-m68k/raw_io.h @@ -336,6 +336,7 @@ static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf, : "d0", "a0", "a1", "d6"); } +#define __raw_writel raw_outl #endif /* __KERNEL__ */ diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h index 7a553e9d44d3..b96f3e0f3933 100644 --- a/include/asm-mips/uaccess.h +++ b/include/asm-mips/uaccess.h @@ -233,7 +233,7 @@ do { \ #define __get_user_check(x,ptr,size) \ ({ \ long __gu_err = -EFAULT; \ - const void __user * __gu_ptr = (ptr); \ + const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \ \ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \ __get_user_common((x), size, __gu_ptr); \ @@ -258,7 +258,7 @@ do { \ : "=r" (__gu_err), "=r" (__gu_tmp) \ : "0" (0), "o" (__m(addr)), "i" (-EFAULT)); \ \ - (val) = (__typeof__(val)) __gu_tmp; \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ } /* @@ -284,7 +284,7 @@ do { \ " .previous \n" \ : "=r" (__gu_err), "=&r" (__gu_tmp) \ : "0" (0), "r" (addr), "i" (-EFAULT)); \ - (val) = __gu_tmp; \ + (val) = (__typeof__(*(addr))) __gu_tmp; \ } /* diff --git a/include/asm-mips/unistd.h b/include/asm-mips/unistd.h index 769305d20108..b5c78a4a0192 100644 --- a/include/asm-mips/unistd.h +++ b/include/asm-mips/unistd.h @@ -313,7 +313,7 @@ #define __NR_mknodat (__NR_Linux + 290) #define __NR_fchownat (__NR_Linux + 291) #define __NR_futimesat (__NR_Linux + 292) -#define __NR_newfstatat (__NR_Linux + 293) +#define __NR_fstatat (__NR_Linux + 293) #define __NR_unlinkat (__NR_Linux + 294) #define __NR_renameat (__NR_Linux + 295) #define __NR_linkat (__NR_Linux + 296) @@ -593,7 +593,7 @@ #define __NR_mknodat (__NR_Linux + 249) #define __NR_fchownat (__NR_Linux + 250) #define __NR_futimesat (__NR_Linux + 251) -#define __NR_newfstatat (__NR_Linux + 252) +#define __NR_fstatat (__NR_Linux + 252) #define __NR_unlinkat (__NR_Linux + 253) #define __NR_renameat (__NR_Linux + 254) #define __NR_linkat (__NR_Linux + 255) @@ -877,7 +877,7 @@ #define __NR_mknodat (__NR_Linux + 253) #define __NR_fchownat (__NR_Linux + 254) #define __NR_futimesat (__NR_Linux + 255) -#define __NR_newfstatat (__NR_Linux + 256) +#define __NR_fstatat (__NR_Linux + 256) #define __NR_unlinkat (__NR_Linux + 257) #define __NR_renameat (__NR_Linux + 258) #define __NR_linkat (__NR_Linux + 259) diff --git a/include/asm-ppc/machdep.h b/include/asm-ppc/machdep.h index 39200def8d11..a3e8a45e45a9 100644 --- a/include/asm-ppc/machdep.h +++ b/include/asm-ppc/machdep.h @@ -154,19 +154,6 @@ extern char cmd_line[COMMAND_LINE_SIZE]; extern void setup_pci_ptrs(void); -/* - * Power macintoshes have either a CUDA or a PMU controlling - * system reset, power, NVRAM, RTC. 
- */ -typedef enum sys_ctrler_kind { - SYS_CTRLER_UNKNOWN = 0, - SYS_CTRLER_CUDA = 1, - SYS_CTRLER_PMU = 2, - SYS_CTRLER_SMU = 3, -} sys_ctrler_t; - -extern sys_ctrler_t sys_ctrler; - #ifdef CONFIG_SMP struct smp_ops_t { void (*message_pass)(int target, int msg); diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h index c744ff33b1df..1630c26e8f45 100644 --- a/include/asm-s390/dasd.h +++ b/include/asm-s390/dasd.h @@ -204,8 +204,7 @@ typedef struct attrib_data_t { * * Here ist how the ioctl-nr should be used: * 0 - 31 DASD driver itself - * 32 - 229 still open - * 230 - 239 DASD extended error reporting + * 32 - 239 still open * 240 - 255 reserved for EMC *******************************************************************************/ @@ -237,22 +236,12 @@ typedef struct attrib_data_t { #define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t) /* Get Attributes (cache operations) */ #define BIODASDGATTR _IOR(DASD_IOCTL_LETTER,5,attrib_data_t) -/* retrieve extended error-reporting value */ -#define BIODASDEERGET _IOR(DASD_IOCTL_LETTER,6,int) /* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */ #define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t) /* Set Attributes (cache operations) */ #define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t) -/* retrieve extended error-reporting value */ -#define BIODASDEERSET _IOW(DASD_IOCTL_LETTER,3,int) - - -/* remove all records from the eer buffer */ -#define DASD_EER_PURGE _IO(DASD_IOCTL_LETTER,230) -/* set the number of pages that are used for the internal eer buffer */ -#define DASD_EER_SETBUFSIZE _IOW(DASD_IOCTL_LETTER,230,int) #endif /* DASD_H */ diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 84d3d9f034ce..d3bc25e6d27d 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -427,7 +427,8 @@ extern int acpi_mp_config; extern struct acpi_table_mcfg_config *pci_mmcfg_config; extern int pci_mmcfg_config_num; -extern int sbf_port ; +extern int sbf_port; +extern unsigned long acpi_video_flags; #else /* !CONFIG_ACPI */ diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 2a8d8da70961..c374b5fa8d3b 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -41,8 +41,10 @@ enum kobject_action { KOBJ_ADD = (__force kobject_action_t) 0x01, /* exclusive to core */ KOBJ_REMOVE = (__force kobject_action_t) 0x02, /* exclusive to core */ KOBJ_CHANGE = (__force kobject_action_t) 0x03, /* device state change */ - KOBJ_OFFLINE = (__force kobject_action_t) 0x04, /* device offline */ - KOBJ_ONLINE = (__force kobject_action_t) 0x05, /* device online */ + KOBJ_MOUNT = (__force kobject_action_t) 0x04, /* mount event for block devices (broken) */ + KOBJ_UMOUNT = (__force kobject_action_t) 0x05, /* umount event for block devices (broken) */ + KOBJ_OFFLINE = (__force kobject_action_t) 0x06, /* device offline */ + KOBJ_ONLINE = (__force kobject_action_t) 0x07, /* device online */ }; struct kobject { diff --git a/include/linux/libata.h b/include/linux/libata.h index 9e5db2949c58..c91be5e64ede 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -557,17 +557,29 @@ ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc) } static inline struct scatterlist * +ata_qc_first_sg(struct ata_queued_cmd *qc) +{ + if (qc->n_elem) + return qc->__sg; + if (qc->pad_len) + return &qc->pad_sgent; + return NULL; +} + +static inline struct scatterlist * ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc) { if (sg == &qc->pad_sgent) return NULL; if (++sg - 
qc->__sg < qc->n_elem) return sg; - return qc->pad_len ? &qc->pad_sgent : NULL; + if (qc->pad_len) + return &qc->pad_sgent; + return NULL; } #define ata_for_each_sg(sg, qc) \ - for (sg = qc->__sg; sg; sg = ata_qc_next_sg(sg, qc)) + for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc)) static inline unsigned int ata_tag_valid(unsigned int tag) { diff --git a/include/linux/mm.h b/include/linux/mm.h index 26e1663a5cbe..498ff8778fb6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1051,7 +1051,11 @@ int shrink_slab(unsigned long scanned, gfp_t gfp_mask, void drop_pagecache(void); void drop_slab(void); +#ifndef CONFIG_MMU +#define randomize_va_space 0 +#else extern int randomize_va_space; +#endif #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index f38872abc126..bdc556d88498 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h @@ -49,7 +49,7 @@ struct mmc_command { /* * These are the command types. */ -#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_TYPE) +#define mmc_cmd_type(cmd) ((cmd)->flags & MMC_CMD_MASK) unsigned int retries; /* max number of retries */ unsigned int error; /* command error */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 547d649b274e..b4dc6e2e10c9 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -398,7 +398,7 @@ extern struct inode_operations nfs_symlink_inode_operations; extern int nfs_register_sysctl(void); extern void nfs_unregister_sysctl(void); #else -#define nfs_register_sysctl() do { } while(0) +#define nfs_register_sysctl() 0 #define nfs_unregister_sysctl() do { } while(0) #endif diff --git a/include/linux/swap.h b/include/linux/swap.h index f3e17d5963c3..d572b19afb7d 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -147,7 +147,7 @@ struct swap_list_t { #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages) /* linux/mm/oom_kill.c */ -extern void out_of_memory(gfp_t gfp_mask, int order); +extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order); /* linux/mm/memory.c */ extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 32a4139c4ad8..0e92bf7ec28e 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -146,6 +146,7 @@ enum KERN_RANDOMIZE=68, /* int: randomize virtual address space */ KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ KERN_SPIN_RETRY=70, /* int: number of spinlock retries */ + KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */ }; diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 685c25175d96..d7e7e637b92a 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -841,7 +841,7 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask) for (aux = context->aux; aux; aux = aux->next) { - ab = audit_log_start(context, GFP_KERNEL, aux->type); + ab = audit_log_start(context, gfp_mask, aux->type); if (!ab) continue; /* audit_panic has been called */ @@ -878,14 +878,14 @@ static void audit_log_exit(struct audit_context *context, gfp_t gfp_mask) } if (context->pwd && context->pwdmnt) { - ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD); + ab = audit_log_start(context, gfp_mask, AUDIT_CWD); if (ab) { audit_log_d_path(ab, "cwd=", context->pwd, context->pwdmnt); audit_log_end(ab); } } for (i = 0; i < context->name_count; i++) { - ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH); + ab = 
audit_log_start(context, gfp_mask, AUDIT_PATH); if (!ab) continue; /* audit_panic has been called */ diff --git a/kernel/exit.c b/kernel/exit.c index 93cee3671332..531aadca5530 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -360,6 +360,9 @@ void daemonize(const char *name, ...) fs = init_task.fs; current->fs = fs; atomic_inc(&fs->count); + exit_namespace(current); + current->namespace = init_task.namespace; + get_namespace(current->namespace); exit_files(current); current->files = init_task.files; atomic_inc(¤t->files->count); diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 17313b99e53d..1067090db6b1 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -104,6 +104,8 @@ cond_syscall(sys_setreuid16); cond_syscall(sys_setuid16); cond_syscall(sys_vm86old); cond_syscall(sys_vm86); +cond_syscall(compat_sys_ipc); +cond_syscall(compat_sys_sysctl); /* arch-specific weak syscall entries */ cond_syscall(sys_pciconfig_read); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 7654d55c47f5..c05a2b7125e1 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -44,14 +44,12 @@ #include <linux/limits.h> #include <linux/dcache.h> #include <linux/syscalls.h> +#include <linux/nfs_fs.h> +#include <linux/acpi.h> #include <asm/uaccess.h> #include <asm/processor.h> -#ifdef CONFIG_ROOT_NFS -#include <linux/nfs_fs.h> -#endif - #if defined(CONFIG_SYSCTL) /* External variables not in a header file. */ @@ -638,6 +636,7 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif +#if defined(CONFIG_MMU) { .ctl_name = KERN_RANDOMIZE, .procname = "randomize_va_space", @@ -646,6 +645,7 @@ static ctl_table kern_table[] = { .mode = 0644, .proc_handler = &proc_dointvec, }, +#endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) { .ctl_name = KERN_SPIN_RETRY, @@ -656,6 +656,16 @@ static ctl_table kern_table[] = { .proc_handler = &proc_dointvec, }, #endif +#ifdef CONFIG_ACPI_SLEEP + { + .ctl_name = KERN_ACPI_VIDEO_FLAGS, + .procname = "acpi_video_flags", + .data = &acpi_video_flags, + .maxlen = sizeof (unsigned long), + .mode = 0644, + .proc_handler = &proc_dointvec, + }, +#endif { .ctl_name = 0 } }; diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c index a6b1e271d53c..351045f4f63c 100644 --- a/lib/iomap_copy.c +++ b/lib/iomap_copy.c @@ -15,8 +15,8 @@ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA. 
*/ -#include <linux/io.h> #include <linux/module.h> +#include <linux/io.h> /** * __iowrite32_copy - copy data to MMIO space, in 32-bit units diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 1b1985c136ec..086a0c6e888e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -38,6 +38,10 @@ static char *action_to_string(enum kobject_action action) return "remove"; case KOBJ_CHANGE: return "change"; + case KOBJ_MOUNT: + return "mount"; + case KOBJ_UMOUNT: + return "umount"; case KOBJ_OFFLINE: return "offline"; case KOBJ_ONLINE: diff --git a/mm/mempolicy.c b/mm/mempolicy.c index bedfa4f09c80..880831bd3003 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -587,7 +587,7 @@ redo: } list_add(&page->lru, &newlist); nr_pages++; - if (nr_pages > MIGRATE_CHUNK_SIZE); + if (nr_pages > MIGRATE_CHUNK_SIZE) break; } err = migrate_pages(pagelist, &newlist, &moved, &failed); @@ -808,7 +808,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask, nodes_clear(*nodes); if (maxnode == 0 || !nmask) return 0; - if (maxnode > PAGE_SIZE) + if (maxnode > PAGE_SIZE*BITS_PER_BYTE) return -EINVAL; nlongs = BITS_TO_LONGS(maxnode); diff --git a/mm/nommu.c b/mm/nommu.c index c10262d68232..99d21020ec9d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -57,6 +57,8 @@ EXPORT_SYMBOL(vmalloc); EXPORT_SYMBOL(vfree); EXPORT_SYMBOL(vmalloc_to_page); EXPORT_SYMBOL(vmalloc_32); +EXPORT_SYMBOL(vmap); +EXPORT_SYMBOL(vunmap); /* * Handle all mappings that got truncated by a "truncate()" diff --git a/mm/oom_kill.c b/mm/oom_kill.c index b05ab8f2a562..8123fad5a485 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -58,15 +58,17 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) /* * Processes which fork a lot of child processes are likely - * a good choice. We add the vmsize of the children if they + * a good choice. We add half the vmsize of the children if they * have an own mm. This prevents forking servers to flood the - * machine with an endless amount of children + * machine with an endless amount of children. In case a single + * child is eating the vast majority of memory, adding only half + * to the parents will make the child our kill candidate of choice. */ list_for_each(tsk, &p->children) { struct task_struct *chld; chld = list_entry(tsk, struct task_struct, sibling); if (chld->mm != p->mm && chld->mm) - points += chld->mm->total_vm; + points += chld->mm->total_vm/2 + 1; } /* @@ -131,17 +133,47 @@ unsigned long badness(struct task_struct *p, unsigned long uptime) } /* + * Types of limitations to the nodes from which allocations may occur + */ +#define CONSTRAINT_NONE 1 +#define CONSTRAINT_MEMORY_POLICY 2 +#define CONSTRAINT_CPUSET 3 + +/* + * Determine the type of allocation constraint. + */ +static inline int constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask) +{ +#ifdef CONFIG_NUMA + struct zone **z; + nodemask_t nodes = node_online_map; + + for (z = zonelist->zones; *z; z++) + if (cpuset_zone_allowed(*z, gfp_mask)) + node_clear((*z)->zone_pgdat->node_id, + nodes); + else + return CONSTRAINT_CPUSET; + + if (!nodes_empty(nodes)) + return CONSTRAINT_MEMORY_POLICY; +#endif + + return CONSTRAINT_NONE; +} + +/* * Simple selection loop. We chose the process with the highest * number of 'points'. We expect the caller will lock the tasklist. 
* * (not docbooked, we don't want this one cluttering up the manual) */ -static struct task_struct * select_bad_process(void) +static struct task_struct *select_bad_process(unsigned long *ppoints) { - unsigned long maxpoints = 0; struct task_struct *g, *p; struct task_struct *chosen = NULL; struct timespec uptime; + *ppoints = 0; do_posix_clock_monotonic_gettime(&uptime); do_each_thread(g, p) { @@ -169,9 +201,9 @@ static struct task_struct * select_bad_process(void) return p; points = badness(p, uptime.tv_sec); - if (points > maxpoints || !chosen) { + if (points > *ppoints || !chosen) { chosen = p; - maxpoints = points; + *ppoints = points; } } while_each_thread(g, p); return chosen; @@ -182,7 +214,7 @@ static struct task_struct * select_bad_process(void) * CAP_SYS_RAW_IO set, send SIGTERM instead (but it's unlikely that * we select a process with CAP_SYS_RAW_IO set). */ -static void __oom_kill_task(task_t *p) +static void __oom_kill_task(task_t *p, const char *message) { if (p->pid == 1) { WARN_ON(1); @@ -198,8 +230,8 @@ static void __oom_kill_task(task_t *p) return; } task_unlock(p); - printk(KERN_ERR "Out of Memory: Killed process %d (%s).\n", - p->pid, p->comm); + printk(KERN_ERR "%s: Killed process %d (%s).\n", + message, p->pid, p->comm); /* * We give our sacrificial lamb high priority and access to @@ -212,7 +244,7 @@ static void __oom_kill_task(task_t *p) force_sig(SIGKILL, p); } -static struct mm_struct *oom_kill_task(task_t *p) +static struct mm_struct *oom_kill_task(task_t *p, const char *message) { struct mm_struct *mm = get_task_mm(p); task_t * g, * q; @@ -224,35 +256,38 @@ static struct mm_struct *oom_kill_task(task_t *p) return NULL; } - __oom_kill_task(p); + __oom_kill_task(p, message); /* * kill all processes that share the ->mm (i.e. all threads), * but are in a different thread group */ do_each_thread(g, q) if (q->mm == mm && q->tgid != p->tgid) - __oom_kill_task(q); + __oom_kill_task(q, message); while_each_thread(g, q); return mm; } -static struct mm_struct *oom_kill_process(struct task_struct *p) +static struct mm_struct *oom_kill_process(struct task_struct *p, + unsigned long points, const char *message) { struct mm_struct *mm; struct task_struct *c; struct list_head *tsk; + printk(KERN_ERR "Out of Memory: Kill process %d (%s) score %li and " + "children.\n", p->pid, p->comm, points); /* Try to kill a child first */ list_for_each(tsk, &p->children) { c = list_entry(tsk, struct task_struct, sibling); if (c->mm == p->mm) continue; - mm = oom_kill_task(c); + mm = oom_kill_task(c, message); if (mm) return mm; } - return oom_kill_task(p); + return oom_kill_task(p, message); } /** @@ -263,10 +298,11 @@ static struct mm_struct *oom_kill_process(struct task_struct *p) * OR try to be smart about which process to kill. Note that we * don't have to be perfect here, we just have to be good. */ -void out_of_memory(gfp_t gfp_mask, int order) +void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order) { struct mm_struct *mm = NULL; - task_t * p; + task_t *p; + unsigned long points; if (printk_ratelimit()) { printk("oom-killer: gfp_mask=0x%x, order=%d\n", @@ -277,25 +313,48 @@ void out_of_memory(gfp_t gfp_mask, int order) cpuset_lock(); read_lock(&tasklist_lock); + + /* + * Check if there were limitations on the allocation (only relevant for + * NUMA) that may require different handling. 
+ */ + switch (constrained_alloc(zonelist, gfp_mask)) { + case CONSTRAINT_MEMORY_POLICY: + mm = oom_kill_process(current, points, + "No available memory (MPOL_BIND)"); + break; + + case CONSTRAINT_CPUSET: + mm = oom_kill_process(current, points, + "No available memory in cpuset"); + break; + + case CONSTRAINT_NONE: retry: - p = select_bad_process(); + /* + * Rambo mode: Shoot down a process and hope it solves whatever + * issues we may have. + */ + p = select_bad_process(&points); - if (PTR_ERR(p) == -1UL) - goto out; + if (PTR_ERR(p) == -1UL) + goto out; - /* Found nothing?!?! Either we hang forever, or we panic. */ - if (!p) { - read_unlock(&tasklist_lock); - cpuset_unlock(); - panic("Out of memory and no killable processes...\n"); - } + /* Found nothing?!?! Either we hang forever, or we panic. */ + if (!p) { + read_unlock(&tasklist_lock); + cpuset_unlock(); + panic("Out of memory and no killable processes...\n"); + } - mm = oom_kill_process(p); - if (!mm) - goto retry; + mm = oom_kill_process(p, points, "Out of memory"); + if (!mm) + goto retry; + + break; + } - out: - read_unlock(&tasklist_lock); +out: cpuset_unlock(); if (mm) mmput(mm); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 208812b25597..791690d7d3fa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1015,7 +1015,7 @@ rebalance: if (page) goto got_pg; - out_of_memory(gfp_mask, order); + out_of_memory(zonelist, gfp_mask, order); goto restart; } diff --git a/mm/shmem.c b/mm/shmem.c index f7ac7b812f92..7c455fbaff7b 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -45,6 +45,7 @@ #include <linux/swapops.h> #include <linux/mempolicy.h> #include <linux/namei.h> +#include <linux/ctype.h> #include <asm/uaccess.h> #include <asm/div64.h> #include <asm/pgtable.h> @@ -874,6 +875,51 @@ redirty: } #ifdef CONFIG_NUMA +static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) +{ + char *nodelist = strchr(value, ':'); + int err = 1; + + if (nodelist) { + /* NUL-terminate policy string */ + *nodelist++ = '\0'; + if (nodelist_parse(nodelist, *policy_nodes)) + goto out; + } + if (!strcmp(value, "default")) { + *policy = MPOL_DEFAULT; + /* Don't allow a nodelist */ + if (!nodelist) + err = 0; + } else if (!strcmp(value, "prefer")) { + *policy = MPOL_PREFERRED; + /* Insist on a nodelist of one node only */ + if (nodelist) { + char *rest = nodelist; + while (isdigit(*rest)) + rest++; + if (!*rest) + err = 0; + } + } else if (!strcmp(value, "bind")) { + *policy = MPOL_BIND; + /* Insist on a nodelist */ + if (nodelist) + err = 0; + } else if (!strcmp(value, "interleave")) { + *policy = MPOL_INTERLEAVE; + /* Default to nodes online if no nodelist */ + if (!nodelist) + *policy_nodes = node_online_map; + err = 0; + } +out: + /* Restore string for error message */ + if (nodelist) + *--nodelist = ':'; + return err; +} + static struct page *shmem_swapin_async(struct shared_policy *p, swp_entry_t entry, unsigned long idx) { @@ -926,6 +972,11 @@ shmem_alloc_page(gfp_t gfp, struct shmem_inode_info *info, return page; } #else +static inline int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes) +{ + return 1; +} + static inline struct page * shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx) { @@ -1859,7 +1910,23 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, { char *this_char, *value, *rest; - while ((this_char = strsep(&options, ",")) != NULL) { + while (options != NULL) { + this_char = options; + for (;;) { + /* + * NUL-terminate this option: unfortunately, + * 
mount options form a comma-separated list, + * but mpol's nodelist may also contain commas. + */ + options = strchr(options, ','); + if (options == NULL) + break; + options++; + if (!isdigit(*options)) { + options[-1] = '\0'; + break; + } + } if (!*this_char) continue; if ((value = strchr(this_char,'=')) != NULL) { @@ -1910,18 +1977,8 @@ static int shmem_parse_options(char *options, int *mode, uid_t *uid, if (*rest) goto bad_val; } else if (!strcmp(this_char,"mpol")) { - if (!strcmp(value,"default")) - *policy = MPOL_DEFAULT; - else if (!strcmp(value,"preferred")) - *policy = MPOL_PREFERRED; - else if (!strcmp(value,"bind")) - *policy = MPOL_BIND; - else if (!strcmp(value,"interleave")) - *policy = MPOL_INTERLEAVE; - else + if (shmem_parse_mpol(value,policy,policy_nodes)) goto bad_val; - } else if (!strcmp(this_char,"mpol_nodelist")) { - nodelist_parse(value, *policy_nodes); } else { printk(KERN_ERR "tmpfs: Bad mount option %s\n", this_char); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 6766f118f070..2144952d1c6c 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -411,6 +411,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) C(pkt_type); C(ip_summed); C(priority); +#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) + C(ipvs_property); +#endif C(protocol); n->destructor = NULL; #ifdef CONFIG_NETFILTER @@ -422,13 +425,6 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) C(nfct_reasm); nf_conntrack_get_reasm(skb->nfct_reasm); #endif -#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) - C(ipvs_property); -#endif -#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) - C(nfct_reasm); - nf_conntrack_get_reasm(skb->nfct_reasm); -#endif #ifdef CONFIG_BRIDGE_NETFILTER C(nf_bridge); nf_bridge_get(skb->nf_bridge); diff --git a/net/ipv4/netfilter/ip_nat_core.c b/net/ipv4/netfilter/ip_nat_core.c index c1a61462507f..1741d555ad0d 100644 --- a/net/ipv4/netfilter/ip_nat_core.c +++ b/net/ipv4/netfilter/ip_nat_core.c @@ -434,6 +434,7 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb, } *inside; struct ip_conntrack_tuple inner, target; int hdrlen = (*pskb)->nh.iph->ihl * 4; + unsigned long statusbit; if (!skb_make_writable(pskb, hdrlen + sizeof(*inside))) return 0; @@ -495,17 +496,16 @@ int ip_nat_icmp_reply_translation(struct sk_buff **pskb, /* Change outer to look the reply to an incoming packet * (proto 0 means don't invert per-proto part). */ + if (manip == IP_NAT_MANIP_SRC) + statusbit = IPS_SRC_NAT; + else + statusbit = IPS_DST_NAT; - /* Obviously, we need to NAT destination IP, but source IP - should be NAT'ed only if it is from a NAT'd host. + /* Invert if this is reply dir. */ + if (dir == IP_CT_DIR_REPLY) + statusbit ^= IPS_NAT_MASK; - Explanation: some people use NAT for anonymizing. 
Also, - CERT recommends dropping all packets from private IP - addresses (although ICMP errors from internal links with - such addresses are not too uncommon, as Alan Cox points - out) */ - if (manip != IP_NAT_MANIP_SRC - || ((*pskb)->nh.iph->saddr == ct->tuplehash[dir].tuple.src.ip)) { + if (ct->status & statusbit) { invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); if (!manip_pkt(0, pskb, 0, &target, manip)) return 0; diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c index 7c3f7d380240..ab1f88fa21ec 100644 --- a/net/ipv4/netfilter/ip_nat_standalone.c +++ b/net/ipv4/netfilter/ip_nat_standalone.c @@ -200,20 +200,14 @@ ip_nat_in(unsigned int hooknum, const struct net_device *out, int (*okfn)(struct sk_buff *)) { - struct ip_conntrack *ct; - enum ip_conntrack_info ctinfo; unsigned int ret; + u_int32_t daddr = (*pskb)->nh.iph->daddr; ret = ip_nat_fn(hooknum, pskb, in, out, okfn); if (ret != NF_DROP && ret != NF_STOLEN - && (ct = ip_conntrack_get(*pskb, &ctinfo)) != NULL) { - enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo); - - if (ct->tuplehash[dir].tuple.dst.ip != - ct->tuplehash[!dir].tuple.src.ip) { - dst_release((*pskb)->dst); - (*pskb)->dst = NULL; - } + && daddr != (*pskb)->nh.iph->daddr) { + dst_release((*pskb)->dst); + (*pskb)->dst = NULL; } return ret; } @@ -276,7 +270,7 @@ ip_nat_local_fn(unsigned int hooknum, ct->tuplehash[!dir].tuple.src.ip #ifdef CONFIG_XFRM || ct->tuplehash[dir].tuple.dst.u.all != - ct->tuplehash[dir].tuple.src.u.all + ct->tuplehash[!dir].tuple.src.u.all #endif ) return ip_route_me_harder(pskb) == 0 ? ret : NF_DROP; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 92ead3cf956b..faea8a120ee2 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -458,7 +458,7 @@ ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, mtu = IPV6_MIN_MTU; t->dev->mtu = mtu; - if ((len = sizeof (*ipv6h) + ipv6h->payload_len) > mtu) { + if ((len = sizeof (*ipv6h) + ntohs(ipv6h->payload_len)) > mtu) { rel_type = ICMPV6_PKT_TOOBIG; rel_code = 0; rel_info = mtu; diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 98ec53bd3ac7..5e6b05ac1260 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -885,8 +885,6 @@ restart: * We can't enlist stable bundles either. */ write_unlock_bh(&policy->lock); - - xfrm_pol_put(policy); if (dst) dst_free(dst); diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c index 418c6d4e5daf..a529b62972b4 100644 --- a/sound/core/control_compat.c +++ b/sound/core/control_compat.c @@ -167,7 +167,7 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, int *countp) { struct snd_kcontrol *kctl; - struct snd_ctl_elem_info info; + struct snd_ctl_elem_info *info; int err; down_read(&card->controls_rwsem); @@ -176,13 +176,19 @@ static int get_ctl_type(struct snd_card *card, struct snd_ctl_elem_id *id, up_read(&card->controls_rwsem); return -ENXIO; } - info.id = *id; - err = kctl->info(kctl, &info); + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) { + up_read(&card->controls_rwsem); + return -ENOMEM; + } + info->id = *id; + err = kctl->info(kctl, info); up_read(&card->controls_rwsem); if (err >= 0) { - err = info.type; - *countp = info.count; + err = info->type; + *countp = info->count; } + kfree(info); return err; } |
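
The include/linux/libata.h hunk above adds ata_qc_first_sg() so that ata_for_each_sg() still visits the pad descriptor when the main scatterlist has been trimmed away entirely (qc->n_elem == 0 but qc->pad_len != 0). A minimal standalone sketch of that iteration shape, with the scatterlist reduced to a plain int array plus an optional trailing pad element, follows; struct fake_qc and its fields are invented for the illustration and are not libata types.

#include <stdio.h>

/*
 * Reduced model of the ata_for_each_sg() change: walk n_elem main
 * elements and then, if pad_len is non-zero, one extra pad element --
 * even when n_elem is 0.  Purely illustrative structures.
 */
struct fake_qc {
	int sg[8];      /* main "scatterlist"          */
	int n_elem;     /* how many main entries exist */
	int pad_len;    /* non-zero => pad entry used  */
	int pad_sgent;  /* the pad entry itself        */
};

static int *first_sg(struct fake_qc *qc)
{
	if (qc->n_elem)
		return qc->sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

static int *next_sg(int *sg, struct fake_qc *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;
	if (++sg - qc->sg < qc->n_elem)
		return sg;
	return qc->pad_len ? &qc->pad_sgent : NULL;
}

int main(void)
{
	/* main list fully trimmed: only the pad element remains */
	struct fake_qc qc = { { 0 }, 0, 4, 99 };
	int *sg;

	for (sg = first_sg(&qc); sg; sg = next_sg(sg, &qc))
		printf("element value %d\n", *sg);
	return 0;
}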
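
The mm/oom_kill.c hunks above change the victim-selection arithmetic: badness() now charges a parent only half of each child's total_vm (plus one), so a single runaway child outscores the parent that forked it, and out_of_memory() first classifies the failed allocation (cpuset-constrained, mempolicy-constrained, or unconstrained) before picking a process. The sketch below models only the scoring half under those assumptions; struct fake_task and the sample numbers are invented for the example and are not kernel structures.

#include <stdio.h>

/*
 * Standalone illustration of the revised scoring rule: a parent is
 * charged half of each child's address-space size, so a single huge
 * child ends up with a higher score than the parent that forked it.
 */
struct fake_task {
	const char *comm;
	unsigned long total_vm;      /* own address space, in pages */
	unsigned long child_vm[4];   /* children's address spaces   */
	int nr_children;
};

static unsigned long score(const struct fake_task *t)
{
	unsigned long points = t->total_vm;
	int i;

	for (i = 0; i < t->nr_children; i++)
		points += t->child_vm[i] / 2 + 1;  /* half, as in the hunk */
	return points;
}

int main(void)
{
	struct fake_task parent = { "server", 2000, { 500000 }, 1 };
	struct fake_task child  = { "worker", 500000, { 0 }, 0 };

	printf("%s: %lu\n", parent.comm, score(&parent));
	printf("%s: %lu\n", child.comm, score(&child));
	/* the child scores higher, so it is the preferred victim */
	return 0;
}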
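
The mm/shmem.c hunks above teach tmpfs to accept mpol=<policy>:<nodelist> as a single mount option: commas still separate options, but a comma followed by a digit is assumed to belong to the nodelist (for example mpol=interleave:0-3,7). A rough userspace sketch of just that splitting rule follows, using plain C string handling instead of the kernel's strsep() and nodelist_parse(); the option string in main() is made up for the example.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the option-splitting rule added to shmem_parse_options():
 * commas separate options, except that a comma followed by a digit is
 * treated as part of the current option's nodelist.
 */
static void split_options(char *options)
{
	while (options != NULL) {
		char *this_opt = options;

		for (;;) {
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit((unsigned char)*options)) {
				options[-1] = '\0';  /* real separator */
				break;
			}
			/* comma inside a nodelist: keep scanning */
		}
		if (*this_opt)
			printf("option: %s\n", this_opt);
	}
}

int main(void)
{
	char opts[] = "size=64m,mpol=interleave:0-3,7,uid=1000";

	split_options(opts);
	return 0;
}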
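
The net/ipv4/netfilter/ip_nat_core.c hunk above replaces the old source-address heuristic with a check of the conntrack status bits: choose IPS_SRC_NAT or IPS_DST_NAT according to which half of the tuple is being rewritten, flip the bit with IPS_NAT_MASK when working on the reply direction, and translate the embedded ICMP header only if that bit is set on the connection. The following sketch models only that bit logic; the FAKE_* flag values and the enums are placeholders, not the real ip_conntrack definitions.

#include <stdio.h>

/* Placeholder flag values, not taken from the ip_conntrack headers. */
#define FAKE_SRC_NAT  (1 << 0)
#define FAKE_DST_NAT  (1 << 1)
#define FAKE_NAT_MASK (FAKE_SRC_NAT | FAKE_DST_NAT)

enum manip_dir { MANIP_SRC, MANIP_DST };
enum pkt_dir   { DIR_ORIGINAL, DIR_REPLY };

static int needs_outer_translation(unsigned int ct_status,
				   enum manip_dir manip, enum pkt_dir dir)
{
	unsigned int statusbit;

	statusbit = (manip == MANIP_SRC) ? FAKE_SRC_NAT : FAKE_DST_NAT;
	if (dir == DIR_REPLY)
		statusbit ^= FAKE_NAT_MASK;  /* invert for the reply direction */

	return (ct_status & statusbit) != 0;
}

int main(void)
{
	/* connection that was source-NATed in the original direction */
	unsigned int status = FAKE_SRC_NAT;

	printf("original dir, SRC manip: %d\n",
	       needs_outer_translation(status, MANIP_SRC, DIR_ORIGINAL));
	printf("reply dir,    DST manip: %d\n",
	       needs_outer_translation(status, MANIP_DST, DIR_REPLY));
	printf("original dir, DST manip: %d\n",
	       needs_outer_translation(status, MANIP_DST, DIR_ORIGINAL));
	return 0;
}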