Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig              |   5
-rw-r--r--  arch/ia64/hp/sim/simscsi.c     |  29
-rw-r--r--  arch/ia64/kernel/acpi.c        |   2
-rw-r--r--  arch/ia64/kernel/entry.S       |   2
-rw-r--r--  arch/ia64/kernel/mca.c         |   5
-rw-r--r--  arch/ia64/kernel/mca_asm.S     |  96
-rw-r--r--  arch/ia64/kernel/mca_drv.c     | 135
-rw-r--r--  arch/ia64/kernel/mca_drv.h     |   2
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S |  48
-rw-r--r--  arch/ia64/kernel/perfmon.c     |   5
-rw-r--r--  arch/ia64/lib/Makefile         |   1
-rw-r--r--  arch/ia64/lib/dec_and_lock.c   |  42
12 files changed, 227 insertions, 145 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ea4a889d8196..8f699a2e7981 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -302,11 +302,6 @@ config PREEMPT
 
 source "mm/Kconfig"
 
-config HAVE_DEC_LOCK
-	bool
-	depends on (SMP || PREEMPT)
-	default y
-
 config IA32_SUPPORT
 	bool "Support for Linux/x86 binaries"
 	help
diff --git a/arch/ia64/hp/sim/simscsi.c b/arch/ia64/hp/sim/simscsi.c
index 56405dbfd739..a18983a3c934 100644
--- a/arch/ia64/hp/sim/simscsi.c
+++ b/arch/ia64/hp/sim/simscsi.c
@@ -233,6 +233,23 @@ simscsi_readwrite10 (struct scsi_cmnd *sc, int mode)
 	simscsi_readwrite(sc, mode, offset, ((sc->cmnd[7] << 8) | sc->cmnd[8])*512);
 }
 
+static void simscsi_fillresult(struct scsi_cmnd *sc, char *buf, unsigned len)
+{
+
+	int scatterlen = sc->use_sg;
+	struct scatterlist *slp;
+
+	if (scatterlen == 0)
+		memcpy(sc->request_buffer, buf, len);
+	else for (slp = (struct scatterlist *)sc->buffer; scatterlen-- > 0 && len > 0; slp++) {
+		unsigned thislen = min(len, slp->length);
+
+		memcpy(page_address(slp->page) + slp->offset, buf, thislen);
+		slp++;
+		len -= thislen;
+	}
+}
+
 static int
 simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
@@ -240,6 +257,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 	char fname[MAX_ROOT_LEN+16];
 	size_t disk_size;
 	char *buf;
+	char localbuf[36];
 #if DEBUG_SIMSCSI
 	register long sp asm ("sp");
 
@@ -263,7 +281,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 			/* disk doesn't exist... */
 			break;
 		}
-		buf = sc->request_buffer;
+		buf = localbuf;
 		buf[0] = 0;	/* magnetic disk */
 		buf[1] = 0;	/* not a removable medium */
 		buf[2] = 2;	/* SCSI-2 compliant device */
@@ -273,6 +291,7 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		buf[6] = 0;	/* reserved */
 		buf[7] = 0;	/* various flags */
 		memcpy(buf + 8, "HP      SIMULATED DISK  0.00",  28);
+		simscsi_fillresult(sc, buf, 36);
 		sc->result = GOOD;
 		break;
 
@@ -304,16 +323,13 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		simscsi_readwrite10(sc, SSC_WRITE);
 		break;
 
-
 	      case READ_CAPACITY:
 		if (desc[target_id] < 0 || sc->request_bufflen < 8) {
 			break;
 		}
-		buf = sc->request_buffer;
-
+		buf = localbuf;
 		disk_size = simscsi_get_disk_size(desc[target_id]);
 
-		/* pretend to be a 1GB disk (partition table contains real stuff): */
 		buf[0] = (disk_size >> 24) & 0xff;
 		buf[1] = (disk_size >> 16) & 0xff;
 		buf[2] = (disk_size >>  8) & 0xff;
@@ -323,13 +339,14 @@ simscsi_queuecommand (struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 		buf[5] = 0;
 		buf[6] = 2;
 		buf[7] = 0;
+		simscsi_fillresult(sc, buf, 8);
 		sc->result = GOOD;
 		break;
 
 	      case MODE_SENSE:
 	      case MODE_SENSE_10:
 		/* sd.c uses this to determine whether disk does write-caching. */
-		memset(sc->request_buffer, 0, 128);
+		simscsi_fillresult(sc, (char *)empty_zero_page, sc->request_bufflen);
 		sc->result = GOOD;
 		break;
 
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 28a4529fdd60..7e926471e4ec 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -899,7 +899,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 	if ((err = iosapic_init(phys_addr, gsi_base)))
 		return err;
 
-#if CONFIG_ACPI_NUMA
+#ifdef CONFIG_ACPI_NUMA
 	acpi_map_iosapic(handle, 0, NULL, NULL);
 #endif	/* CONFIG_ACPI_NUMA */
 
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ba0b6a1f429f..0741b066b98f 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -491,7 +491,7 @@ GLOBAL_ENTRY(prefetch_stack)
 	;;
 	lfetch.fault [r16], 128
 	br.ret.sptk.many rp
-END(prefetch_switch_stack)
+END(prefetch_stack)
 
 GLOBAL_ENTRY(execve)
 	mov r15=__NR_execve	// put syscall number in place
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6dc726ad7137..d0a5106fba24 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1016,6 +1016,11 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
 
 			cmc_polling_enabled = 1;
 			spin_unlock(&cmc_history_lock);
+			/* If we're being hit with CMC interrupts, we won't
+			 * ever execute the schedule_work() below.  Need to
+			 * disable CMC interrupts on this processor now.
+			 */
+			ia64_mca_cmc_vector_disable(NULL);
 			schedule_work(&cmc_disable_work);
 
 			/*
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 499a065f4e60..db32fc1d3935 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -489,24 +489,27 @@ ia64_state_save:
 	;;
 	st8 [temp1]=r17,16	// pal_min_state
 	st8 [temp2]=r6,16	// prev_IA64_KR_CURRENT
+	mov r6=IA64_KR(CURRENT_STACK)
+	;;
+	st8 [temp1]=r6,16	// prev_IA64_KR_CURRENT_STACK
+	st8 [temp2]=r0,16	// prev_task, starts off as NULL
 	mov r6=cr.ifa
 	;;
-	st8 [temp1]=r0,16	// prev_task, starts off as NULL
-	st8 [temp2]=r12,16	// cr.isr
+	st8 [temp1]=r12,16	// cr.isr
+	st8 [temp2]=r6,16	// cr.ifa
 	mov r12=cr.itir
 	;;
-	st8 [temp1]=r6,16	// cr.ifa
-	st8 [temp2]=r12,16	// cr.itir
+	st8 [temp1]=r12,16	// cr.itir
+	st8 [temp2]=r11,16	// cr.iipa
 	mov r12=cr.iim
 	;;
-	st8 [temp1]=r11,16	// cr.iipa
-	st8 [temp2]=r12,16	// cr.iim
-	mov r6=cr.iha
+	st8 [temp1]=r12,16	// cr.iim
 (p1)	mov r12=IA64_MCA_COLD_BOOT
 (p2)	mov r12=IA64_INIT_WARM_BOOT
+	mov r6=cr.iha
 	;;
-	st8 [temp1]=r6,16	// cr.iha
-	st8 [temp2]=r12		// os_status, default is cold boot
+	st8 [temp2]=r6,16	// cr.iha
+	st8 [temp1]=r12		// os_status, default is cold boot
 	mov r6=IA64_MCA_SAME_CONTEXT
 	;;
 	st8 [temp1]=r6		// context, default is same context
@@ -823,9 +826,12 @@ ia64_state_restore:
 	ld8 r12=[temp1],16	// sal_ra
 	ld8 r9=[temp2],16	// sal_gp
 	;;
-	ld8 r22=[temp1],24	// pal_min_state, virtual.  skip prev_task
+	ld8 r22=[temp1],16	// pal_min_state, virtual
 	ld8 r21=[temp2],16	// prev_IA64_KR_CURRENT
 	;;
+	ld8 r16=[temp1],16	// prev_IA64_KR_CURRENT_STACK
+	ld8 r20=[temp2],16	// prev_task
+	;;
 	ld8 temp3=[temp1],16	// cr.isr
 	ld8 temp4=[temp2],16	// cr.ifa
 	;;
@@ -846,6 +852,45 @@ ia64_state_restore:
 	ld8 r8=[temp1]		// os_status
 	ld8 r10=[temp2]		// context
 
+	/* Wire IA64_TR_CURRENT_STACK to the stack that we are resuming to.  To
+	 * avoid any dependencies on the algorithm in ia64_switch_to(), just
+	 * purge any existing CURRENT_STACK mapping and insert the new one.
+	 *
+	 * r16 contains prev_IA64_KR_CURRENT_STACK, r21 contains
+	 * prev_IA64_KR_CURRENT, these values may have been changed by the C
+	 * code.  Do not use r8, r9, r10, r22, they contain values ready for
+	 * the return to SAL.
+	 */
+
+	mov r15=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
+	;;
+	shl r15=r15,IA64_GRANULE_SHIFT
+	;;
+	dep r15=-1,r15,61,3			// virtual granule
+	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
+	;;
+	ptr.d r15,r18
+	;;
+	srlz.d
+
+	extr.u r19=r21,61,3			// r21 = prev_IA64_KR_CURRENT
+	shl r20=r16,IA64_GRANULE_SHIFT		// r16 = prev_IA64_KR_CURRENT_STACK
+	movl r21=PAGE_KERNEL			// page properties
+	;;
+	mov IA64_KR(CURRENT_STACK)=r16
+	cmp.ne p6,p0=RGN_KERNEL,r19		// new stack is in the kernel region?
+	or r21=r20,r21				// construct PA | page properties
+(p6)	br.spnt	1f				// the dreaded cpu 0 idle task in region 5:(
+	;;
+	mov cr.itir=r18
+	mov cr.ifa=r21
+	mov r20=IA64_TR_CURRENT_STACK
+	;;
+	itr.d dtr[r20]=r21
+	;;
+	srlz.d
+1:
+
 	br.sptk b0
 
//EndStub//////////////////////////////////////////////////////////////////////
@@ -982,6 +1027,7 @@ ia64_set_kernel_registers:
 	add temp4=temp4, temp1	// &struct ia64_sal_os_state.os_gp
 	add r12=temp1, temp3	// kernel stack pointer on MCA/INIT stack
 	add r13=temp1, r3	// set current to start of MCA/INIT stack
+	add r20=temp1, r3	// physical start of MCA/INIT stack
 	;;
 	ld8 r1=[temp4]		// OS GP from SAL OS state
 	;;
@@ -991,7 +1037,35 @@ ia64_set_kernel_registers:
 	;;
 	mov IA64_KR(CURRENT)=r13
 
-	// FIXME: do I need to wire IA64_KR_CURRENT_STACK and IA64_TR_CURRENT_STACK?
+	/* Wire IA64_TR_CURRENT_STACK to the MCA/INIT handler stack.  To avoid
+	 * any dependencies on the algorithm in ia64_switch_to(), just purge
+	 * any existing CURRENT_STACK mapping and insert the new one.
+	 */
+
+	mov r16=IA64_KR(CURRENT_STACK)		// physical granule mapped by IA64_TR_CURRENT_STACK
+	;;
+	shl r16=r16,IA64_GRANULE_SHIFT
+	;;
+	dep r16=-1,r16,61,3			// virtual granule
+	mov r18=IA64_GRANULE_SHIFT<<2		// for cr.itir.ps
+	;;
+	ptr.d r16,r18
+	;;
+	srlz.d
+
+	shr.u r16=r20,IA64_GRANULE_SHIFT	// r20 = physical start of MCA/INIT stack
+	movl r21=PAGE_KERNEL			// page properties
+	;;
+	mov IA64_KR(CURRENT_STACK)=r16
+	or r21=r20,r21				// construct PA | page properties
+	;;
+	mov cr.itir=r18
+	mov cr.ifa=r13
+	mov r20=IA64_TR_CURRENT_STACK
+	;;
+	itr.d dtr[r20]=r21
+	;;
+	srlz.d
 
 	br.sptk b0
 
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 6e683745af49..f081c60ab206 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -56,8 +56,9 @@ static struct page *page_isolate[MAX_PAGE_ISOLATE];
 static int num_page_isolate = 0;
 
 typedef enum {
-	ISOLATE_NG = 0,
-	ISOLATE_OK = 1
+	ISOLATE_NG,
+	ISOLATE_OK,
+	ISOLATE_NONE
 } isolate_status_t;
 
 /*
@@ -74,7 +75,7 @@ static struct {
  * @paddr:	poisoned memory location
  *
  * Return value:
- *	ISOLATE_OK / ISOLATE_NG
+ *	one of isolate_status_t, ISOLATE_OK/NG/NONE.
  */
 
 static isolate_status_t
@@ -84,23 +85,26 @@ mca_page_isolate(unsigned long paddr)
 	struct page *p;
 
 	/* whether physical address is valid or not */
-	if ( !ia64_phys_addr_valid(paddr) )
-		return ISOLATE_NG;
+	if (!ia64_phys_addr_valid(paddr))
+		return ISOLATE_NONE;
+
+	if (!pfn_valid(paddr))
+		return ISOLATE_NONE;
 
 	/* convert physical address to physical page number */
 	p = pfn_to_page(paddr>>PAGE_SHIFT);
 
 	/* check whether a page number have been already registered or not */
-	for( i = 0; i < num_page_isolate; i++ )
-		if( page_isolate[i] == p )
+	for (i = 0; i < num_page_isolate; i++)
+		if (page_isolate[i] == p)
 			return ISOLATE_OK; /* already listed */
 
 	/* limitation check */
-	if( num_page_isolate == MAX_PAGE_ISOLATE )
+	if (num_page_isolate == MAX_PAGE_ISOLATE)
 		return ISOLATE_NG;
 
 	/* kick pages having attribute 'SLAB' or 'Reserved' */
-	if( PageSlab(p) || PageReserved(p) )
+	if (PageSlab(p) || PageReserved(p))
 		return ISOLATE_NG;
 
 	/* add attribute 'Reserved' and register the page */
@@ -122,10 +126,15 @@ mca_handler_bh(unsigned long paddr)
 		current->pid, current->comm);
 
 	spin_lock(&mca_bh_lock);
-	if (mca_page_isolate(paddr) == ISOLATE_OK) {
+	switch (mca_page_isolate(paddr)) {
+	case ISOLATE_OK:
 		printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
-	} else {
+		break;
+	case ISOLATE_NG:
 		printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
+		break;
+	default:
+		break;
 	}
 	spin_unlock(&mca_bh_lock);
 
@@ -139,10 +148,10 @@ mca_handler_bh(unsigned long paddr)
  * @peidx:	pointer to index of processor error section
  */
 
-static void 
+static void
 mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 {
-	/* 
+	/*
 	 * calculate the start address of
 	 *   "struct cpuid_info" and "sal_processor_static_info_t".
 	 */
@@ -164,7 +173,7 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
 }
 
 /**
- * mca_make_slidx - Make index of SAL error record 
+ * mca_make_slidx - Make index of SAL error record
  * @buffer:	pointer to SAL error record
  * @slidx:	pointer to index of SAL error record
  *
@@ -172,12 +181,12 @@ mca_make_peidx(sal_log_processor_info_t *slpi, peidx_table_t *peidx)
  *	1 if record has platform error / 0 if not
  */
 #define LOG_INDEX_ADD_SECT_PTR(sect, ptr) \
-	{ slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
-	hl->hdr = ptr; \
-	list_add(&hl->list, &(sect)); \
-	slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
+	{slidx_list_t *hl = &slidx_pool.buffer[slidx_pool.cur_idx]; \
+	hl->hdr = ptr; \
+	list_add(&hl->list, &(sect)); \
+	slidx_pool.cur_idx = (slidx_pool.cur_idx + 1)%slidx_pool.max_idx; }
 
-static int 
+static int
 mca_make_slidx(void *buffer, slidx_table_t *slidx)
 {
 	int platform_err = 0;
@@ -214,28 +223,36 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
 		sp = (sal_log_section_hdr_t *)((char*)buffer + ercd_pos);
 		if (!efi_guidcmp(sp->guid, SAL_PROC_DEV_ERR_SECT_GUID)) {
 			LOG_INDEX_ADD_SECT_PTR(slidx->proc_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_MEM_DEV_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->mem_dev_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_SEL_DEV_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->sel_dev_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_PCI_BUS_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->pci_bus_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_SMBIOS_DEV_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->smbios_dev_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_PCI_COMP_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->pci_comp_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_SPECIFIC_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->plat_specific_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_HOST_CTLR_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->host_ctlr_err, sp);
-		} else if (!efi_guidcmp(sp->guid, SAL_PLAT_BUS_ERR_SECT_GUID)) {
+		} else if (!efi_guidcmp(sp->guid,
+				SAL_PLAT_BUS_ERR_SECT_GUID)) {
 			platform_err = 1;
 			LOG_INDEX_ADD_SECT_PTR(slidx->plat_bus_err, sp);
 		} else {
@@ -253,15 +270,16 @@ mca_make_slidx(void *buffer, slidx_table_t *slidx)
  * Return value:
  *	0 on Success / -ENOMEM on Failure
  */
-static int 
+static int
 init_record_index_pools(void)
 {
 	int i;
 	int rec_max_size;  /* Maximum size of SAL error records */
 	int sect_min_size; /* Minimum size of SAL error sections */
 	/* minimum size table of each section */
-	static int sal_log_sect_min_sizes[] = { 
-		sizeof(sal_log_processor_info_t) + sizeof(sal_processor_static_info_t),
+	static int sal_log_sect_min_sizes[] = {
+		sizeof(sal_log_processor_info_t)
+		+ sizeof(sal_processor_static_info_t),
 		sizeof(sal_log_mem_dev_err_info_t),
 		sizeof(sal_log_sel_dev_err_info_t),
 		sizeof(sal_log_pci_bus_err_info_t),
@@ -294,7 +312,8 @@ init_record_index_pools(void)
 
 	/* - 3 - */
 	slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1;
-	slidx_pool.buffer = (slidx_list_t *) kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
+	slidx_pool.buffer = (slidx_list_t *)
+		kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);
 
 	return slidx_pool.buffer ? 0 : -ENOMEM;
 }
@@ -308,6 +327,7 @@ init_record_index_pools(void)
  * is_mca_global - Check whether this MCA is global or not
  * @peidx:	pointer of index of processor error section
  * @pbci:	pointer to pal_bus_check_info_t
+ * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	MCA_IS_LOCAL / MCA_IS_GLOBAL
@@ -317,11 +337,12 @@ static mca_type_t
 is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 	      struct ia64_sal_os_state *sos)
 {
-	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+	pal_processor_state_info_t *psp =
+		(pal_processor_state_info_t*)peidx_psp(peidx);
 
-	/* 
+	/*
 	 * PAL can request a rendezvous, if the MCA has a global scope.
-	 * If "rz_always" flag is set, SAL requests MCA rendezvous 
+	 * If "rz_always" flag is set, SAL requests MCA rendezvous
 	 * in spite of global MCA.
 	 * Therefore it is local MCA when rendezvous has not been requested.
 	 * Failed to rendezvous, the system must be down.
@@ -381,13 +402,15 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
  * @slidx:	pointer of index of SAL error record
  * @peidx:	pointer of index of processor error section
  * @pbci:	pointer of pal_bus_check_info
+ * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
 
 static int
-recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_read_error(slidx_table_t *slidx,
+			peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			struct ia64_sal_os_state *sos)
 {
 	sal_log_mod_error_info_t *smei;
@@ -453,24 +476,28 @@ recover_from_read_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_chec
  * @slidx:	pointer of index of SAL error record
  * @peidx:	pointer of index of processor error section
  * @pbci:	pointer of pal_bus_check_info
+ * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
 
 static int
-recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx,
+			    pal_bus_check_info_t *pbci,
 			    struct ia64_sal_os_state *sos)
 {
 	int status = 0;
-	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+	pal_processor_state_info_t *psp =
+		(pal_processor_state_info_t*)peidx_psp(peidx);
 
 	if (psp->bc && pbci->eb && pbci->bsi == 0) {
 		switch(pbci->type) {
 		case 1: /* partial read */
 		case 3: /* full line(cpu) read */
 		case 9: /* I/O space read */
-			status = recover_from_read_error(slidx, peidx, pbci, sos);
+			status = recover_from_read_error(slidx, peidx, pbci,
+							 sos);
 			break;
 		case 0: /* unknown */
 		case 2: /* partial write */
@@ -481,7 +508,8 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 		case 8: /* write coalescing transactions */
 		case 10: /* I/O space write */
 		case 11: /* inter-processor interrupt message(IPI) */
-		case 12: /* interrupt acknowledge or external task priority cycle */
+		case 12: /* interrupt acknowledge or
+				external task priority cycle */
 		default:
 			break;
 		}
@@ -496,6 +524,7 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
  * @slidx:	pointer of index of SAL error record
  * @peidx:	pointer of index of processor error section
  * @pbci:	pointer of pal_bus_check_info
+ * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
@@ -509,15 +538,17 @@ recover_from_platform_error(slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_
 */
 
 static int
-recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *peidx, pal_bus_check_info_t *pbci,
+recover_from_processor_error(int platform, slidx_table_t *slidx,
+			     peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			     struct ia64_sal_os_state *sos)
 {
-	pal_processor_state_info_t *psp = (pal_processor_state_info_t*)peidx_psp(peidx);
+	pal_processor_state_info_t *psp =
+		(pal_processor_state_info_t*)peidx_psp(peidx);
 
-	/* 
+	/*
 	 * We cannot recover errors with other than bus_check.
 	 */
-	if (psp->cc || psp->rc || psp->uc) 
+	if (psp->cc || psp->rc || psp->uc)
 		return 0;
 
 	/*
@@ -546,10 +577,10 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 	 *    (e.g. a load from poisoned memory)
 	 * This means "there are some platform errors".
 	 */
-	if (platform) 
+	if (platform)
 		return recover_from_platform_error(slidx, peidx, pbci, sos);
-	/* 
-	 * On account of strange SAL error record, we cannot recover. 
+	/*
+	 * On account of strange SAL error record, we cannot recover.
 	 */
 	return 0;
 }
@@ -557,14 +588,14 @@ recover_from_processor_error(int platform, slidx_table_t *slidx, peidx_table_t *
 /**
  * mca_try_to_recover - Try to recover from MCA
  * @rec:	pointer to a SAL error record
+ * @sos:	pointer to hand off struct between SAL and OS
 *
 * Return value:
 *	1 on Success / 0 on Failure
 */
 
 static int
-mca_try_to_recover(void *rec,
-		   struct ia64_sal_os_state *sos)
+mca_try_to_recover(void *rec, struct ia64_sal_os_state *sos)
 {
 	int platform_err;
 	int n_proc_err;
@@ -588,7 +619,8 @@ mca_try_to_recover(void *rec,
 	}
 
 	/* Make index of processor error section */
-	mca_make_peidx((sal_log_processor_info_t*)slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
+	mca_make_peidx((sal_log_processor_info_t*)
+		slidx_first_entry(&slidx.proc_err)->hdr, &peidx);
 
 	/* Extract Processor BUS_CHECK[0] */
 	*((u64*)&pbci) = peidx_check_info(&peidx, bus_check, 0);
@@ -598,7 +630,8 @@ mca_try_to_recover(void *rec,
 		return 0;
 
 	/* Try to recover a processor error */
-	return recover_from_processor_error(platform_err, &slidx, &peidx, &pbci, sos);
+	return recover_from_processor_error(platform_err, &slidx, &peidx,
+					    &pbci, sos);
 }
 
 /*
@@ -611,7 +644,7 @@ int __init mca_external_handler_init(void)
 		return -ENOMEM;
 
 	/* register external mca handlers */
-	if (ia64_reg_MCA_extension(mca_try_to_recover)){
+	if (ia64_reg_MCA_extension(mca_try_to_recover)) {
 		printk(KERN_ERR "ia64_reg_MCA_extension failed.\n");
 		kfree(slidx_pool.buffer);
 		return -EFAULT;
diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h
index 0227b761f2c4..e2f6fa1e0ef6 100644
--- a/arch/ia64/kernel/mca_drv.h
+++ b/arch/ia64/kernel/mca_drv.h
@@ -6,7 +6,7 @@
  * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
  */
 /*
- * Processor error section: 
+ * Processor error section:
 *
 *  +-sal_log_processor_info_t *info-------------+
 *  | sal_log_section_hdr_t header;               |
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index 2d7e0217638d..3f298ee4d00c 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -13,45 +13,45 @@
 #include <asm/ptrace.h>
 
 GLOBAL_ENTRY(mca_handler_bhhook)
-	invala				// clear RSE ?
-	;;				//
-	cover				//
-	;;				//
-	clrrrb				//
+	invala				// clear RSE ?
+	;;
+	cover
+	;;
+	clrrrb
 	;;
-	alloc		r16=ar.pfs,0,2,1,0	// make a new frame
+	alloc	r16=ar.pfs,0,2,1,0	// make a new frame
 	;;
-	mov		ar.rsc=0
+	mov	ar.rsc=0
 	;;
-	mov		r13=IA64_KR(CURRENT)	// current task pointer
+	mov	r13=IA64_KR(CURRENT)	// current task pointer
 	;;
-	mov		r2=r13
+	mov	r2=r13
 	;;
-	addl		r22=IA64_RBS_OFFSET,r2
+	addl	r22=IA64_RBS_OFFSET,r2
 	;;
-	mov		ar.bspstore=r22
+	mov	ar.bspstore=r22
 	;;
-	addl		sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
+	addl	sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2
 	;;
-	adds		r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
+	adds	r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
 	;;
-	st1		[r2]=r0		// clear current->thread.on_ustack flag
-	mov		loc0=r16
-	movl		loc1=mca_handler_bh	// recovery C function
+	st1	[r2]=r0			// clear current->thread.on_ustack flag
+	mov	loc0=r16
+	movl	loc1=mca_handler_bh	// recovery C function
 	;;
-	mov		out0=r8		// poisoned address
-	mov		b6=loc1
+	mov	out0=r8			// poisoned address
+	mov	b6=loc1
 	;;
-	mov		loc1=rp
+	mov	loc1=rp
 	;;
-	ssm		psr.i
+	ssm	psr.i
 	;;
-	br.call.sptk.many rp=b6		// does not return ...
+	br.call.sptk.many rp=b6		// does not return ...
 	;;
-	mov		ar.pfs=loc0
-	mov		rp=loc1
+	mov	ar.pfs=loc0
+	mov	rp=loc1
 	;;
-	mov		r8=r0
+	mov	r8=r0
 	br.ret.sptk.many rp
 	;;
 END(mca_handler_bhhook)
 
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 1650353e3f77..d71731ee5b61 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -574,7 +574,7 @@ pfm_protect_ctx_ctxsw(pfm_context_t *x)
 	return 0UL;
 }
 
-static inline unsigned long
+static inline void
 pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
 {
 	spin_unlock(&(x)->ctx_lock);
@@ -2218,12 +2218,13 @@ static void
 pfm_free_fd(int fd, struct file *file)
 {
 	struct files_struct *files = current->files;
-	struct fdtable *fdt = files_fdtable(files);
+	struct fdtable *fdt;
 
 	/*
 	 * there ie no fd_uninstall(), so we do it here
 	 */
 	spin_lock(&files->file_lock);
+	fdt = files_fdtable(files);
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	spin_unlock(&files->file_lock);
 
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile
index 4be1546e1726..ac64664a1807 100644
--- a/arch/ia64/lib/Makefile
+++ b/arch/ia64/lib/Makefile
@@ -15,7 +15,6 @@ lib-$(CONFIG_ITANIUM) += copy_page.o copy_user.o memcpy.o
 lib-$(CONFIG_MCKINLEY)	+= copy_page_mck.o memcpy_mck.o
 lib-$(CONFIG_PERFMON)	+= carta_random.o
 lib-$(CONFIG_MD_RAID5)	+= xor.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
 
 AFLAGS___divdi3.o	=
 AFLAGS___udivdi3.o	= -DUNSIGNED
diff --git a/arch/ia64/lib/dec_and_lock.c b/arch/ia64/lib/dec_and_lock.c
deleted file mode 100644
index c7ce92f968f1..000000000000
--- a/arch/ia64/lib/dec_and_lock.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2003 Jerome Marchand, Bull S.A.
- *	Cleaned up by David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * This file is released under the GPLv2, or at your option any later version.
- *
- * ia64 version of "atomic_dec_and_lock()" using the atomic "cmpxchg" instruction.  This
- * code is an adaptation of the x86 version of "atomic_dec_and_lock()".
- */
-
-#include <linux/compiler.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-
-/*
- * Decrement REFCOUNT and if the count reaches zero, acquire the spinlock.  Both of these
- * operations have to be done atomically, so that the count doesn't drop to zero without
- * acquiring the spinlock first.
- */
-int
-_atomic_dec_and_lock (atomic_t *refcount, spinlock_t *lock)
-{
-	int old, new;
-
-	do {
-		old = atomic_read(refcount);
-		new = old - 1;
-
-		if (unlikely (old == 1)) {
-			/* oops, we may be decrementing to zero, do it the slow way... */
-			spin_lock(lock);
-			if (atomic_dec_and_test(refcount))
-				return 1;
-			spin_unlock(lock);
-			return 0;
-		}
-	} while (cmpxchg(&refcount->counter, old, new) != old);
-	return 0;
-}
-
-EXPORT_SYMBOL(_atomic_dec_and_lock);
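The final hunk deletes the ia64-private _atomic_dec_and_lock() together with its HAVE_DEC_LOCK Kconfig symbol and Makefile rule, so ia64 falls back to the generic helper in lib/. The sketch below only illustrates the contract that helper must provide (decrement the count and return 1 with the spinlock held exactly when the count reaches zero); it is a simplified stand-in, not code copied from lib/dec_and_lock.c, and the lock-first fallback shown is an assumption about the generic implementation of that era.

/*
 * Illustrative sketch only: the dec-and-lock contract that the deleted
 * ia64 cmpxchg version implemented.  Simplified; not the exact
 * lib/dec_and_lock.c source.
 */
#include <linux/spinlock.h>
#include <asm/atomic.h>

int sketch_atomic_dec_and_lock(atomic_t *refcount, spinlock_t *lock)
{
	spin_lock(lock);
	if (atomic_dec_and_test(refcount))
		return 1;	/* count reached zero, caller holds lock */
	spin_unlock(lock);
	return 0;		/* count still nonzero, lock released */
}

The ia64 file being removed avoided taking the lock on the common path by decrementing with cmpxchg and only falling back to the spinlock when the count was about to hit zero; dropping it trades that micro-optimisation for a single shared implementation.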