path: root/arch/mips/kernel/smtc.c
Diffstat (limited to 'arch/mips/kernel/smtc.c')
 arch/mips/kernel/smtc.c | 64
 1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a8b387197d5b..6a857bf030b0 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -4,6 +4,7 @@
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
#include <asm/cpu.h>
#include <asm/processor.h>
@@ -270,9 +271,12 @@ void smtc_configure_tlb(void)
		 * of their initialization in smtc_cpu_setup().
		 */
-		tlbsiz = tlbsiz & 0x3f; /* MIPS32 limits TLB indices to 64 */
-		cpu_data[0].tlbsize = tlbsiz;
+		/* MIPS32 limits TLB indices to 64 */
+		if (tlbsiz > 64)
+			tlbsiz = 64;
+		cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
		smtc_status |= SMTC_TLB_SHARED;
+		local_flush_tlb_all();
		printk("TLB of %d entry pairs shared by %d VPEs\n",
			tlbsiz, vpes);
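
Note on the hunk above: masking with 0x3f and clamping to 64 are not equivalent. The mask keeps only the low six bits, so a full-size TLB of exactly 64 entry pairs would wrap to 0, while the clamp preserves it; that is presumably why the masking line was replaced. A standalone C sketch of the difference (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	int tlbsiz = 64;				/* a full-size MIPS32 TLB */

	int masked  = tlbsiz & 0x3f;			/* old behaviour: 64 -> 0  */
	int clamped = tlbsiz > 64 ? 64 : tlbsiz;	/* new behaviour: 64 -> 64 */

	printf("masked=%d clamped=%d\n", masked, clamped);
	return 0;
}
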
@@ -1017,6 +1021,35 @@ void setup_cross_vpe_interrupts(void)
* SMTC-specific hacks invoked from elsewhere in the kernel.
*/
+void smtc_ipi_replay(void)
+{
+	/*
+	 * To the extent that we've ever turned interrupts off,
+	 * we may have accumulated deferred IPIs. This is subtle.
+	 * If we use the smtc_ipi_qdepth() macro, we'll get an
+	 * exact number - but we'll also disable interrupts
+	 * and create a window of failure where a new IPI gets
+	 * queued after we test the depth but before we re-enable
+	 * interrupts. So long as IXMT never gets set, however,
+	 * we should be OK: If we pick up something and dispatch
+	 * it here, that's great. If we see nothing, but concurrent
+	 * with this operation, another TC sends us an IPI, IXMT
+	 * is clear, and we'll handle it as a real pseudo-interrupt
+	 * and not a pseudo-pseudo interrupt.
+	 */
+	if (IPIQ[smp_processor_id()].depth > 0) {
+		struct smtc_ipi *pipi;
+		extern void self_ipi(struct smtc_ipi *);
+
+		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
+			self_ipi(pipi);
+			smtc_cpu_stats[smp_processor_id()].selfipis++;
+		}
+	}
+}
+
+EXPORT_SYMBOL(smtc_ipi_replay);
+
void smtc_idle_loop_hook(void)
{
#ifdef SMTC_IDLE_HOOK_DEBUG
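
The new smtc_ipi_replay() above is made non-static and exported (hence the <linux/module.h> include in the first hunk), presumably so the replay can also be invoked from interrupt re-enable paths, including ones inlined into modules. A hypothetical sketch of such a caller under CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY; arch_hw_irq_enable() is an invented stand-in, not part of this patch:

/*
 * Hypothetical sketch, not from this patch: with
 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY, the idea is to replay deferred
 * IPIs as soon as a TC re-enables interrupts, rather than waiting for
 * the idle loop. arch_hw_irq_enable() is an invented stand-in for
 * whatever actually sets IE / clears IXMT.
 */
extern void smtc_ipi_replay(void);
extern void arch_hw_irq_enable(void);		/* invented helper */

static inline void sketch_local_irq_enable(void)
{
	arch_hw_irq_enable();			/* hardware interrupts back on */
#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	smtc_ipi_replay();			/* drain IPIs queued while masked */
#endif
}
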
@@ -1113,29 +1146,14 @@ void smtc_idle_loop_hook(void)
	if (pdb_msg != &id_ho_db_msg[0])
		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* SMTC_IDLE_HOOK_DEBUG */
+
	/*
-	 * To the extent that we've ever turned interrupts off,
-	 * we may have accumulated deferred IPIs. This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
-	 * we should be OK: If we pick up something and dispatch
-	 * it here, that's great. If we see nothing, but concurrent
-	 * with this operation, another TC sends us an IPI, IXMT
-	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
+	 * Replay any accumulated deferred IPIs. If "Instant Replay"
+	 * is in use, there should never be any.
	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
-
-		if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
-			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
-		}
-	}
+#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	smtc_ipi_replay();
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
}
void smtc_soft_dump(void)
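
Besides moving the replay into its own function, the last hunk also turns the single-shot dispatch (an if) into a drain loop (a while): one call to smtc_ipi_replay() now handles every queued IPI, where the old idle-hook code handled at most one per pass. A toy userspace model of that difference (nothing below is kernel code; the queue depth is invented):

#include <stdio.h>

static int depth = 3;			/* pretend three IPIs were deferred */

static int dequeue(void)
{
	return depth > 0 ? depth-- : 0;	/* nonzero while the queue is non-empty */
}

int main(void)
{
	int old_style = 0, new_style = 0;

	if (dequeue())			/* old idle-hook behaviour: at most one */
		old_style++;

	while (dequeue())		/* drain-loop behaviour: take the rest */
		new_style++;

	printf("old pass handled %d, drain loop handled %d, %d left queued\n",
	       old_style, new_style, depth);
	return 0;
}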