Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 66
1 file changed, 64 insertions(+), 2 deletions(-)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 176f1751237f..c234ada64565 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -295,6 +295,9 @@ struct smi_info {
static int force_kipmid[SI_MAX_PARMS];
static int num_force_kipmid;
+static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static int num_max_busy_us;
+
static int unload_when_empty = 1;
static int try_smi_init(struct smi_info *smi);
@@ -925,23 +928,77 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
	}
}
+/*
+ * Use -1 in the nsec value of the busy waiting timespec to tell that
+ * we are spinning in kipmid looking for something and not delaying
+ * between checks
+ */
+static inline void ipmi_si_set_not_busy(struct timespec *ts)
+{
+	ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec *ts)
+{
+	return ts->tv_nsec != -1;
+}
+
+static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+				 const struct smi_info *smi_info,
+				 struct timespec *busy_until)
+{
+	unsigned int max_busy_us = 0;
+
+	if (smi_info->intf_num < num_max_busy_us)
+		max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
+	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+		ipmi_si_set_not_busy(busy_until);
+	else if (!ipmi_si_is_busy(busy_until)) {
+		getnstimeofday(busy_until);
+		timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+	} else {
+		struct timespec now;
+		getnstimeofday(&now);
+		if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+			ipmi_si_set_not_busy(busy_until);
+			return 0;
+		}
+	}
+	return 1;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard. This is only enabled for systems
+ * that are not BT and do not have interrupts. It keeps spinning
+ * until an operation is complete or until max_busy tells it to stop
+ * (if that is enabled). See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
+	struct timespec busy_until;
+	ipmi_si_set_not_busy(&busy_until);
	set_user_nice(current, 19);
	while (!kthread_should_stop()) {
+		int busy_wait;
+
		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);
		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
			; /* do nothing */
-		else if (smi_result == SI_SM_CALL_WITH_DELAY)
+		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
			schedule();
		else
-			schedule_timeout_interruptible(1);
+			schedule_timeout_interruptible(0);
	}
	return 0;
}
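
The new helper implements a simple deadline scheme: tv_nsec == -1 marks "no deadline armed", the deadline is armed the first time the state machine asks for a delay, and every later pass compares the current time against it; once it expires, ipmi_thread_busy_wait() returns 0 and the thread falls through to schedule_timeout_interruptible() instead of plain schedule(). Below is a minimal userspace sketch of the same pattern, not driver code: clock_gettime(CLOCK_MONOTONIC) stands in for getnstimeofday(), open-coded timespec arithmetic for timespec_add_ns()/timespec_compare(), the smi_result handling is dropped, and the set_not_busy()/busy_wait() names are invented for the illustration.

/* Userspace sketch only; names and structure are illustrative. */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000L
#define NSEC_PER_USEC	1000L

static void set_not_busy(struct timespec *ts)
{
	ts->tv_nsec = -1;			/* sentinel: no deadline armed */
}

static int is_busy(const struct timespec *ts)
{
	return ts->tv_nsec != -1;
}

/* Return 1 while spinning is still allowed, 0 once the budget is spent. */
static int busy_wait(unsigned int max_busy_us, struct timespec *busy_until)
{
	struct timespec now;

	if (max_busy_us == 0) {			/* no limit configured: keep spinning */
		set_not_busy(busy_until);
		return 1;
	}
	if (!is_busy(busy_until)) {		/* first pass: arm the deadline */
		clock_gettime(CLOCK_MONOTONIC, busy_until);
		busy_until->tv_nsec += max_busy_us * NSEC_PER_USEC;
		while (busy_until->tv_nsec >= NSEC_PER_SEC) {
			busy_until->tv_nsec -= NSEC_PER_SEC;
			busy_until->tv_sec++;
		}
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &now);	/* later passes: check the deadline */
	if (now.tv_sec > busy_until->tv_sec ||
	    (now.tv_sec == busy_until->tv_sec &&
	     now.tv_nsec > busy_until->tv_nsec)) {
		set_not_busy(busy_until);	/* budget exhausted: caller should sleep */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct timespec busy_until;
	long spins = 0;

	set_not_busy(&busy_until);
	while (busy_wait(100, &busy_until))	/* 100 us budget, per the parameter's advice */
		spins++;
	printf("spun %ld times in roughly 100 us\n", spins);
	return 0;
}

Keeping the sentinel in tv_nsec works because a valid timespec never carries a negative nanosecond count, so no separate flag is needed alongside the deadline.
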
@@ -1212,6 +1269,11 @@ module_param(unload_when_empty, int, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1. Setting to 0"
		 " is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+ "Max time (in microseconds) to busy-wait for IPMI data before"
+ " sleeping. 0 (default) means to wait forever. Set to 100-500"
+ " if kipmid is using up a lot of CPU time.");
static void std_irq_cleanup(struct smi_info *info)
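
Because kipmid_max_busy_us is a module_param_array, it takes one comma-separated value per interface (for example, kipmid_max_busy_us=100,500 when loading ipmi_si); interfaces beyond the number of values given keep the default of 0 and busy-wait without a limit, which is what the intf_num < num_max_busy_us check in ipmi_thread_busy_wait() implements. A small standalone sketch of that lookup, with made-up values (not driver code; SI_MAX_PARMS is redefined locally):

/* Standalone illustration of the per-interface lookup in
 * ipmi_thread_busy_wait(); the values below are made up, as if
 * "kipmid_max_busy_us=100,500" had been passed to the module. */
#include <stdio.h>

#define SI_MAX_PARMS 4

static unsigned int kipmid_max_busy_us[SI_MAX_PARMS] = { 100, 500 };
static int num_max_busy_us = 2;

int main(void)
{
	int intf_num;

	for (intf_num = 0; intf_num < SI_MAX_PARMS; intf_num++) {
		unsigned int max_busy_us = 0;

		if (intf_num < num_max_busy_us)
			max_busy_us = kipmid_max_busy_us[intf_num];
		if (max_busy_us)
			printf("interface %d: busy-wait at most %u us\n",
			       intf_num, max_busy_us);
		else
			printf("interface %d: no limit (default)\n", intf_num);
	}
	return 0;
}

Since the parameter is registered with mode 0644, it should also be adjustable after loading through /sys/module/ipmi_si/parameters/kipmid_max_busy_us.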