author		Thomas Hellstrom <thellstrom@vmware.com>	2015-11-02 10:41:00 +0100
committer	Thomas Hellstrom <thellstrom@vmware.com>	2015-11-05 12:47:25 +0100
commit		05a924eada58f91d380de809a6273f94f61329a9 (patch)
tree		896855dedaa4d0acccc61db9086799b6dfc2f5cc
parent		28f44cf2652a513ad23f1068706395986a1ee33a (diff)
vmwgfx: Switch to threaded irqs  (feature/thellstrom/threadirq)
Switch to a threaded irq handler and convert all affected spinlocks so that they no longer block local irqs or softirqs.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
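For context, the threaded-irq model splits interrupt handling into a small hard-irq handler that acknowledges the device and returns IRQ_WAKE_THREAD, and a handler that runs later in a dedicated kernel thread, i.e. in process context. The sketch below is a minimal, generic illustration of that pattern with hypothetical foo_* names and a made-up status register; it is not the vmwgfx code, whose registration goes through the drm_driver .irq_handler/.irq_thread_fn hooks shown in the vmwgfx_drv.c hunk below.

/*
 * Minimal threaded-irq sketch (hypothetical foo_* driver, not vmwgfx).
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct foo_device {
	void __iomem *status_reg;	/* hypothetical status register */
};

/* Hard-irq (top) half: must be fast and must not sleep. */
static irqreturn_t foo_hardirq(int irq, void *arg)
{
	struct foo_device *fdev = arg;
	u32 status = readl(fdev->status_reg);

	if (!status)				/* not ours on a shared line */
		return IRQ_NONE;

	writel(status, fdev->status_reg);	/* ack the pending bits */
	return IRQ_WAKE_THREAD;			/* defer real work to the thread */
}

/* Threaded half: runs in a kernel thread, process context, may sleep. */
static irqreturn_t foo_thread_fn(int irq, void *arg)
{
	struct foo_device *fdev = arg;

	/*
	 * Locks shared only between this thread and other process-context
	 * paths can be plain spin_lock()s; no _bh or _irqsave needed.
	 */
	(void)fdev;
	return IRQ_HANDLED;
}

static int foo_request_irq(struct foo_device *fdev, int irq)
{
	return request_threaded_irq(irq, foo_hardirq, foo_thread_fn,
				    IRQF_SHARED, "foo", fdev);
}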
-rw-r--r--	vmwgfx_cmdbuf.c	61
-rw-r--r--	vmwgfx_drv.c	1
-rw-r--r--	vmwgfx_drv.h	21
-rw-r--r--	vmwgfx_fence.c	91
-rw-r--r--	vmwgfx_irq.c	53
5 files changed, 116 insertions(+), 111 deletions(-)
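The other half of the change, converting spin_lock_bh()/spin_lock_irqsave() calls to plain spin_lock() throughout the hunks below, follows directly from the move: once a lock is taken only from process context (the irq thread, work items and ordinary callers), no softirq or hard-irq acquirer remains to deadlock against, so bottom halves and local interrupts can stay enabled across the critical section. A schematic before/after on a hypothetical structure, not a vmwgfx type:

#include <linux/spinlock.h>

struct bar {
	spinlock_t lock;
	unsigned int completed;
};

/* Before: the counter was also updated from a tasklet (softirq context),
 * so process-context callers had to block bottom halves while holding
 * the lock to avoid deadlocking against it. */
static void bar_note_completion_old(struct bar *b)
{
	spin_lock_bh(&b->lock);
	b->completed++;
	spin_unlock_bh(&b->lock);
}

/* After: every acquirer, the irq thread included, runs in process
 * context, so the plain variant suffices. */
static void bar_note_completion_new(struct bar *b)
{
	spin_lock(&b->lock);
	b->completed++;
	spin_unlock(&b->lock);
}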
diff --git a/vmwgfx_cmdbuf.c b/vmwgfx_cmdbuf.c
index dc3f6b7..d10c4f1 100644
--- a/vmwgfx_cmdbuf.c
+++ b/vmwgfx_cmdbuf.c
@@ -83,7 +83,6 @@ struct vmw_cmdbuf_context {
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
- * @tasklet: Tasklet struct for irq processing. Immutable.
* @alloc_queue: Wait queue for processes waiting to allocate command buffer
* space.
* @idle_queue: Wait queue for processes waiting for command buffer idle.
@@ -115,7 +114,6 @@ struct vmw_cmdbuf_man {
spinlock_t lock;
struct dma_pool *headers;
struct dma_pool *dheaders;
- struct tasklet_struct tasklet;
wait_queue_head_t alloc_queue;
wait_queue_head_t idle_queue;
bool irq_on;
@@ -276,9 +274,9 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
vmw_cmdbuf_header_inline_free(header);
return;
}
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
__vmw_cmdbuf_header_free(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
}
@@ -469,20 +467,17 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
}
/**
- * vmw_cmdbuf_man_tasklet - The main part of the command buffer interrupt
- * handler implemented as a tasklet.
+ * vmw_cmdbuf_irqthread - The main part of the command buffer interrupt
+ * handler.
*
- * @data: Tasklet closure. A pointer to the command buffer manager cast to
- * an unsigned long.
+ * @man: A pointer to the command buffer manager
*
- * The bottom half (tasklet) of the interrupt handler simply calls into the
+ * The main portion of the interrupt handler simply calls into the
* command buffer processor to free finished buffers and submit any
* queued buffers to hardware.
*/
-static void vmw_cmdbuf_man_tasklet(unsigned long data)
+void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man)
{
- struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;
-
spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
@@ -505,7 +500,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
uint32_t dummy;
bool restart = false;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
list_for_each_entry_safe(entry, next, &man->error, list) {
restart = true;
DRM_ERROR("Command buffer error.\n");
@@ -514,7 +509,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
__vmw_cmdbuf_header_free(entry);
wake_up_all(&man->idle_queue);
}
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
if (restart && vmw_cmdbuf_startstop(man, true))
DRM_ERROR("Failed restarting command buffer context 0.\n");
@@ -537,7 +532,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
bool idle = false;
int i;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
vmw_cmdbuf_man_process(man);
for_each_cmdbuf_ctx(man, i, ctx) {
if (!list_empty(&ctx->submitted) ||
@@ -549,7 +544,7 @@ static bool vmw_cmdbuf_man_idle(struct vmw_cmdbuf_man *man,
idle = list_empty(&man->error);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return idle;
}
@@ -572,7 +567,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
if (!cur)
return;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
if (man->cur_pos == 0) {
__vmw_cmdbuf_header_free(cur);
goto out_unlock;
@@ -581,7 +576,7 @@ static void __vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man)
man->cur->cb_header->length = man->cur_pos;
vmw_cmdbuf_ctx_add(man, man->cur, SVGA_CB_CONTEXT_0);
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
man->cur = NULL;
man->cur_pos = 0;
}
@@ -679,7 +674,7 @@ retry:
info->ret = ret;
return true;
}
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
info->node = drm_mm_search_free(&man->mm, info->page_size, 0, 0);
if (!info->node) {
vmw_cmdbuf_man_process(man);
@@ -695,12 +690,12 @@ retry:
/* Atomic kmalloc failed? Preload and retry.*/
if (!info->node) {
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
goto retry;
}
out_unlock:
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return !!info->node;
}
@@ -827,9 +822,9 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
return 0;
out_no_cb_header:
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
drm_mm_put_block(header->node);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
return ret;
}
@@ -1051,19 +1046,6 @@ void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
}
/**
- * vmw_cmdbuf_tasklet_schedule - Schedule the interrupt handler bottom half.
- *
- * @man: The command buffer manager.
- */
-void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man)
-{
- if (!man)
- return;
-
- tasklet_schedule(&man->tasklet);
-}
-
-/**
* vmw_cmdbuf_send_device_command - Send a command through the device context.
*
* @man: The command buffer manager.
@@ -1086,9 +1068,9 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
memcpy(cmd, command, size);
header->cb_header->length = size;
header->cb_context = SVGA_CB_CONTEXT_DEVICE;
- spin_lock_bh(&man->lock);
+ spin_lock(&man->lock);
status = vmw_cmdbuf_header_submit(header);
- spin_unlock_bh(&man->lock);
+ spin_unlock(&man->lock);
vmw_cmdbuf_header_free(header);
if (status != SVGA_CB_STATUS_COMPLETED) {
@@ -1261,8 +1243,6 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
spin_lock_init(&man->lock);
mutex_init(&man->cur_mutex);
mutex_init(&man->space_mutex);
- tasklet_init(&man->tasklet, vmw_cmdbuf_man_tasklet,
- (unsigned long) man);
man->default_size = VMW_CMDBUF_INLINE_SIZE;
init_waitqueue_head(&man->alloc_queue);
init_waitqueue_head(&man->idle_queue);
@@ -1332,7 +1312,6 @@ void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
- tasklet_kill(&man->tasklet);
(void) cancel_work_sync(&man->work);
dma_pool_destroy(man->dheaders);
dma_pool_destroy(man->headers);
diff --git a/vmwgfx_drv.c b/vmwgfx_drv.c
index b1e5695..58ecca1 100644
--- a/vmwgfx_drv.c
+++ b/vmwgfx_drv.c
@@ -1502,6 +1502,7 @@ static struct drm_driver driver = {
.irq_postinstall = vmw_irq_postinstall,
.irq_uninstall = vmw_irq_uninstall,
.irq_handler = vmw_irq_handler,
+ .irq_thread_fn = vmw_thread_fn,
.get_vblank_counter = vmw_get_vblank_counter,
.enable_vblank = vmw_enable_vblank,
.disable_vblank = vmw_disable_vblank,
diff --git a/vmwgfx_drv.h b/vmwgfx_drv.h
index cc0cba0..5d3c218 100644
--- a/vmwgfx_drv.h
+++ b/vmwgfx_drv.h
@@ -353,6 +353,12 @@ struct vmw_otable_batch {
struct ttm_buffer_object *otable_bo;
};
+enum {
+ VMW_IRQTHREAD_FENCE,
+ VMW_IRQTHREAD_CMDBUF,
+ VMW_IRQTHREAD_MAX
+};
+
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
@@ -526,6 +532,7 @@ struct vmw_private {
*/
enum vmw_dma_map_mode map_mode;
struct vmw_cmdbuf_man *cman;
+ DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
};
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
@@ -558,24 +565,21 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- unsigned long irq_flags;
u32 val;
- spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
+ spin_lock(&dev_priv->hw_lock);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+ spin_unlock(&dev_priv->hw_lock);
return val;
}
@@ -856,6 +860,7 @@ extern int vmw_validate_single_buffer(struct vmw_private *dev_priv,
*/
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
+extern irqreturn_t vmw_thread_fn(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
uint32_t seqno, bool interruptible,
unsigned long timeout);
@@ -1146,13 +1151,13 @@ extern void *vmw_cmdbuf_reserve(struct vmw_cmdbuf_man *man, size_t size,
extern void vmw_cmdbuf_commit(struct vmw_cmdbuf_man *man, size_t size,
struct vmw_cmdbuf_header *header,
bool flush);
-extern void vmw_cmdbuf_tasklet_schedule(struct vmw_cmdbuf_man *man);
extern void *vmw_cmdbuf_alloc(struct vmw_cmdbuf_man *man,
size_t size, bool interruptible,
struct vmw_cmdbuf_header **p_header);
extern void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header);
extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
+extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
/**
diff --git a/vmwgfx_fence.c b/vmwgfx_fence.c
index e61fa82..545b7d9 100644
--- a/vmwgfx_fence.c
+++ b/vmwgfx_fence.c
@@ -112,13 +112,13 @@ static void vmw_fence_obj_destroy_locked(struct kref *kref)
list_del_init(&fence->head);
num_fences = --fman->num_fence_objects;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
if (fence->destroy)
fence->destroy(fence);
else
kfree(fence);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
}
@@ -140,10 +140,10 @@ static void vmw_fence_work_func(struct work_struct *work)
INIT_LIST_HEAD(&list);
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
list_splice_init(&fman->cleanup_list, &list);
seqno_valid = fman->seqno_valid;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
if (!seqno_valid && fman->goal_irq_on) {
fman->goal_irq_on = false;
@@ -192,15 +192,14 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
lists_empty = list_empty(&fman->fence_list) &&
list_empty(&fman->cleanup_list);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
kfree(fman);
@@ -212,7 +211,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
uint32_t mask,
void (*destroy) (struct vmw_fence_obj *fence))
{
- unsigned long irq_flags;
unsigned int num_fences;
int ret = 0;
@@ -225,7 +223,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
fence->destroy = destroy;
init_waitqueue_head(&fence->queue);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
if (unlikely(fman->fifo_down)) {
ret = -EBUSY;
goto out_unlock;
@@ -234,7 +232,7 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
num_fences = ++fman->num_fence_objects;
out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
return ret;
}
@@ -265,10 +263,10 @@ void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
fman = fence->fman;
*fence_p = NULL;
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
BUG_ON(atomic_read(&fence->kref.refcount) == 0);
kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
}
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
@@ -373,7 +371,6 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
void vmw_fences_update(struct vmw_fence_manager *fman)
{
- unsigned long flags;
struct vmw_fence_obj *fence, *next_fence;
struct list_head action_list;
bool needs_rerun;
@@ -382,7 +379,7 @@ void vmw_fences_update(struct vmw_fence_manager *fman)
seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
- spin_lock_irqsave(&fman->lock, flags);
+ spin_lock(&fman->lock);
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
@@ -400,7 +397,7 @@ rerun:
if (!list_empty(&fman->cleanup_list))
(void) schedule_work(&fman->work);
- spin_unlock_irqrestore(&fman->lock, flags);
+ spin_unlock(&fman->lock);
/*
* Rerun if the fence goal seqno was updated, and the
@@ -421,12 +418,11 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
uint32_t flags)
{
struct vmw_fence_manager *fman = fence->fman;
- unsigned long irq_flags;
uint32_t signaled;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
signaled = fence->signaled;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
flags &= fence->signal_mask;
if ((signaled & flags) == flags)
@@ -435,9 +431,9 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0)
vmw_fences_update(fman);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
signaled = fence->signaled;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
return ((signaled & flags) == flags);
}
@@ -612,7 +608,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
struct list_head action_list;
int ret;
@@ -621,14 +616,14 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
* restart when we've released the fman->lock.
*/
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->fifo_down = true;
while (!list_empty(&fman->fence_list)) {
struct vmw_fence_obj *fence =
list_entry(fman->fence_list.prev, struct vmw_fence_obj,
head);
kref_get(&fence->kref);
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ret = vmw_fence_obj_wait(fence, fence->signal_mask,
false, false,
@@ -644,21 +639,19 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
wake_up_all(&fence->queue);
}
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
BUG_ON(!list_empty(&fence->head));
kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
}
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->fifo_down = false;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
@@ -744,11 +737,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
fman = fence->fman;
arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
- spin_lock_irq(&fman->lock);
+ spin_lock(&fman->lock);
arg->signaled_flags = fence->signaled;
arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock_irq(&fman->lock);
+ spin_unlock(&fman->lock);
ttm_base_object_unref(&base);
@@ -785,10 +778,9 @@ void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
{
struct vmw_event_fence_action *eaction;
struct drm_pending_event *event;
- unsigned long irq_flags;
while (1) {
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
if (list_empty(event_list))
goto out_unlock;
eaction = list_first_entry(event_list,
@@ -797,11 +789,11 @@ void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
list_del_init(&eaction->fpriv_head);
event = eaction->event;
eaction->event = NULL;
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
event->destroy(event);
}
out_unlock:
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
}
@@ -823,13 +815,12 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
struct drm_file *file_priv;
- unsigned long irq_flags;
if (unlikely(event == NULL))
return;
file_priv = event->file_priv;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
+ spin_lock(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
struct timeval tv;
@@ -843,7 +834,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
list_add_tail(&eaction->event->link, &file_priv->event_list);
eaction->event = NULL;
wake_up_all(&file_priv->event_wait);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ spin_unlock(&dev->event_lock);
}
/**
@@ -860,11 +851,10 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
struct vmw_event_fence_action *eaction =
container_of(action, struct vmw_event_fence_action, action);
struct vmw_fence_manager *fman = eaction->fence->fman;
- unsigned long irq_flags;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
list_del(&eaction->fpriv_head);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
vmw_fence_obj_unreference(&eaction->fence);
kfree(eaction);
@@ -884,11 +874,10 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
struct vmw_fence_action *action)
{
struct vmw_fence_manager *fman = fence->fman;
- unsigned long irq_flags;
bool run_update = false;
mutex_lock(&fman->goal_irq_mutex);
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
fman->pending_actions[action->type]++;
if (fence->signaled & DRM_VMW_FENCE_FLAG_EXEC) {
@@ -907,7 +896,7 @@ void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
run_update = vmw_fence_goal_check_locked(fence);
}
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
if (run_update) {
if (!fman->goal_irq_on) {
@@ -945,7 +934,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
struct vmw_event_fence_action *eaction;
struct vmw_fence_manager *fman = fence->fman;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- unsigned long irq_flags;
eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
if (unlikely(eaction == NULL))
@@ -962,9 +950,9 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- spin_lock_irqsave(&fman->lock, irq_flags);
+ spin_lock(&fman->lock);
list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
- spin_unlock_irqrestore(&fman->lock, irq_flags);
+ spin_unlock(&fman->lock);
vmw_fence_obj_add_action(fence, &eaction->action);
@@ -985,16 +973,15 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
{
struct vmw_event_fence_pending *event;
struct drm_device *dev = fence->fman->dev_priv->dev;
- unsigned long irq_flags;
int ret;
- spin_lock_irqsave(&dev->event_lock, irq_flags);
+ spin_lock(&dev->event_lock);
ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
if (likely(ret == 0))
file_priv->event_space -= sizeof(event->event);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ spin_unlock(&dev->event_lock);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate event space for this file.\n");
@@ -1038,9 +1025,9 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
out_no_queue:
event->base.destroy(&event->base);
out_no_event:
- spin_lock_irqsave(&dev->event_lock, irq_flags);
+ spin_lock(&dev->event_lock);
file_priv->event_space += sizeof(*event);
- spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+ spin_unlock(&dev->event_lock);
out_no_space:
return ret;
}
diff --git a/vmwgfx_irq.c b/vmwgfx_irq.c
index e1a4ddb..fffe3db 100644
--- a/vmwgfx_irq.c
+++ b/vmwgfx_irq.c
@@ -30,11 +30,43 @@
#define VMW_FENCE_WRAP (1 << 24)
+/**
+ * vmw_thread_fn - Deferred (process context) irq handler
+ *
+ * @irq: irq number
+ * @arg: Closure argument. Pointer to a struct drm_device cast to void *
+ *
+ * This function implements the deferred part of irq processing.
+ * The function is guaranteed to run at least once after the
+ * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
+ *
+ * This function always returns IRQ_HANDLED regardless of whether it
+ * actually did any work.
+ */
+irqreturn_t vmw_thread_fn(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
+ dev_priv->irqthread_pending)) {
+ vmw_fences_update(dev_priv->fman);
+ wake_up_all(&dev_priv->fence_queue);
+ }
+
+ if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending))
+ vmw_cmdbuf_irqthread(dev_priv->cman);
+
+ return IRQ_HANDLED;
+}
+
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status, masked_status;
+ irqreturn_t ret = IRQ_HANDLED;
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
@@ -45,20 +77,21 @@ irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
if (!status)
return IRQ_NONE;
- if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
- SVGA_IRQFLAG_FENCE_GOAL)) {
- vmw_fences_update(dev_priv->fman);
- wake_up_all(&dev_priv->fence_queue);
- }
-
if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
- if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
- SVGA_IRQFLAG_ERROR))
- vmw_cmdbuf_tasklet_schedule(dev_priv->cman);
+ if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
+ SVGA_IRQFLAG_FENCE_GOAL)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
- return IRQ_HANDLED;
+ if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
+ SVGA_IRQFLAG_ERROR)) &&
+ !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
+ dev_priv->irqthread_pending))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)