author | K. Y. Srinivasan <kys@microsoft.com> | 2014-04-08 18:45:54 -0700
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2014-05-03 19:24:26 -0400
commit | 3a28fa35d6658703cd26f9c16aaea0eae06afd40
tree | bcc8f07bbd9365e0a270c85af0e50ce3e1682987 /drivers/hv/channel_mgmt.c
parent | d3ba720dd58cdf6630fee4b89482c465d5ad0d0f
Drivers: hv: vmbus: Implement per-CPU mapping of relid to channel
Currently, the mapping from relid to channel is done under the protection of a
single spin lock. Starting with WS2012 hosts, each channel is bound to a specific
VCPU in the guest. Use this binding to eliminate the spin lock by setting up
per-CPU state for mapping relid to channel.
Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Reviewed-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
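To illustrate what the per-CPU binding buys, here is a minimal sketch of the lock-free lookup it enables on the interrupt path: each VCPU walks only its own list, so the global channel spin lock is no longer needed to resolve a relid. The helper name pcpu_relid2channel is hypothetical, and the hv_context.percpu_list / channel->percpu_list layout is assumed from the fields this patch introduces; the actual lookup change is not part of this diff.

/*
 * Illustrative sketch only (not part of this patch): resolve a relid to its
 * channel by walking the current CPU's list.  Assumes hv_context.percpu_list
 * and channel->percpu_list as set up below; the helper name is hypothetical.
 */
static struct vmbus_channel *pcpu_relid2channel(u32 relid)
{
	struct vmbus_channel *channel;
	struct vmbus_channel *found = NULL;
	int cpu = smp_processor_id();

	/* Only the local CPU's list is touched, so no lock is taken. */
	list_for_each_entry(channel, &hv_context.percpu_list[cpu], percpu_list) {
		if (channel->offermsg.child_relid == relid) {
			found = channel;
			break;
		}
	}

	return found;
}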
Diffstat (limited to 'drivers/hv/channel_mgmt.c')
-rw-r--r-- | drivers/hv/channel_mgmt.c | 41
1 file changed, 40 insertions(+), 1 deletion(-)
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 6f7fdd9a7e77..6c8b032cacba 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -149,6 +149,7 @@ static struct vmbus_channel *alloc_channel(void)
 
 	spin_lock_init(&channel->sc_lock);
 	INIT_LIST_HEAD(&channel->sc_list);
+	INIT_LIST_HEAD(&channel->percpu_list);
 
 	channel->controlwq = create_workqueue("hv_vmbus_ctl");
 	if (!channel->controlwq) {
@@ -188,7 +189,20 @@ static void free_channel(struct vmbus_channel *channel)
 	queue_work(vmbus_connection.work_queue, &channel->work);
 }
 
+static void percpu_channel_enq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+	int cpu = smp_processor_id();
+
+	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
+}
 
+static void percpu_channel_deq(void *arg)
+{
+	struct vmbus_channel *channel = arg;
+
+	list_del(&channel->percpu_list);
+}
 
 /*
  * vmbus_process_rescind_offer -
@@ -210,6 +224,12 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
 	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
 	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
 
+	if (channel->target_cpu != smp_processor_id())
+		smp_call_function_single(channel->target_cpu,
+					 percpu_channel_deq, channel, true);
+	else
+		percpu_channel_deq(channel);
+
 	if (channel->primary_channel == NULL) {
 		spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
 		list_del(&channel->listentry);
@@ -245,6 +265,7 @@ static void vmbus_process_offer(struct work_struct *work)
 							work);
 	struct vmbus_channel *channel;
 	bool fnew = true;
+	bool enq = false;
 	int ret;
 	unsigned long flags;
 
@@ -264,12 +285,22 @@ static void vmbus_process_offer(struct work_struct *work)
 		}
 	}
 
-	if (fnew)
+	if (fnew) {
 		list_add_tail(&newchannel->listentry,
 			      &vmbus_connection.chn_list);
+		enq = true;
+	}
 
 	spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
 
+	if (enq) {
+		if (newchannel->target_cpu != smp_processor_id())
+			smp_call_function_single(newchannel->target_cpu,
+						 percpu_channel_enq,
+						 newchannel, true);
+		else
+			percpu_channel_enq(newchannel);
+	}
 	if (!fnew) {
 		/*
 		 * Check to see if this is a sub-channel.
@@ -282,6 +313,14 @@ static void vmbus_process_offer(struct work_struct *work)
 			spin_lock_irqsave(&channel->sc_lock, flags);
 			list_add_tail(&newchannel->sc_list, &channel->sc_list);
 			spin_unlock_irqrestore(&channel->sc_lock, flags);
+
+			if (newchannel->target_cpu != smp_processor_id())
+				smp_call_function_single(newchannel->target_cpu,
+							 percpu_channel_enq,
+							 newchannel, true);
+			else
+				percpu_channel_enq(newchannel);
+
 			newchannel->state = CHANNEL_OPEN_STATE;
 			if (channel->sc_creation_callback != NULL)
 				channel->sc_creation_callback(newchannel);
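A note on the dispatch pattern the patch repeats at each call site: the per-CPU list is only ever modified on the CPU that owns it, either directly when the caller already runs there, or via smp_call_function_single() with wait set to true so the caller blocks until the function has run on the bound VCPU. A condensed sketch of that pattern follows; the helper name is hypothetical, as the patch open-codes the logic at each site.

/*
 * Hypothetical condensation of the pattern used in this patch: run fn(channel)
 * on the channel's bound CPU, waiting for completion if an IPI is needed.
 */
static void run_on_channel_cpu(struct vmbus_channel *channel, void (*fn)(void *))
{
	if (channel->target_cpu != smp_processor_id())
		/* Execute on the target VCPU and wait until it finishes. */
		smp_call_function_single(channel->target_cpu, fn, channel, true);
	else
		fn(channel);
}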