commit ca541acc98931d7b878d96506d6974513190ce5f
tree   d15d04bb62a92541d1b144f1f452e8c41aa9533a
parent 51118697b54cc97a0d2d009cf5f976c5c99f3734
Author:     Stephen Hemminger <sthemmin@microsoft.com>
AuthorDate: 2017-01-13 09:19:32 -0800
Commit:     Stephen Hemminger <sthemmin@microsoft.com>
CommitDate: 2017-01-13 09:37:38 -0800
vmbus: remove conditional locking of vmbus_write
All current users of the vmbus write path set the acquire_lock flag, so making the lock optional is unnecessary. Removing the conditional locking also fixes a sparse warning, since sparse cannot verify a function that acquires and releases a lock conditionally.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
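
Note: the following is an illustrative sketch, not part of the patch. "demo_ring", "demo_write_conditional" and "demo_write" are hypothetical stand-ins, not driver symbols; the point is only the locking shape that sparse objects to. Sparse tracks lock context per function, and when both the acquire and the release are guarded by a runtime flag it cannot prove the context is balanced, so it reports a context imbalance even though the code behaves correctly. Taking the lock unconditionally, as this patch does, makes the balance statically provable.

	/*
	 * Illustrative only -- hypothetical types and names, not from the
	 * Hyper-V code. Shows the pattern before and after this change.
	 */
	#include <linux/spinlock.h>

	struct demo_ring {
		spinlock_t ring_lock;
	};

	/* Before: conditional locking; sparse cannot prove acquire/release balance. */
	static int demo_write_conditional(struct demo_ring *ring, bool lock)
	{
		unsigned long flags;

		if (lock)
			spin_lock_irqsave(&ring->ring_lock, flags);

		/* ... copy packet data into the ring buffer ... */

		if (lock)
			spin_unlock_irqrestore(&ring->ring_lock, flags);
		return 0;
	}

	/* After: the lock is always taken, so the lock context is balanced. */
	static int demo_write(struct demo_ring *ring)
	{
		unsigned long flags;

		spin_lock_irqsave(&ring->ring_lock, flags);
		/* ... copy packet data into the ring buffer ... */
		spin_unlock_irqrestore(&ring->ring_lock, flags);
		return 0;
	}
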
-rw-r--r--  drivers/hv/channel.c       | 13
-rw-r--r--  drivers/hv/channel_mgmt.c  |  1
-rw-r--r--  drivers/hv/hyperv_vmbus.h  |  3
-rw-r--r--  drivers/hv/ring_buffer.c   | 11
-rw-r--r--  include/linux/hyperv.h     | 15
5 files changed, 9 insertions, 34 deletions
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index e26285cde8e0d0..bae50c9aac6ed4 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -651,7 +651,6 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
int num_vecs = ((bufferlen != 0) ? 3 : 1);
@@ -670,7 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, num_vecs, lock);
+ return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);
@@ -718,12 +717,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
if (pagecount > MAX_PAGE_BUFFER_COUNT)
return -EINVAL;
-
/*
* Adjust the size down since vmbus_channel_packet_page_buffer is the
* largest size we support
@@ -755,7 +752,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+ return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);
@@ -790,7 +787,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
packetlen = desc_size + bufferlen;
packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -810,7 +806,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+ return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);
@@ -828,7 +824,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
u32 packetlen_aligned;
struct kvec bufferlist[3];
u64 aligned_data = 0;
- bool lock = channel->acquire_ring_lock;
u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
multi_pagebuffer->len);
@@ -867,7 +862,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
bufferlist[2].iov_base = &aligned_data;
bufferlist[2].iov_len = (packetlen_aligned - packetlen);
- return hv_ringbuffer_write(channel, bufferlist, 3, lock);
+ return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0af7e39006c8e9..1e656097662f92 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -303,7 +303,6 @@ static struct vmbus_channel *alloc_channel(void)
if (!channel)
return NULL;
- channel->acquire_ring_lock = true;
spin_lock_init(&channel->inbound_lock);
spin_lock_init(&channel->lock);
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 7ffdf7ebcdc9dd..9ae0bebfcfb340 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -526,8 +526,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);
int hv_ringbuffer_write(struct vmbus_channel *channel,
- struct kvec *kv_list,
- u32 kv_count, bool lock);
+ struct kvec *kv_list, u32 kv_count);
int hv_ringbuffer_read(struct vmbus_channel *channel,
void *buffer, u32 buflen, u32 *buffer_actual_len,
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 2325242029c560..04b5c5fce7aef7 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -256,7 +256,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
- struct kvec *kv_list, u32 kv_count, bool lock)
+ struct kvec *kv_list, u32 kv_count)
{
int i = 0;
u32 bytes_avail_towrite;
@@ -276,8 +276,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
totalbytes_towrite += sizeof(u64);
- if (lock)
- spin_lock_irqsave(&outring_info->ring_lock, flags);
+ spin_lock_irqsave(&outring_info->ring_lock, flags);
bytes_avail_towrite = hv_get_bytes_to_write(outring_info);
@@ -287,8 +286,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
* is empty since the read index == write index.
*/
if (bytes_avail_towrite <= totalbytes_towrite) {
- if (lock)
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
return -EAGAIN;
}
@@ -317,8 +315,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
/* Now, update the write location */
outring_info->ring_buffer->write_index = next_write_location;
- if (lock)
- spin_unlock_irqrestore(&outring_info->ring_lock, flags);
+ spin_unlock_irqrestore(&outring_info->ring_lock, flags);
hv_signal_on_write(old_write, channel);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b691a6530e8566..6a748f7406c66a 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -832,16 +832,6 @@ struct vmbus_channel {
*/
struct list_head percpu_list;
/*
- * On the channel send side, many of the VMBUS
- * device drivers explicity serialize access to the
- * outgoing ring buffer. Give more control to the
- * VMBUS device drivers in terms how to serialize
- * accesss to the outgoing ring buffer.
- * The default behavior will be to aquire the
- * ring lock to preserve the current behavior.
- */
- bool acquire_ring_lock;
- /*
* For performance critical channels (storage, networking
* etc,), Hyper-V has a mechanism to enhance the throughput
* at the expense of latency:
@@ -881,11 +871,6 @@ struct vmbus_channel {
};
-static inline void set_channel_lock_state(struct vmbus_channel *c, bool state)
-{
- c->acquire_ring_lock = state;
-}
-
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
{
return !!(c->offermsg.offer.chn_flags &