/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hyperv.h>
#include <linux/uio.h>

#include "hyperv_vmbus.h"

#define NUM_PAGES_SPANNED(addr, len) \
((PAGE_ALIGN(addr + len) >> PAGE_SHIFT) - (addr >> PAGE_SHIFT))
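
/*
 * Worked example (editorial note, not part of the original source): with
 * 4 KiB pages, a buffer at addr = 0xf00 with len = 0x2200 ends at 0x3100,
 * so PAGE_ALIGN(addr + len) >> PAGE_SHIFT is 4 and addr >> PAGE_SHIFT is 0;
 * NUM_PAGES_SPANNED() therefore reports 4 pages, even though len itself is
 * just over two pages, because the buffer is not page-aligned at either end.
 */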

/*
 * vmbus_setevent - Trigger an event notification on the specified
 * channel.
 */
static void vmbus_setevent(struct vmbus_channel *channel)
{
	struct hv_monitor_page *monitorpage;

	if (channel->offermsg.monitor_allocated) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			(unsigned long *) vmbus_connection.send_int_page +
			(channel->offermsg.child_relid >> 5));

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];

		sync_set_bit(channel->monitor_bit,
			(unsigned long *)&monitorpage->trigger_group
					[channel->monitor_grp].pending);
	} else {
		vmbus_set_event(channel);
	}
}

/*
 * vmbus_open - Open the specified channel.
 */
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
	       void (*onchannelcallback)(void *context), void *context)
{
	struct vmbus_channel_open_channel *open_msg;
	struct vmbus_channel_msginfo *open_info = NULL;
	void *in, *out;
	unsigned long flags;
	int ret, err = 0;
	unsigned long t;

	spin_lock_irqsave(&newchannel->lock, flags);
	if (newchannel->state == CHANNEL_OPEN_STATE) {
		newchannel->state = CHANNEL_OPENING_STATE;
	} else {
		spin_unlock_irqrestore(&newchannel->lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&newchannel->lock, flags);

	newchannel->onchannel_callback = onchannelcallback;
	newchannel->channel_callback_context = context;

	/* Allocate the ring buffer */
	out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
		get_order(send_ringbuffer_size + recv_ringbuffer_size));
	if (!out) {
		err = -ENOMEM;
		goto error0;
	}

	in = (void *)((unsigned long)out + send_ringbuffer_size);

	newchannel->ringbuffer_pages = out;
	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
					    recv_ringbuffer_size) >> PAGE_SHIFT;

	ret = hv_ringbuffer_init(
		&newchannel->outbound, out, send_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	ret = hv_ringbuffer_init(
		&newchannel->inbound, in, recv_ringbuffer_size);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Establish the gpadl for the ring buffer */
	newchannel->ringbuffer_gpadlhandle = 0;

	ret = vmbus_establish_gpadl(newchannel,
				    newchannel->outbound.ring_buffer,
				    send_ringbuffer_size +
				    recv_ringbuffer_size,
				    &newchannel->ringbuffer_gpadlhandle);
	if (ret != 0) {
		err = ret;
		goto error0;
	}

	/* Create and init the channel open message */
	open_info = kmalloc(sizeof(*open_info) +
			    sizeof(struct vmbus_channel_open_channel),
			    GFP_KERNEL);
	if (!open_info) {
		err = -ENOMEM;
		goto error_gpadl;
	}

	init_completion(&open_info->waitevent);

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
	open_msg->openid = newchannel->offermsg.child_relid;
	open_msg->child_relid = newchannel->offermsg.child_relid;
	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
						     PAGE_SHIFT;
	open_msg->target_vp = newchannel->target_vp;

	if (userdatalen > MAX_USER_DEFINED_BYTES) {
		err = -EINVAL;
		goto error_gpadl;
	}

	memcpy(open_msg->userdata, userdata, userdatalen);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&open_info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel));
	if (ret != 0) {
		err = ret;
		goto error1;
	}

	t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
	if (t == 0) {
		err = -ETIMEDOUT;
		goto error1;
	}

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_gpadl;
	}

	newchannel->state = CHANNEL_OPENED_STATE;
	kfree(open_info);
	return 0;

error1:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

error_gpadl:
	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);

error0:
	free_pages((unsigned long)out,
		   get_order(send_ringbuffer_size + recv_ringbuffer_size));
	kfree(open_info);
	newchannel->state = CHANNEL_OPEN_STATE;
	return err;
}
EXPORT_SYMBOL_GPL(vmbus_open);
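
/*
 * Usage sketch (editorial addition, not part of the original file): a VMBus
 * driver typically opens its channel from its probe routine, passing the
 * ring-buffer sizes and a channel callback. The callback name, ring sizes
 * and the absence of error handling below are hypothetical.
 *
 *	static void my_channel_cb(void *context)
 *	{
 *		struct hv_device *dev = context;
 *
 *		// drain the inbound ring with vmbus_recvpacket() here
 *	}
 *
 *	static int my_probe(struct hv_device *dev,
 *			    const struct hv_vmbus_device_id *id)
 *	{
 *		return vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE,
 *				  NULL, 0, my_channel_cb, dev);
 *	}
 */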

/*
 * create_gpadl_header - Creates a gpadl for the specified buffer
 */
static int create_gpadl_header(void *kbuffer, u32 size,
			       struct vmbus_channel_msginfo **msginfo,
			       u32 *messagecount)
{
	int i;
	int pagecount;
	struct vmbus_channel_gpadl_header *gpadl_header;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msgheader;
	struct vmbus_channel_msginfo *msgbody = NULL;
	u32 msgsize;

	int pfnsum, pfncount, pfnleft, pfncurr, pfnsize;

	pagecount = size >> PAGE_SHIFT;

	/* do we need a gpadl body msg */
	pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
		  sizeof(struct vmbus_channel_gpadl_header) -
		  sizeof(struct gpa_range);
	pfncount = pfnsize / sizeof(u64);

	if (pagecount > pfncount) {
		/* we need a gpadl body */
		/* fill in the header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pfncount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (!msgheader)
			goto nomem;

		INIT_LIST_HEAD(&msgheader->submsglist);
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pfncount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;
		*msginfo = msgheader;
		*messagecount = 1;

		pfnsum = pfncount;
		pfnleft = pagecount - pfncount;

		/* how many pfns can we fit */
		pfnsize = MAX_SIZE_CHANNEL_MESSAGE -
			  sizeof(struct vmbus_channel_gpadl_body);
		pfncount = pfnsize / sizeof(u64);

		/* fill in the body */
		while (pfnleft) {
			if (pfnleft > pfncount)
				pfncurr = pfncount;
			else
				pfncurr = pfnleft;

			msgsize = sizeof(struct vmbus_channel_msginfo) +
				  sizeof(struct vmbus_channel_gpadl_body) +
				  pfncurr * sizeof(u64);
			msgbody = kzalloc(msgsize, GFP_KERNEL);

			if (!msgbody) {
				struct vmbus_channel_msginfo *pos = NULL;
				struct vmbus_channel_msginfo *tmp = NULL;
				/*
				 * Free up all the allocated messages.
				 */
				list_for_each_entry_safe(pos, tmp,
					&msgheader->submsglist,
					msglistentry) {

					list_del(&pos->msglistentry);
					kfree(pos);
				}

				goto nomem;
			}

			msgbody->msgsize = msgsize;
			(*messagecount)++;
			gpadl_body =
				(struct vmbus_channel_gpadl_body *)msgbody->msg;

			/*
			 * Gpadl is u32 and we are using a pointer which could
			 * be 64-bit.
			 * This is governed by the guest/host protocol and
			 * so the hypervisor guarantees that this is ok.
			 */
			for (i = 0; i < pfncurr; i++)
				gpadl_body->pfn[i] = slow_virt_to_phys(
					kbuffer + PAGE_SIZE * (pfnsum + i)) >>
					PAGE_SHIFT;

			/* add to msg header */
			list_add_tail(&msgbody->msglistentry,
				      &msgheader->submsglist);
			pfnsum += pfncurr;
			pfnleft -= pfncurr;
		}
	} else {
		/* everything fits in a header */
		msgsize = sizeof(struct vmbus_channel_msginfo) +
			  sizeof(struct vmbus_channel_gpadl_header) +
			  sizeof(struct gpa_range) + pagecount * sizeof(u64);
		msgheader = kzalloc(msgsize, GFP_KERNEL);
		if (msgheader == NULL)
			goto nomem;
		msgheader->msgsize = msgsize;

		gpadl_header = (struct vmbus_channel_gpadl_header *)
			msgheader->msg;
		gpadl_header->rangecount = 1;
		gpadl_header->range_buflen = sizeof(struct gpa_range) +
					     pagecount * sizeof(u64);
		gpadl_header->range[0].byte_offset = 0;
		gpadl_header->range[0].byte_count = size;
		for (i = 0; i < pagecount; i++)
			gpadl_header->range[0].pfn_array[i] = slow_virt_to_phys(
				kbuffer + PAGE_SIZE * i) >> PAGE_SHIFT;

		*msginfo = msgheader;
		*messagecount = 1;
	}

	return 0;
nomem:
	kfree(msgheader);
	kfree(msgbody);
	return -ENOMEM;
}

/*
 * vmbus_establish_gpadl - Establish a GPADL for the specified buffer
 *
 * @channel: a channel
 * @kbuffer: from kmalloc or vmalloc
 * @size: page-size multiple
 * @gpadl_handle: receives the handle of the GPADL that was set up
 */
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
			  u32 size, u32 *gpadl_handle)
{
	struct vmbus_channel_gpadl_header *gpadlmsg;
	struct vmbus_channel_gpadl_body *gpadl_body;
	struct vmbus_channel_msginfo *msginfo = NULL;
	struct vmbus_channel_msginfo *submsginfo;
	u32 msgcount;
	struct list_head *curr;
	u32 next_gpadl_handle;
	unsigned long flags;
	int ret = 0;

	next_gpadl_handle =
		(atomic_inc_return(&vmbus_connection.next_gpadl_handle) - 1);

	ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
	if (ret)
		return ret;

	init_completion(&msginfo->waitevent);

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
	gpadlmsg->child_relid = channel->offermsg.child_relid;
	gpadlmsg->gpadl = next_gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&msginfo->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo));
	if (ret != 0)
		goto cleanup;

	if (msgcount > 1) {
		list_for_each(curr, &msginfo->submsglist) {

			submsginfo = (struct vmbus_channel_msginfo *)curr;
			gpadl_body =
			     (struct vmbus_channel_gpadl_body *)submsginfo->msg;

			gpadl_body->header.msgtype =
				CHANNELMSG_GPADL_BODY;
			gpadl_body->gpadl = next_gpadl_handle;

			ret = vmbus_post_msg(gpadl_body,
					     submsginfo->msgsize -
					     sizeof(*submsginfo));
			if (ret != 0)
				goto cleanup;
		}
	}
	wait_for_completion(&msginfo->waitevent);

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

cleanup:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&msginfo->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(msginfo);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
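
/*
 * Usage sketch (editorial addition, not part of the original file): a driver
 * that wants the host to access a guest buffer directly first establishes a
 * GPADL for it and later tears it down. The buffer below is hypothetical;
 * its size must be a multiple of PAGE_SIZE.
 *
 *	u32 gpadl = 0;
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 2);
 *	int ret;
 *
 *	ret = vmbus_establish_gpadl(channel, buf, 4 * PAGE_SIZE, &gpadl);
 *	if (ret == 0) {
 *		// pass "gpadl" to the host in a device-specific message,
 *		// use the buffer, then release the mapping:
 *		vmbus_teardown_gpadl(channel, gpadl);
 *	}
 */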

/*
 * vmbus_teardown_gpadl - Teardown the specified GPADL handle
 */
int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
{
	struct vmbus_channel_gpadl_teardown *msg;
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kmalloc(sizeof(*info) +
		       sizeof(struct vmbus_channel_gpadl_teardown), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

	msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
	msg->child_relid = channel->offermsg.child_relid;
	msg->gpadl = gpadl_handle;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_gpadl_teardown));
	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

post_msg_err:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	kfree(info);
	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);

static void reset_channel_cb(void *arg)
{
	struct vmbus_channel *channel = arg;

	channel->onchannel_callback = NULL;
}

static int vmbus_close_internal(struct vmbus_channel *channel)
{
	struct vmbus_channel_close_channel *msg;
	int ret;

	channel->state = CHANNEL_OPEN_STATE;
	channel->sc_creation_callback = NULL;
	/* Stop callback and cancel the timer asap */
	if (channel->target_cpu != get_cpu()) {
		put_cpu();
		smp_call_function_single(channel->target_cpu, reset_channel_cb,
					 channel, true);
	} else {
		reset_channel_cb(channel);
		put_cpu();
	}

	/* Send a closing message */

	msg = &channel->close_msg.msg;

	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));

	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
		/*
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
		return ret;
	}

	/* Tear down the gpadl for the channel's ring buffer */
	if (channel->ringbuffer_gpadlhandle) {
		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
		if (ret) {
			pr_err("Close failed: teardown gpadl return %d\n", ret);
			/*
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
			return ret;
		}
	}

	/* Cleanup the ring buffers for this channel */
	hv_ringbuffer_cleanup(&channel->outbound);
	hv_ringbuffer_cleanup(&channel->inbound);

	free_pages((unsigned long)channel->ringbuffer_pages,
		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));

	/*
	 * If the channel has been rescinded, process the device removal.
	 */
	if (channel->rescind)
		hv_process_channel_removal(channel,
					   channel->offermsg.child_relid);
	return ret;
}

/*
 * vmbus_close - Close the specified channel
 */
void vmbus_close(struct vmbus_channel *channel)
{
	struct list_head *cur, *tmp;
	struct vmbus_channel *cur_channel;

	if (channel->primary_channel != NULL) {
		/*
		 * We will only close sub-channels when
		 * the primary is closed.
		 */
		return;
	}
	/*
	 * Close all the sub-channels first and then close the
	 * primary channel.
	 */
	list_for_each_safe(cur, tmp, &channel->sc_list) {
		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
		if (cur_channel->state != CHANNEL_OPENED_STATE)
			continue;
		vmbus_close_internal(cur_channel);
	}
	/*
	 * Now close the primary.
	 */
	vmbus_close_internal(channel);
}
EXPORT_SYMBOL_GPL(vmbus_close);

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags, bool kick_q)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	int ret;
	bool signal = false;

	/* Setup the descriptor */
	desc.type = type; /* VmbusPacketTypeDataInBand; */
	desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */
	/* in 8-byte granularity */
	desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3;
	desc.len8 = (u16)(packetlen_aligned >> 3);
	desc.trans_id = requestid;

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = sizeof(struct vmpacket_descriptor);
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	/*
	 * Signalling the host is conditional on many factors:
	 * 1. The ring state changed from being empty to non-empty.
	 *    This is tracked by the variable "signal".
	 * 2. The variable kick_q tracks if more data will be placed
	 *    on the ring. We will not signal if more data is
	 *    to be placed.
	 *
	 * If we cannot write to the ring-buffer, signal the host
	 * even if we may not have written anything. This is a rare
	 * enough condition that it should not matter.
	 */
	if (((ret == 0) && kick_q && signal) || (ret))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

/**
 * vmbus_sendpacket() - Send the specified buffer on the given channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer containing the data to send.
 * @bufferlen: Length of the data in @buffer.
 * @requestid: Identifier of the request
 * @type: Type of packet that is being sent e.g. negotiate, time
 *	  packet etc.
 * @flags: Packet flags, e.g. VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED.
 *
 * Sends data in @buffer directly to Hyper-V via the vmbus.
 * This will send the data unparsed to Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u64 requestid,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
				    type, flags, true);
}
EXPORT_SYMBOL(vmbus_sendpacket);
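
/*
 * Usage sketch (editorial addition, not part of the original file): sending a
 * small in-band request and asking the host to complete it. The request
 * structure, opcode and the use of the request's address as the transaction
 * id are hypothetical.
 *
 *	struct my_request req = { .op = MY_OP_QUERY };
 *	int ret;
 *
 *	ret = vmbus_sendpacket(channel, &req, sizeof(req),
 *			       (unsigned long)&req, VM_PKT_DATA_INBAND,
 *			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
 */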

/*
 * vmbus_sendpacket_pagebuffer_ctl - Send a range of single-page buffer
 * packets using a GPADL Direct packet type. This interface allows the
 * caller to control notifying the host, which is useful when sending
 * batched data. The sender can also control the send flags explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid,
				    u32 flags,
				    bool kick_q)
{
	int ret;
	int i;
	struct vmbus_channel_packet_page_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_page_buffer) -
		   ((MAX_PAGE_BUFFER_COUNT - pagecount) *
		   sizeof(struct hv_page_buffer));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = flags;
	desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = pagecount;

	for (i = 0; i < pagecount; i++) {
		desc.range[i].len = pagebuffers[i].len;
		desc.range[i].offset = pagebuffers[i].offset;
		desc.range[i].pfn = pagebuffers[i].pfn;
	}

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	/*
	 * Signalling the host is conditional on many factors:
	 * 1. The ring state changed from being empty to non-empty.
	 *    This is tracked by the variable "signal".
	 * 2. The variable kick_q tracks if more data will be placed
	 *    on the ring. We will not signal if more data is
	 *    to be placed.
	 *
	 * If we cannot write to the ring-buffer, signal the host
	 * even if we may not have written anything. This is a rare
	 * enough condition that it should not matter.
	 */
	if (((ret == 0) && kick_q && signal) || (ret))
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

/*
 * vmbus_sendpacket_pagebuffer - Send a range of single-page buffer
 * packets using a GPADL Direct packet type.
 */
int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
				struct hv_page_buffer pagebuffers[],
				u32 pagecount, void *buffer, u32 bufferlen,
				u64 requestid)
{
	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
					       buffer, bufferlen, requestid,
					       flags, true);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
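
/*
 * Usage sketch (editorial addition, not part of the original file): describing
 * one page of guest memory to the host with a GPADL-direct packet. The data
 * pointer, header and request id are hypothetical; each hv_page_buffer entry
 * carries a pfn plus an offset and length within that page.
 *
 *	struct hv_page_buffer pb;
 *	int ret;
 *
 *	pb.pfn = virt_to_phys(data) >> PAGE_SHIFT;
 *	pb.offset = offset_in_page(data);
 *	pb.len = len;
 *
 *	ret = vmbus_sendpacket_pagebuffer(channel, &pb, 1,
 *					  &hdr, sizeof(hdr), req_id);
 */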

/*
 * vmbus_sendpacket_mpb_desc - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 * The buffer includes the vmbus descriptor.
 */
int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
			      struct vmbus_packet_mpb_array *desc,
			      u32 desc_size,
			      void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc->type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc->dataoffset8 = desc_size >> 3; /* in 8-byte granularity */
	desc->length8 = (u16)(packetlen_aligned >> 3);
	desc->transactionid = requestid;
	desc->rangecount = 1;

	bufferlist[0].iov_base = desc;
	bufferlist[0].iov_len = desc_size;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

/*
 * vmbus_sendpacket_multipagebuffer - Send a multi-page buffer packet
 * using a GPADL Direct packet type.
 */
int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
				     struct hv_multipage_buffer *multi_pagebuffer,
				     void *buffer, u32 bufferlen, u64 requestid)
{
	int ret;
	struct vmbus_channel_packet_multipage_buffer desc;
	u32 descsize;
	u32 packetlen;
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool signal = false;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

	if (pfncount > MAX_MULTIPAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_multipage_buffer is
	 * the largest size we support
	 */
	descsize = sizeof(struct vmbus_channel_packet_multipage_buffer) -
		   ((MAX_MULTIPAGE_BUFFER_COUNT - pfncount) *
		   sizeof(u64));
	packetlen = descsize + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));

	/* Setup the descriptor */
	desc.type = VM_PKT_DATA_USING_GPA_DIRECT;
	desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;
	desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */
	desc.length8 = (u16)(packetlen_aligned >> 3);
	desc.transactionid = requestid;
	desc.rangecount = 1;

	desc.range.len = multi_pagebuffer->len;
	desc.range.offset = multi_pagebuffer->offset;

	memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array,
	       pfncount * sizeof(u64));

	bufferlist[0].iov_base = &desc;
	bufferlist[0].iov_len = descsize;
	bufferlist[1].iov_base = buffer;
	bufferlist[1].iov_len = bufferlen;
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	ret = hv_ringbuffer_write(&channel->outbound, bufferlist, 3, &signal);

	if (ret == 0 && signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);

/**
 * vmbus_recvpacket() - Retrieve the user packet on the specified channel
 * @channel: Pointer to vmbus_channel structure.
 * @buffer: Pointer to the buffer you want to receive the data into.
 * @bufferlen: Maximum size of what the buffer will hold
 * @buffer_actual_len: The actual size of the data after it was received
 * @requestid: Identifier of the request
 *
 * Receives directly from the Hyper-V vmbus and puts the data it received
 * into @buffer. This will receive the data unparsed from Hyper-V.
 *
 * Mainly used by Hyper-V drivers.
 */
int vmbus_recvpacket(struct vmbus_channel *channel, void *buffer,
		     u32 bufferlen, u32 *buffer_actual_len, u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	u32 userlen;
	int ret;
	bool signal = false;

	*buffer_actual_len = 0;
	*requestid = 0;

	ret = hv_ringbuffer_peek(&channel->inbound, &desc,
				 sizeof(struct vmpacket_descriptor));
	if (ret != 0)
		return 0;

	packetlen = desc.len8 << 3;
	userlen = packetlen - (desc.offset8 << 3);

	*buffer_actual_len = userlen;

	if (userlen > bufferlen) {
		pr_err("Buffer too small - got %d needs %d\n",
		       bufferlen, userlen);
		return -ETOOSMALL;
	}

	*requestid = desc.trans_id;

	/* Copy over the packet to the user buffer */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, userlen,
				 (desc.offset8 << 3), &signal);

	if (signal)
		vmbus_setevent(channel);

	return 0;
}
EXPORT_SYMBOL(vmbus_recvpacket);
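
/*
 * Usage sketch (editorial addition, not part of the original file): a channel
 * callback usually drains the inbound ring with vmbus_recvpacket() until a
 * read reports zero length. The buffer size and packet handler below are
 * hypothetical.
 *
 *	static void my_channel_cb(void *context)
 *	{
 *		struct hv_device *dev = context;
 *		u8 buf[256];
 *		u32 len;
 *		u64 req_id;
 *
 *		while (vmbus_recvpacket(dev->channel, buf, sizeof(buf),
 *					&len, &req_id) == 0 && len)
 *			my_handle_packet(dev, buf, len, req_id);
 *	}
 */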

/*
 * vmbus_recvpacket_raw - Retrieve the raw packet on the specified channel
 */
int vmbus_recvpacket_raw(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u32 *buffer_actual_len,
			 u64 *requestid)
{
	struct vmpacket_descriptor desc;
	u32 packetlen;
	int ret;
	bool signal = false;

	*buffer_actual_len = 0;
	*requestid = 0;

	ret = hv_ringbuffer_peek(&channel->inbound, &desc,
				 sizeof(struct vmpacket_descriptor));
	if (ret != 0)
		return 0;

	packetlen = desc.len8 << 3;

	*buffer_actual_len = packetlen;

	if (packetlen > bufferlen)
		return -ENOBUFS;

	*requestid = desc.trans_id;

	/* Copy over the entire packet to the user buffer */
	ret = hv_ringbuffer_read(&channel->inbound, buffer, packetlen, 0,
				 &signal);

	if (signal)
		vmbus_setevent(channel);

	return ret;
}
EXPORT_SYMBOL_GPL(vmbus_recvpacket_raw);