/*
 * Copyright (c) 2012, Microsoft Corporation.
 *
 * Author:
 *   K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mman.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/memory_hotplug.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/percpu_counter.h>

#include <linux/hyperv.h>
/*
 * We begin with definitions supporting the Dynamic Memory protocol
 * with the host.
 *
 * Begin protocol definitions.
 */

/*
 * Protocol versions. The low word is the minor version, the high word
 * is the major version.
 *
 * Changed to 0.1 on 2009/03/25
 * Changed to 0.2 on 2009/05/14
 * Changed to 0.3 on 2009/12/03
 * Changed to 1.0 on 2011/04/05
 */
#define DYNMEM_MAKE_VERSION(Major, Minor) ((__u32)(((Major) << 16) | (Minor)))
#define DYNMEM_MAJOR_VERSION(Version) ((__u32)(Version) >> 16)
#define DYNMEM_MINOR_VERSION(Version) ((__u32)(Version) & 0xff)
enum {
	DYNMEM_PROTOCOL_VERSION_1 = DYNMEM_MAKE_VERSION(0, 3),
	DYNMEM_PROTOCOL_VERSION_2 = DYNMEM_MAKE_VERSION(1, 0),

	DYNMEM_PROTOCOL_VERSION_WIN7 = DYNMEM_PROTOCOL_VERSION_1,
	DYNMEM_PROTOCOL_VERSION_WIN8 = DYNMEM_PROTOCOL_VERSION_2,

	DYNMEM_PROTOCOL_VERSION_CURRENT = DYNMEM_PROTOCOL_VERSION_WIN8
};

union dm_version {
	struct {
		__u16 minor_version;
		__u16 major_version;
	};
	__u32 version;
} __packed;
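/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * how the DYNMEM_* macros compose and decompose a version word. WIN8
 * (protocol 1.0) encodes as 0x00010000 and WIN7 (protocol 0.3) as
 * 0x00000003.
 */
static inline void dynmem_version_example(void)
{
	__u32 v = DYNMEM_MAKE_VERSION(1, 0);	/* 0x00010000 */

	pr_info("version %x: major %u minor %u\n", v,
		DYNMEM_MAJOR_VERSION(v), DYNMEM_MINOR_VERSION(v));
}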
enum dm_message_type {
	/*
	 * Version 0.3
	 */
	DM_ERROR = 0,
	DM_VERSION_REQUEST = 1,
	DM_VERSION_RESPONSE = 2,
	DM_CAPABILITIES_REPORT = 3,
	DM_CAPABILITIES_RESPONSE = 4,
	DM_STATUS_REPORT = 5,
	DM_BALLOON_REQUEST = 6,
	DM_BALLOON_RESPONSE = 7,
	DM_UNBALLOON_REQUEST = 8,
	DM_UNBALLOON_RESPONSE = 9,
	DM_MEM_HOT_ADD_REQUEST = 10,
	DM_MEM_HOT_ADD_RESPONSE = 11,
	DM_VERSION_03_MAX = 11,
	/*
	 * Version 1.0.
	 */
	DM_INFO_MESSAGE = 12,
	DM_VERSION_1_MAX = 12
};
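/*
 * Typical exchange (added for clarity, not part of the original file):
 * the guest sends DM_VERSION_REQUEST and then DM_CAPABILITIES_REPORT
 * and waits for the corresponding responses; after that the host may
 * send DM_BALLOON_REQUEST, DM_UNBALLOON_REQUEST and
 * DM_MEM_HOT_ADD_REQUEST at any time, while the guest posts
 * unsolicited DM_STATUS_REPORT messages roughly once a second.
 */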
/*
 * Structures defining the dynamic memory management interaction
 * between the host and the guest.
 */

union dm_caps {
	struct {
		__u64 balloon:1;
		__u64 hot_add:1;
		/*
		 * To support guests that may have alignment
		 * limitations on hot-add, the guest can specify
		 * its alignment requirements; a value of n
		 * represents an alignment of 2^n megabytes.
		 */
		__u64 hot_add_alignment:4;
		__u64 reservedz:58;
	} cap_bits;
	__u64 caps;
} __packed;
union dm_mem_page_range {
	struct {
		/*
		 * The PFN of the first page in the range;
		 * 40 bits is the architectural limit of a PFN
		 * number for AMD64.
		 */
		__u64 start_page:40;
		/*
		 * The number of pages in the range.
		 */
		__u64 page_cnt:24;
	} finfo;
	__u64 page_range;
} __packed;
/*
 * The header for all dynamic memory messages:
 *
 * type: Type of the message.
 * size: Size of the message in bytes, including the header.
 * trans_id: The guest is responsible for manufacturing this ID.
 */

struct dm_header {
	__u16 type;
	__u16 size;
	__u32 trans_id;
} __packed;

/*
 * A generic message format for dynamic memory.
 * Specific message formats are defined later in the file.
 */

struct dm_message {
	struct dm_header hdr;
	__u8 data[]; /* enclosed message */
} __packed;
/*
 * Specific message types supporting the dynamic memory protocol.
 */

/*
 * Version negotiation message. Sent from the guest to the host.
 * The guest is free to try different versions until the host
 * accepts the version.
 *
 * dm_version: The protocol version requested.
 * is_last_attempt: If TRUE, this is the last version the guest will request.
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_request {
	struct dm_header hdr;
	union dm_version version;
	__u32 is_last_attempt:1;
	__u32 reservedz:31;
} __packed;
/*
 * Version response message; sent from the host to the guest.
 * Indicates whether the host has accepted the version sent by
 * the guest.
 *
 * is_accepted: If TRUE, the host has accepted the version and the guest
 * should proceed to the next stage of the protocol. FALSE indicates that
 * the guest should retry with a different version.
 *
 * reservedz: Reserved field, set to zero.
 */

struct dm_version_response {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
/*
 * Message reporting capabilities. This is sent from the guest to the
 * host.
 */

struct dm_capabilities {
	struct dm_header hdr;
	union dm_caps caps;
	__u64 min_page_cnt;
	__u64 max_page_number;
} __packed;
/*
 * Response to the capabilities message. This is sent from the host to the
 * guest. This message notifies if the host has accepted the guest's
 * capabilities. If the host has not accepted, the guest must shut down
 * the service.
 *
 * is_accepted: Indicates if the host has accepted the guest's capabilities.
 * reservedz: Must be 0.
 */

struct dm_capabilities_resp_msg {
	struct dm_header hdr;
	__u64 is_accepted:1;
	__u64 reservedz:63;
} __packed;
/*
 * This message is used to report memory pressure from the guest.
 * This message is not part of any transaction and there is no
 * response to this message.
 *
 * num_avail: Available memory in pages.
 * num_committed: Committed memory in pages.
 * page_file_size: The accumulated size of all page files
 * in the system in pages.
 * zero_free: The number of zero and free pages.
 * page_file_writes: The writes to the page file in pages.
 * io_diff: An indicator of file cache efficiency or page file activity,
 * calculated as File Cache Page Fault Count - Page Read Count.
 * This value is in pages.
 *
 * Some of these metrics are Windows specific and fortunately
 * the algorithm on the host side that computes the guest memory
 * pressure only uses the num_committed value.
 */

struct dm_status {
	struct dm_header hdr;
	__u64 num_avail;
	__u64 num_committed;
	__u64 page_file_size;
	__u64 zero_free;
	__u32 page_file_writes;
	__u32 io_diff;
} __packed;
/*
 * Message to ask the guest to allocate memory - balloon up message.
 * This message is sent from the host to the guest. The guest may not be
 * able to allocate as much memory as requested.
 *
 * num_pages: number of pages to allocate.
 */

struct dm_balloon {
	struct dm_header hdr;
	__u32 num_pages;
	__u32 reservedz;
} __packed;
/*
 * Balloon response message; this message is sent from the guest
 * to the host in response to the balloon message.
 *
 * reservedz: Reserved; must be set to zero.
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 */

struct dm_balloon_response {
	struct dm_header hdr;
	__u32 reservedz;
	__u32 more_pages:1;
	__u32 range_count:31;
	union dm_mem_page_range range_array[];
} __packed;
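/*
 * Note (added for clarity, not part of the original file): the response
 * is variable length. balloon_up() below appends one
 * union dm_mem_page_range per allocation to range_array[] and grows
 * hdr.size by sizeof(union dm_mem_page_range) each time, capping the
 * whole message at PAGE_SIZE.
 */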
/*
 * Un-balloon message; this message is sent from the host
 * to the guest to give the guest more memory.
 *
 * more_pages: If FALSE, this is the last message of the transaction.
 * If TRUE, there will be at least one more message from the guest.
 *
 * reservedz: Reserved; must be set to zero.
 *
 * range_count: The number of ranges in the range array.
 *
 * range_array: An array of page ranges returned to the host.
 */

struct dm_unballoon_request {
	struct dm_header hdr;
	__u32 more_pages:1;
	__u32 reservedz:31;
	__u32 range_count;
	union dm_mem_page_range range_array[];
} __packed;
/*
 * Un-balloon response message; this message is sent from the guest
 * to the host in response to an unballoon request.
 */

struct dm_unballoon_response {
	struct dm_header hdr;
} __packed;
/*
 * Hot add request message. Message sent from the host to the guest.
 *
 * mem_range: Memory range to hot add.
 *
 * On Linux we currently don't support this since we cannot hot add
 * arbitrary granularity of memory.
 */

struct dm_hot_add {
	struct dm_header hdr;
	union dm_mem_page_range range;
} __packed;
/*
 * Hot add response message.
 * This message is sent by the guest to report the status of a hot add request.
 * If page_count is less than the requested page count, then the host should
 * assume all further hot add requests will fail, since this indicates that
 * the guest has hit an upper physical memory barrier.
 *
 * Hot adds may also fail due to low resources; in this case, the guest must
 * not complete this message until the hot add can succeed, and the host must
 * not send a new hot add request until the response is sent.
 * If the VSC fails to hot add memory DYNMEM_NUMBER_OF_UNSUCCESSFUL_HOTADD_ATTEMPTS
 * times, it fails the request.
 *
 * page_count: number of pages that were successfully hot added.
 *
 * result: result of the operation; 1: success, 0: failure.
 */

struct dm_hot_add_response {
	struct dm_header hdr;
	__u32 page_count;
	__u32 result;
} __packed;
/*
 * Types of information sent from host to the guest.
 */

enum dm_info_type {
	INFO_TYPE_MAX_PAGE_CNT = 0,
	MAX_INFO_TYPE
};

/*
 * Header for the information message.
 */

struct dm_info_header {
	enum dm_info_type type;
	__u32 data_size;
} __packed;

/*
 * This message is sent from the host to the guest to pass
 * some relevant information (win8 addition).
 *
 * reserved: not used.
 * info_size: size of the information blob.
 * info: information blob.
 */

struct dm_info_msg {
	struct dm_header hdr;
	__u32 reserved;
	__u32 info_size;
	__u8 info[];
};
/*
 * End protocol definitions.
 */

/*
 * State to manage hot adding memory into the guest.
 * The range start_pfn : end_pfn specifies the range
 * that the host has asked us to hot add. The range
 * start_pfn : ha_end_pfn specifies the range that we have
 * currently hot added. We hot add in multiples of 128M
 * chunks; it is possible that we may not be able to bring
 * online all the pages in the region. The range
 * covered_start_pfn : covered_end_pfn defines the pages that can
 * be brought online.
 */

struct hv_hotadd_state {
	struct list_head list;
	unsigned long start_pfn;
	unsigned long covered_start_pfn;
	unsigned long covered_end_pfn;
	unsigned long ha_end_pfn;
	unsigned long end_pfn;
};
struct balloon_state {
	__u32 num_pages;
	struct work_struct wrk;
};

struct hot_add_wrk {
	union dm_mem_page_range ha_page_range;
	union dm_mem_page_range ha_region_range;
	struct work_struct wrk;
};
static bool hot_add = true;
static bool do_hot_add;
/*
 * Delay reporting memory pressure by
 * the specified number of seconds.
 */
static uint pressure_report_delay = 45;

/*
 * The last time we posted a pressure report to the host.
 */
static unsigned long last_post_time;

module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");

module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");

static atomic_t trans_id = ATOMIC_INIT(0);
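/*
 * Note (added for clarity, not part of the original file): every
 * outgoing message carries a fresh ID from atomic_inc_return(); in
 * post_status() the counter is re-read after the message is built so
 * a report that raced with another sender can be quietly dropped.
 */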
static int dm_ring_size = (5 * PAGE_SIZE);

/*
 * Driver specific state.
 */

enum hv_dm_state {
	DM_INITIALIZING = 0,
	DM_INITIALIZED,
	DM_BALLOON_UP,
	DM_BALLOON_DOWN,
	DM_HOT_ADD,
	DM_INIT_ERROR
};

static __u8 recv_buffer[PAGE_SIZE];
static __u8 *send_buffer;
#define PAGES_IN_2M 512
#define HA_CHUNK (32 * 1024)
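/*
 * Sizing sketch (added for clarity, not part of the original file):
 * with 4 KB pages, PAGES_IN_2M covers one 2 MB balloon allocation
 * unit and HA_CHUNK covers one 32768-page == 128 MB hot-add chunk,
 * matching the 2^7 MB alignment advertised in balloon_probe().
 */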
struct hv_dynmem_device {
	struct hv_device *dev;
	enum hv_dm_state state;
	struct completion host_event;
	struct completion config_event;

	/*
	 * Number of pages we have currently ballooned out.
	 */
	unsigned int num_pages_ballooned;
	unsigned int num_pages_onlined;
	unsigned int num_pages_added;

	/*
	 * State to manage the ballooning (up) operation.
	 */
	struct balloon_state balloon_wrk;

	/*
	 * State to execute the "hot-add" operation.
	 */
	struct hot_add_wrk ha_wrk;

	/*
	 * This state tracks if the host has specified a hot-add
	 * region.
	 */
	bool host_specified_ha_region;

	/*
	 * State to synchronize hot-add.
	 */
	struct completion ol_waitevent;
	bool ha_waiting;

	/*
	 * This thread handles hot-add
	 * requests from the host as well as notifying
	 * the host with regard to memory pressure in
	 * the guest.
	 */
	struct task_struct *thread;

	struct mutex ha_region_mutex;

	/*
	 * A list of hot-add regions.
	 */
	struct list_head ha_region_list;

	/*
	 * We start with the highest version we can support
	 * and downgrade based on the host; we save here the
	 * next version to try.
	 */
	__u32 next_version;
};

static struct hv_dynmem_device dm_device;

static void post_status(struct hv_dynmem_device *dm);
#ifdef CONFIG_MEMORY_HOTPLUG

static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
			      void *v)
{
	struct memory_notify *mem = (struct memory_notify *)v;

	switch (val) {
	case MEM_GOING_ONLINE:
		mutex_lock(&dm_device.ha_region_mutex);
		break;

	case MEM_ONLINE:
		dm_device.num_pages_onlined += mem->nr_pages;
		/* Fall through: MEM_ONLINE also releases the mutex. */
	case MEM_CANCEL_ONLINE:
		mutex_unlock(&dm_device.ha_region_mutex);
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
			complete(&dm_device.ol_waitevent);
		}
		break;

	case MEM_OFFLINE:
		mutex_lock(&dm_device.ha_region_mutex);
		dm_device.num_pages_onlined -= mem->nr_pages;
		mutex_unlock(&dm_device.ha_region_mutex);
		break;
	case MEM_GOING_OFFLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block hv_memory_nb = {
	.notifier_call = hv_memory_notifier,
	.priority = 0
};
static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
{
	int i;
	struct page *pg;

	for (i = 0; i < size; i++) {
		pg = pfn_to_page(start_pfn + i);
		__online_page_set_limits(pg);
		__online_page_increment_counters(pg);
		__online_page_free(pg);
	}
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
			   unsigned long pfn_count,
			   struct hv_hotadd_state *has)
{
	int ret = 0;
	int i, nid;
	unsigned long start_pfn;
	unsigned long processed_pfn;
	unsigned long total_pfn = pfn_count;

	for (i = 0; i < (size/HA_CHUNK); i++) {
		start_pfn = start + (i * HA_CHUNK);
		has->ha_end_pfn += HA_CHUNK;

		if (total_pfn > HA_CHUNK) {
			processed_pfn = HA_CHUNK;
			total_pfn -= HA_CHUNK;
		} else {
			processed_pfn = total_pfn;
			total_pfn = 0;
		}

		has->covered_end_pfn += processed_pfn;

		init_completion(&dm_device.ol_waitevent);
		dm_device.ha_waiting = true;

		mutex_unlock(&dm_device.ha_region_mutex);
		nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
		ret = add_memory(nid, PFN_PHYS((start_pfn)),
				 (HA_CHUNK << PAGE_SHIFT));

		if (ret) {
			pr_info("hot_add memory failed, error is %d\n", ret);
			if (ret == -EEXIST) {
				/*
				 * This error is not a transient failure.
				 * This is the case where the guest's
				 * physical address map precludes hot
				 * adding memory. Stop all further hot-add.
				 */
				do_hot_add = false;
			}
			has->ha_end_pfn -= HA_CHUNK;
			has->covered_end_pfn -= processed_pfn;
			mutex_lock(&dm_device.ha_region_mutex);
			break;
		}

		/*
		 * Wait for the memory block to be onlined.
		 * Since the hot add has succeeded, it is ok to
		 * proceed even if the pages in the hot added region
		 * have not been "onlined" within the allowed time.
		 */
		wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
		mutex_lock(&dm_device.ha_region_mutex);
		post_status(&dm_device);
	}
}
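/*
 * Note (added for clarity, not part of the original file):
 * ha_region_mutex is released around add_memory() and the wait above
 * because the memory notifier callbacks take the same mutex on the
 * online path; it is reacquired before the state is rolled back on
 * failure or the next chunk is processed.
 */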
static void hv_online_page(struct page *pg)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long cur_start_pgp;
	unsigned long cur_end_pgp;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		cur_start_pgp = (unsigned long)
				pfn_to_page(has->covered_start_pfn);
		cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);

		if (((unsigned long)pg >= cur_start_pgp) &&
		    ((unsigned long)pg < cur_end_pgp)) {
			/*
			 * This frame is currently backed; online the
			 * page.
			 */
			__online_page_set_limits(pg);
			__online_page_increment_counters(pg);
			__online_page_free(pg);
			has->covered_start_pfn++;
		}
	}
}
static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long residual, new_inc;

	if (list_empty(&dm_device.ha_region_list))
		return false;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;
		/*
		 * If the current hot-add request extends beyond
		 * our current limit, extend it.
		 */
		if ((start_pfn + pfn_cnt) > has->end_pfn) {
			residual = (start_pfn + pfn_cnt - has->end_pfn);
			/*
			 * Extend the region by multiples of HA_CHUNK.
			 */
			new_inc = (residual / HA_CHUNK) * HA_CHUNK;
			if (residual % HA_CHUNK)
				new_inc += HA_CHUNK;

			has->end_pfn += new_inc;
		}
		/*
		 * If the current start pfn is not where the covered_end
		 * is, update it.
		 */
		if (has->covered_end_pfn != start_pfn) {
			has->covered_end_pfn = start_pfn;
			has->covered_start_pfn = start_pfn;
		}
		return true;
	}

	return false;
}
static unsigned long handle_pg_range(unsigned long pg_start,
				     unsigned long pg_count)
{
	unsigned long start_pfn = pg_start;
	unsigned long pfn_cnt = pg_count;
	unsigned long size;
	struct list_head *cur;
	struct hv_hotadd_state *has;
	unsigned long pgs_ol = 0;
	unsigned long old_covered_state;

	if (list_empty(&dm_device.ha_region_list))
		return 0;

	list_for_each(cur, &dm_device.ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		/*
		 * If the pfn range we are dealing with is not in the current
		 * "hot add block", move on.
		 */
		if ((start_pfn >= has->end_pfn))
			continue;

		old_covered_state = has->covered_end_pfn;

		if (start_pfn < has->ha_end_pfn) {
			/*
			 * This is the case where we are backing pages
			 * in an already hot added region. Bring
			 * these pages online first.
			 */
			pgs_ol = has->ha_end_pfn - start_pfn;
			if (pgs_ol > pfn_cnt)
				pgs_ol = pfn_cnt;
			hv_bring_pgs_online(start_pfn, pgs_ol);
			has->covered_end_pfn += pgs_ol;
			has->covered_start_pfn += pgs_ol;
			pfn_cnt -= pgs_ol;
		}

		if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
			/*
			 * We have some residual hot add range
			 * that needs to be hot added; hot add
			 * it now. Hot add a multiple of
			 * HA_CHUNK that fully covers the pages
			 * we have.
			 */
			size = (has->end_pfn - has->ha_end_pfn);
			if (pfn_cnt <= size) {
				size = ((pfn_cnt / HA_CHUNK) * HA_CHUNK);
				if (pfn_cnt % HA_CHUNK)
					size += HA_CHUNK;
			} else {
				pfn_cnt = size;
			}
			hv_mem_hot_add(has->ha_end_pfn, size, pfn_cnt, has);
		}
		/*
		 * If we managed to online any pages that were given to us,
		 * we declare success.
		 */
		return has->covered_end_pfn - old_covered_state;
	}

	return 0;
}
static unsigned long process_hot_add(unsigned long pg_start,
				     unsigned long pfn_cnt,
				     unsigned long rg_start,
				     unsigned long rg_size)
{
	struct hv_hotadd_state *ha_region = NULL;

	if (pfn_cnt == 0)
		return 0;

	if (!dm_device.host_specified_ha_region)
		if (pfn_covered(pg_start, pfn_cnt))
			goto do_pg_range;

	/*
	 * If the host has specified a hot-add range, deal with it first.
	 */
	if (rg_size != 0) {
		ha_region = kzalloc(sizeof(struct hv_hotadd_state), GFP_KERNEL);
		if (!ha_region)
			return 0;

		INIT_LIST_HEAD(&ha_region->list);

		list_add_tail(&ha_region->list, &dm_device.ha_region_list);
		ha_region->start_pfn = rg_start;
		ha_region->ha_end_pfn = rg_start;
		ha_region->covered_start_pfn = pg_start;
		ha_region->covered_end_pfn = pg_start;
		ha_region->end_pfn = rg_start + rg_size;
	}

do_pg_range:
	/*
	 * Process the specified page range, bringing the pages
	 * online if possible.
	 */
	return handle_pg_range(pg_start, pfn_cnt);
}
static void hot_add_req(struct work_struct *dummy)
{
	struct dm_hot_add_response resp;
#ifdef CONFIG_MEMORY_HOTPLUG
	unsigned long pg_start, pfn_cnt;
	unsigned long rg_start, rg_sz;
#endif
	struct hv_dynmem_device *dm = &dm_device;

	memset(&resp, 0, sizeof(struct dm_hot_add_response));
	resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
	resp.hdr.size = sizeof(struct dm_hot_add_response);

#ifdef CONFIG_MEMORY_HOTPLUG
	mutex_lock(&dm_device.ha_region_mutex);
	pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
	pfn_cnt = dm->ha_wrk.ha_page_range.finfo.page_cnt;

	rg_start = dm->ha_wrk.ha_region_range.finfo.start_page;
	rg_sz = dm->ha_wrk.ha_region_range.finfo.page_cnt;

	if ((rg_start == 0) && (!dm->host_specified_ha_region)) {
		unsigned long region_size;
		unsigned long region_start;

		/*
		 * The host has not specified the hot-add region.
		 * Based on the hot-add page range being specified,
		 * compute a hot-add region that can cover the pages
		 * that need to be hot-added while ensuring the alignment
		 * and size requirements of Linux as it relates to hot-add.
		 */
		region_size = (pfn_cnt / HA_CHUNK) * HA_CHUNK;
		if (pfn_cnt % HA_CHUNK)
			region_size += HA_CHUNK;

		region_start = (pg_start / HA_CHUNK) * HA_CHUNK;

		rg_start = region_start;
		rg_sz = region_size;
	}

	if (do_hot_add)
		resp.page_count = process_hot_add(pg_start, pfn_cnt,
						  rg_start, rg_sz);

	dm->num_pages_added += resp.page_count;
	mutex_unlock(&dm_device.ha_region_mutex);
#endif
	/*
	 * The result field of the response structure has the
	 * following semantics:
	 *
	 * 1. If all or some pages hot-added: Guest should return success.
	 *
	 * 2. If no pages could be hot-added:
	 *
	 * If the guest returns success, then the host
	 * will not attempt any further hot-add operations. This
	 * signifies a permanent failure.
	 *
	 * If the guest returns failure, then this failure will be
	 * treated as a transient failure and the host may retry the
	 * hot-add operation after some delay.
	 */
	if (resp.page_count > 0)
		resp.result = 1;
	else if (!do_hot_add)
		resp.result = 1;
	else
		resp.result = 0;

	if (!do_hot_add || (resp.page_count == 0))
		pr_info("Memory hot add failed\n");

	dm->state = DM_INITIALIZED;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	vmbus_sendpacket(dm->dev->channel, &resp,
			 sizeof(struct dm_hot_add_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
static void process_info(struct hv_dynmem_device *dm, struct dm_info_msg *msg)
{
	struct dm_info_header *info_hdr;

	info_hdr = (struct dm_info_header *)msg->info;

	switch (info_hdr->type) {
	case INFO_TYPE_MAX_PAGE_CNT:
		pr_info("Received INFO_TYPE_MAX_PAGE_CNT\n");
		pr_info("Data Size is %d\n", info_hdr->data_size);
		break;
	default:
		pr_info("Received Unknown type: %d\n", info_hdr->type);
	}
}
static unsigned long compute_balloon_floor(void)
{
	unsigned long min_pages;
#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
	/*
	 * Simple continuous piecewise linear function:
	 * max MiB -> min MiB gradient
	 */
	if (totalram_pages < MB2PAGES(128))
		min_pages = MB2PAGES(8) + (totalram_pages >> 1);
	else if (totalram_pages < MB2PAGES(512))
		min_pages = MB2PAGES(40) + (totalram_pages >> 2);
	else if (totalram_pages < MB2PAGES(2048))
		min_pages = MB2PAGES(104) + (totalram_pages >> 3);
	else if (totalram_pages < MB2PAGES(8192))
		min_pages = MB2PAGES(256) + (totalram_pages >> 4);
	else
		min_pages = MB2PAGES(512) + (totalram_pages >> 5);
#undef MB2PAGES
	return min_pages;
}
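/*
 * Worked example (added for clarity, not part of the original file):
 * a guest with 1024 MiB of RAM falls in the MB2PAGES(512) ..
 * MB2PAGES(2048) bucket, so the floor is 104 MiB + 1024/8 MiB ==
 * 232 MiB; balloon_up() will refuse to balloon the guest below that.
 */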
/*
 * Post our status as it relates to memory pressure to the
 * host. The host expects the guests to post this status
 * periodically at 1 second intervals.
 *
 * The metrics specified in this protocol are very Windows
 * specific and so we cook up numbers here to convey our memory
 * pressure.
 */
static void post_status(struct hv_dynmem_device *dm)
{
	struct dm_status status;
	struct sysinfo val;
	unsigned long now = jiffies;
	unsigned long last_post = last_post_time;

	if (pressure_report_delay > 0) {
		--pressure_report_delay;
		return;
	}

	if (!time_after(now, (last_post_time + HZ)))
		return;

	si_meminfo(&val);
	memset(&status, 0, sizeof(struct dm_status));
	status.hdr.type = DM_STATUS_REPORT;
	status.hdr.size = sizeof(struct dm_status);
	status.hdr.trans_id = atomic_inc_return(&trans_id);

	/*
	 * The host expects the guest to report free and committed memory.
	 * Furthermore, the host expects the pressure information to include
	 * the ballooned out pages. For a given amount of memory that we are
	 * managing we need to compute a floor below which we should not
	 * balloon. Compute this and add it to the pressure report.
	 * We also need to report all offline pages (num_pages_added -
	 * num_pages_onlined) as committed to the host, otherwise it can try
	 * asking us to balloon them out.
	 */
	status.num_avail = val.freeram;
	status.num_committed = vm_memory_committed() +
		dm->num_pages_ballooned +
		(dm->num_pages_added > dm->num_pages_onlined ?
		 dm->num_pages_added - dm->num_pages_onlined : 0) +
		compute_balloon_floor();
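	/*
	 * Illustrative arithmetic (added for clarity, not part of the
	 * original file): on a 1024 MiB guest with 100 MiB ballooned
	 * out and nothing hot-added, this reports Committed_AS plus
	 * 25600 pages plus the 232 MiB floor from
	 * compute_balloon_floor().
	 */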
	/*
	 * If our transaction ID is no longer current, just don't
	 * send the status. This can happen if we were interrupted
	 * after we picked our transaction ID.
	 */
	if (status.hdr.trans_id != atomic_read(&trans_id))
		return;

	/*
	 * If the last post time that we sampled has changed,
	 * we have raced; don't post the status.
	 */
	if (last_post != last_post_time)
		return;

	last_post_time = jiffies;
	vmbus_sendpacket(dm->dev->channel, &status,
			 sizeof(struct dm_status),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);
}
static void free_balloon_pages(struct hv_dynmem_device *dm,
			       union dm_mem_page_range *range_array)
{
	int num_pages = range_array->finfo.page_cnt;
	__u64 start_frame = range_array->finfo.start_page;
	struct page *pg;
	int i;

	for (i = 0; i < num_pages; i++) {
		pg = pfn_to_page(i + start_frame);
		__free_page(pg);
		dm->num_pages_ballooned--;
	}
}
static int alloc_balloon_pages(struct hv_dynmem_device *dm, int num_pages,
			       struct dm_balloon_response *bl_resp,
			       int alloc_unit, bool *alloc_error)
{
	int i = 0;
	struct page *pg;

	if (num_pages < alloc_unit)
		return 0;

	for (i = 0; (i * alloc_unit) < num_pages; i++) {
		if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) >
		    PAGE_SIZE)
			return i * alloc_unit;

		/*
		 * We execute this code in a thread context. Furthermore,
		 * we don't want the kernel to try too hard.
		 */
		pg = alloc_pages(GFP_HIGHUSER | __GFP_NORETRY |
				 __GFP_NOMEMALLOC | __GFP_NOWARN,
				 get_order(alloc_unit << PAGE_SHIFT));

		if (!pg) {
			*alloc_error = true;
			return i * alloc_unit;
		}

		dm->num_pages_ballooned += alloc_unit;

		/*
		 * If we allocated 2M pages, split them so we
		 * can free them in any order we get them back.
		 */
		if (alloc_unit != 1)
			split_page(pg, get_order(alloc_unit << PAGE_SHIFT));

		bl_resp->range_count++;
		bl_resp->range_array[i].finfo.start_page = page_to_pfn(pg);
		bl_resp->range_array[i].finfo.page_cnt = alloc_unit;
		bl_resp->hdr.size += sizeof(union dm_mem_page_range);
	}

	return num_pages;
}
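/*
 * Note (added for clarity, not part of the original file): the return
 * value is the number of pages actually handed to the host, which may
 * fall short of num_pages; balloon_up() below drops alloc_unit to 1
 * and retries before finishing the transaction with what it got.
 */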
static void balloon_up(struct work_struct *dummy)
{
	int num_pages = dm_device.balloon_wrk.num_pages;
	int num_ballooned = 0;
	struct dm_balloon_response *bl_resp;
	int alloc_unit;
	int ret;
	bool alloc_error;
	bool done = false;
	int i;
	struct sysinfo val;
	unsigned long floor;

	/* The host balloons pages in 2M granularity. */
	WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);

	/*
	 * We will attempt 2M allocations. However, if we fail to
	 * allocate 2M chunks, we will go back to 4k allocations.
	 */
	alloc_unit = 512;

	si_meminfo(&val);
	floor = compute_balloon_floor();

	/* Refuse to balloon below the floor, keep the 2M granularity. */
	if (val.freeram - num_pages < floor) {
		num_pages = val.freeram > floor ? (val.freeram - floor) : 0;
		num_pages -= num_pages % PAGES_IN_2M;
	}

	while (!done) {
		bl_resp = (struct dm_balloon_response *)send_buffer;
		memset(send_buffer, 0, PAGE_SIZE);
		bl_resp->hdr.type = DM_BALLOON_RESPONSE;
		bl_resp->hdr.size = sizeof(struct dm_balloon_response);
		bl_resp->more_pages = 1;

		num_pages -= num_ballooned;
		alloc_error = false;
		num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
						    bl_resp, alloc_unit,
						    &alloc_error);

		if (alloc_unit != 1 && num_ballooned == 0) {
			alloc_unit = 1;
			continue;
		}

		if ((alloc_unit == 1 && alloc_error) ||
		    (num_ballooned == num_pages)) {
			bl_resp->more_pages = 0;
			done = true;
			dm_device.state = DM_INITIALIZED;
		}

		/*
		 * We are pushing a lot of data through the channel;
		 * deal with transient failures caused by the lack of
		 * space in the ring buffer.
		 */
		do {
			bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
			ret = vmbus_sendpacket(dm_device.dev->channel,
					       bl_resp,
					       bl_resp->hdr.size,
					       (unsigned long)NULL,
					       VM_PKT_DATA_INBAND, 0);

			if (ret == -EAGAIN)
				msleep(20);
			post_status(&dm_device);
		} while (ret == -EAGAIN);

		if (ret) {
			/*
			 * Free up the memory we allocated.
			 */
			pr_info("Balloon response failed\n");

			for (i = 0; i < bl_resp->range_count; i++)
				free_balloon_pages(&dm_device,
						   &bl_resp->range_array[i]);

			done = true;
		}
	}
}
static void balloon_down(struct hv_dynmem_device *dm,
			 struct dm_unballoon_request *req)
{
	union dm_mem_page_range *range_array = req->range_array;
	int range_count = req->range_count;
	struct dm_unballoon_response resp;
	int i;

	for (i = 0; i < range_count; i++) {
		free_balloon_pages(dm, &range_array[i]);
		complete(&dm_device.config_event);
	}

	if (req->more_pages == 1)
		return;

	memset(&resp, 0, sizeof(struct dm_unballoon_response));
	resp.hdr.type = DM_UNBALLOON_RESPONSE;
	resp.hdr.trans_id = atomic_inc_return(&trans_id);
	resp.hdr.size = sizeof(struct dm_unballoon_response);

	vmbus_sendpacket(dm_device.dev->channel, &resp,
			 sizeof(struct dm_unballoon_response),
			 (unsigned long)NULL,
			 VM_PKT_DATA_INBAND, 0);

	dm->state = DM_INITIALIZED;
}
static void balloon_onchannelcallback(void *context);

static int dm_thread_func(void *dm_dev)
{
	struct hv_dynmem_device *dm = dm_dev;

	while (!kthread_should_stop()) {
		wait_for_completion_interruptible_timeout(
				&dm_device.config_event, 1*HZ);
		/*
		 * The host expects us to post information on the memory
		 * pressure every second.
		 */
		reinit_completion(&dm_device.config_event);
		post_status(dm);
	}

	return 0;
}
static void version_resp(struct hv_dynmem_device *dm,
			 struct dm_version_response *vresp)
{
	struct dm_version_request version_req;
	int ret;

	if (vresp->is_accepted) {
		/*
		 * We are done; wake up the
		 * context waiting for version
		 * negotiation.
		 */
		complete(&dm->host_event);
		return;
	}
	/*
	 * If there are more versions to try, continue
	 * with negotiations; if not,
	 * shut down the service since we are not able
	 * to negotiate a suitable version number
	 * with the host.
	 */
	if (dm->next_version == 0)
		goto version_error;

	dm->next_version = 0;
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
	version_req.is_last_attempt = 1;

	ret = vmbus_sendpacket(dm->dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);

	if (ret)
		goto version_error;

	return;

version_error:
	dm->state = DM_INIT_ERROR;
	complete(&dm->host_event);
}
static void cap_resp(struct hv_dynmem_device *dm,
		     struct dm_capabilities_resp_msg *cap_resp)
{
	if (!cap_resp->is_accepted) {
		pr_info("Capabilities not accepted by host\n");
		dm->state = DM_INIT_ERROR;
	}
	complete(&dm->host_event);
}
static void balloon_onchannelcallback(void *context)
{
	struct hv_device *dev = context;
	u32 recvlen;
	u64 requestid;
	struct dm_message *dm_msg;
	struct dm_header *dm_hdr;
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct dm_balloon *bal_msg;
	struct dm_hot_add *ha_msg;
	union dm_mem_page_range *ha_pg_range;
	union dm_mem_page_range *ha_region;

	memset(recv_buffer, 0, sizeof(recv_buffer));
	vmbus_recvpacket(dev->channel, recv_buffer,
			 PAGE_SIZE, &recvlen, &requestid);

	if (recvlen > 0) {
		dm_msg = (struct dm_message *)recv_buffer;
		dm_hdr = &dm_msg->hdr;

		switch (dm_hdr->type) {
		case DM_VERSION_RESPONSE:
			version_resp(dm,
				     (struct dm_version_response *)dm_msg);
			break;

		case DM_CAPABILITIES_RESPONSE:
			cap_resp(dm,
				 (struct dm_capabilities_resp_msg *)dm_msg);
			break;

		case DM_BALLOON_REQUEST:
			if (dm->state == DM_BALLOON_UP)
				pr_warn("Currently ballooning\n");
			bal_msg = (struct dm_balloon *)recv_buffer;
			dm->state = DM_BALLOON_UP;
			dm_device.balloon_wrk.num_pages = bal_msg->num_pages;
			schedule_work(&dm_device.balloon_wrk.wrk);
			break;

		case DM_UNBALLOON_REQUEST:
			dm->state = DM_BALLOON_DOWN;
			balloon_down(dm,
				     (struct dm_unballoon_request *)recv_buffer);
			break;

		case DM_MEM_HOT_ADD_REQUEST:
			if (dm->state == DM_HOT_ADD)
				pr_warn("Currently hot-adding\n");
			dm->state = DM_HOT_ADD;
			ha_msg = (struct dm_hot_add *)recv_buffer;
			if (ha_msg->hdr.size == sizeof(struct dm_hot_add)) {
				/*
				 * This is a normal hot-add request specifying
				 * hot-add memory.
				 */
				ha_pg_range = &ha_msg->range;
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range.page_range = 0;
			} else {
				/*
				 * Host is specifying that we first hot-add
				 * a region and then partially populate this
				 * region.
				 */
				dm->host_specified_ha_region = true;
				ha_pg_range = &ha_msg->range;
				ha_region = &ha_pg_range[1];
				dm->ha_wrk.ha_page_range = *ha_pg_range;
				dm->ha_wrk.ha_region_range = *ha_region;
			}
			schedule_work(&dm_device.ha_wrk.wrk);
			break;

		case DM_INFO_MESSAGE:
			process_info(dm, (struct dm_info_msg *)dm_msg);
			break;

		default:
			pr_err("Unhandled message: type: %d\n", dm_hdr->type);
		}
	}
}
static int balloon_probe(struct hv_device *dev,
			 const struct hv_vmbus_device_id *dev_id)
{
	int ret, t;
	struct dm_version_request version_req;
	struct dm_capabilities cap_msg;

	do_hot_add = hot_add;

	/*
	 * First allocate a send buffer.
	 */
	send_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!send_buffer)
		return -ENOMEM;

	ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
			 balloon_onchannelcallback, dev);
	if (ret)
		goto probe_error0;

	dm_device.dev = dev;
	dm_device.state = DM_INITIALIZING;
	dm_device.next_version = DYNMEM_PROTOCOL_VERSION_WIN7;
	init_completion(&dm_device.host_event);
	init_completion(&dm_device.config_event);
	INIT_LIST_HEAD(&dm_device.ha_region_list);
	mutex_init(&dm_device.ha_region_mutex);
	INIT_WORK(&dm_device.balloon_wrk.wrk, balloon_up);
	INIT_WORK(&dm_device.ha_wrk.wrk, hot_add_req);
	dm_device.host_specified_ha_region = false;

	dm_device.thread =
		kthread_run(dm_thread_func, &dm_device, "hv_balloon");
	if (IS_ERR(dm_device.thread)) {
		ret = PTR_ERR(dm_device.thread);
		goto probe_error1;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	set_online_page_callback(&hv_online_page);
	register_memory_notifier(&hv_memory_nb);
#endif

	hv_set_drvdata(dev, &dm_device);
	/*
	 * Initiate the handshake with the host and negotiate
	 * a version that the host can support. We start with the
	 * highest version number and go down if the host cannot
	 * support it.
	 */
	memset(&version_req, 0, sizeof(struct dm_version_request));
	version_req.hdr.type = DM_VERSION_REQUEST;
	version_req.hdr.size = sizeof(struct dm_version_request);
	version_req.hdr.trans_id = atomic_inc_return(&trans_id);
	version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
	version_req.is_last_attempt = 0;

	ret = vmbus_sendpacket(dev->channel, &version_req,
			       sizeof(struct dm_version_request),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If we could not negotiate a compatible version with the host,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * Now submit our capabilities to the host.
	 */
	memset(&cap_msg, 0, sizeof(struct dm_capabilities));
	cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
	cap_msg.hdr.size = sizeof(struct dm_capabilities);
	cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);

	cap_msg.caps.cap_bits.balloon = 1;
	cap_msg.caps.cap_bits.hot_add = 1;

	/*
	 * Specify our alignment requirements as they relate to
	 * memory hot-add. Specify 128MB alignment.
	 */
	cap_msg.caps.cap_bits.hot_add_alignment = 7;
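	/*
	 * Sanity sketch (added for clarity, not part of the original
	 * file): the encoding is 2^n MB, so 7 advertises
	 * 1 << 7 == 128 MB, the same granularity as HA_CHUNK with
	 * 4 KB pages.
	 */
	BUILD_BUG_ON((1UL << 7) * 1024 * 1024 != HA_CHUNK * PAGE_SIZE);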
	/*
	 * Currently the host does not use these
	 * values and we set them to what is done in the
	 * Windows driver.
	 */
	cap_msg.min_page_cnt = 0;
	cap_msg.max_page_number = -1;

	ret = vmbus_sendpacket(dev->channel, &cap_msg,
			       sizeof(struct dm_capabilities),
			       (unsigned long)NULL,
			       VM_PKT_DATA_INBAND, 0);
	if (ret)
		goto probe_error2;

	t = wait_for_completion_timeout(&dm_device.host_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	/*
	 * If the host does not like our capabilities,
	 * fail the probe function.
	 */
	if (dm_device.state == DM_INIT_ERROR) {
		ret = -ETIMEDOUT;
		goto probe_error2;
	}

	dm_device.state = DM_INITIALIZED;

	return 0;

probe_error2:
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
#endif
	kthread_stop(dm_device.thread);

probe_error1:
	vmbus_close(dev->channel);
probe_error0:
	kfree(send_buffer);
	return ret;
}
static int balloon_remove(struct hv_device *dev)
{
	struct hv_dynmem_device *dm = hv_get_drvdata(dev);
	struct list_head *cur, *tmp;
	struct hv_hotadd_state *has;

	if (dm->num_pages_ballooned != 0)
		pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);

	cancel_work_sync(&dm->balloon_wrk.wrk);
	cancel_work_sync(&dm->ha_wrk.wrk);

	vmbus_close(dev->channel);
	kthread_stop(dm->thread);
	kfree(send_buffer);
#ifdef CONFIG_MEMORY_HOTPLUG
	restore_online_page_callback(&hv_online_page);
	unregister_memory_notifier(&hv_memory_nb);
#endif
	list_for_each_safe(cur, tmp, &dm->ha_region_list) {
		has = list_entry(cur, struct hv_hotadd_state, list);
		list_del(&has->list);
		kfree(has);
	}

	return 0;
}
static const struct hv_vmbus_device_id id_table[] = {
	/* Dynamic Memory Class ID */
	/* 525074DC-8985-46e2-8057-A307DC18A502 */
	{ HV_DM_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

static struct hv_driver balloon_drv = {
	.name = "hv_balloon",
	.id_table = id_table,
	.probe = balloon_probe,
	.remove = balloon_remove,
};

static int __init init_balloon_drv(void)
{
	return vmbus_driver_register(&balloon_drv);
}

module_init(init_balloon_drv);

MODULE_DESCRIPTION("Hyper-V Balloon");
MODULE_LICENSE("GPL");