author     Linus Torvalds <torvalds@linux-foundation.org>   2020-02-03 14:42:03 +0000
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-02-03 14:42:03 +0000
commit     d0fa9250317ff6e92a1397ebf7cf4d83014f38a6 (patch)
tree       34735aae3a17aad235f7acc1b45fe96fba965750
parent     46d6b7becb1d5a8e697db786590c19e4067a975a (diff)
parent     54e19d34011fea26d39aa74781131de0ce642a01 (diff)
Merge tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux
Pull Hyper-V updates from Sasha Levin:
- Most of the commits here are work by Dexuan Cui to enable host-initiated
  hibernation support (a condensed sketch of the new suspend/resume wiring
  follows the shortlog below).
- Fix by Tianyu Lan for a warning shown when the host sends non-aligned
  balloon requests (see the sketch right after this list).
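The balloon fix removes the driver's assumption that the host always balloons in multiples of 2 MB: alloc_balloon_pages() now simply hands over as many whole allocation units as fit in the request and reports how many pages it actually got. Below is a condensed, stand-alone model of the reworked loop; the alloc_chunk() callback and the _model suffix are illustrative only, since the real function in the diff records each chunk in the balloon response as it goes.

#include <stdbool.h>

/* Returns true if one chunk of 'alloc_unit' pages was handed to the host. */
typedef bool (*alloc_chunk_fn)(unsigned int alloc_unit);

static unsigned int alloc_balloon_pages_model(unsigned int num_pages,
					      unsigned int alloc_unit,
					      alloc_chunk_fn alloc_chunk)
{
	unsigned int i;

	/*
	 * Only whole allocation units are attempted; a request smaller than
	 * alloc_unit, or not a multiple of it, no longer trips a warning.
	 * The remainder is simply left unallocated.
	 */
	for (i = 0; i < num_pages / alloc_unit; i++) {
		if (!alloc_chunk(alloc_unit))
			break;
	}

	/* Report the pages actually ballooned, not the pages requested. */
	return i * alloc_unit;
}

balloon_up() then tells the host how many pages were really given up, so a partially fulfilled or non-2M-aligned request is handled without the WARN_ON_ONCE() the old code carried.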
* tag 'hyperv-next-signed' of git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux:
hv_utils: Add the support of hibernation
hv_utils: Support host-initiated hibernation request
hv_utils: Support host-initiated restart request
Tools: hv: Reopen the devices if read() or write() returns errors
video: hyperv: hyperv_fb: Use physical memory for fb on HyperV Gen 1 VMs.
Drivers: hv: vmbus: Ignore CHANNELMSG_TL_CONNECT_RESULT(23)
video: hyperv_fb: Fix hibernation for the deferred IO feature
Input: hyperv-keyboard: Add the support of hibernation
hv_balloon: Balloon up according to request page number
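The hibernation enablement follows one pattern across the util services (kvp, vss, fcopy, timesync): each gains a pre-suspend hook that cancels outstanding work, disables the channel tasklet and, where a user-space daemon may be mid-transaction, fakes a CANCEL_FCOPY or THAW message, while the generic hv_utils driver closes the VMBus channel on suspend and reopens it on resume. The driver-level half, condensed from the drivers/hv/hv_util.c hunk in the diff below (ring-buffer sizes and error paths as in the patch):

static int util_suspend(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret;

	/* Let the service quiesce itself before the channel goes away. */
	if (srv->util_pre_suspend) {
		ret = srv->util_pre_suspend();
		if (ret)
			return ret;
	}

	vmbus_close(dev->channel);
	return 0;
}

static int util_resume(struct hv_device *dev)
{
	struct hv_util_service *srv = hv_get_drvdata(dev);
	int ret;

	/* Re-enable whatever the pre-suspend hook disabled (e.g. tasklets). */
	if (srv->util_pre_resume) {
		ret = srv->util_pre_resume();
		if (ret)
			return ret;
	}

	return vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE,
			  4 * HV_HYP_PAGE_SIZE, NULL, 0,
			  srv->util_cb, dev->channel);
}

The per-service hooks are the new hv_kvp_pre_suspend()/hv_kvp_pre_resume() and friends declared in hyperv_vmbus.h.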
-rw-r--r--  drivers/hv/channel_mgmt.c              |  21
-rw-r--r--  drivers/hv/hv_balloon.c                |  13
-rw-r--r--  drivers/hv/hv_fcopy.c                  |  54
-rw-r--r--  drivers/hv/hv_kvp.c                    |  43
-rw-r--r--  drivers/hv/hv_snapshot.c               |  55
-rw-r--r--  drivers/hv/hv_util.c                   | 148
-rw-r--r--  drivers/hv/hyperv_vmbus.h              |   6
-rw-r--r--  drivers/hv/vmbus_drv.c                 |   4
-rw-r--r--  drivers/input/serio/hyperv-keyboard.c  |  27
-rw-r--r--  drivers/video/fbdev/Kconfig            |   1
-rw-r--r--  drivers/video/fbdev/hyperv_fb.c        | 184
-rw-r--r--  include/linux/hyperv.h                 |   4
-rw-r--r--  tools/hv/hv_fcopy_daemon.c             |  37
-rw-r--r--  tools/hv/hv_kvp_daemon.c               |  36
-rw-r--r--  tools/hv/hv_vss_daemon.c               |  49
15 files changed, 574 insertions(+), 108 deletions(-)
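The tools/hv side of the series makes the daemons survive those faked messages: a failed pread()/pwrite() on the /dev/vmbus/hv_* device (for example the EINVAL the driver now returns for a write that arrives after the state was forced back to HVUTIL_READY) is no longer fatal; the daemon closes the device, re-opens it and redoes the handshake. A minimal stand-alone sketch of that loop, assuming the fcopy device; the real daemons in the diff below also reset per-transaction state such as a partially copied file or frozen filesystems.

#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <syslog.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = -1;
	int error = 0;
	ssize_t len;

reopen_fd:
	if (fd != -1)
		close(fd);
	fd = open("/dev/vmbus/hv_fcopy", O_RDWR);
	if (fd < 0) {
		syslog(LOG_ERR, "open failed: %s", strerror(errno));
		return 1;
	}
	/* ... negotiate the protocol version with the driver here ... */

	for (;;) {
		len = pread(fd, buf, sizeof(buf), 0);
		if (len < 0) {
			/* Device was reset across suspend/resume: reopen. */
			goto reopen_fd;
		}

		/* ... act on the message and set 'error' accordingly ... */

		if (pwrite(fd, &error, sizeof(error), 0) != sizeof(error)) {
			/* e.g. EINVAL caused by the faked message: reopen. */
			goto reopen_fd;
		}
	}
}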
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index 8eb167540b4f..0370364169c4 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -1351,6 +1351,8 @@ channel_message_table[CHANNELMSG_COUNT] = { { CHANNELMSG_19, 0, NULL }, { CHANNELMSG_20, 0, NULL }, { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL }, + { CHANNELMSG_22, 0, NULL }, + { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL }, }; /* @@ -1362,25 +1364,16 @@ void vmbus_onmessage(void *context) { struct hv_message *msg = context; struct vmbus_channel_message_header *hdr; - int size; hdr = (struct vmbus_channel_message_header *)msg->u.payload; - size = msg->header.payload_size; trace_vmbus_on_message(hdr); - if (hdr->msgtype >= CHANNELMSG_COUNT) { - pr_err("Received invalid channel message type %d size %d\n", - hdr->msgtype, size); - print_hex_dump_bytes("", DUMP_PREFIX_NONE, - (unsigned char *)msg->u.payload, size); - return; - } - - if (channel_message_table[hdr->msgtype].message_handler) - channel_message_table[hdr->msgtype].message_handler(hdr); - else - pr_err("Unhandled channel message type %d\n", hdr->msgtype); + /* + * vmbus_on_msg_dpc() makes sure the hdr->msgtype here can not go + * out of bound and the message_handler pointer can not be NULL. + */ + channel_message_table[hdr->msgtype].message_handler(hdr); } /* diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index b155d0052981..a02ce43d778d 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1217,10 +1217,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, unsigned int i, j; struct page *pg; - if (num_pages < alloc_unit) - return 0; - - for (i = 0; (i * alloc_unit) < num_pages; i++) { + for (i = 0; i < num_pages / alloc_unit; i++) { if (bl_resp->hdr.size + sizeof(union dm_mem_page_range) > HV_HYP_PAGE_SIZE) return i * alloc_unit; @@ -1258,7 +1255,7 @@ static unsigned int alloc_balloon_pages(struct hv_dynmem_device *dm, } - return num_pages; + return i * alloc_unit; } static void balloon_up(struct work_struct *dummy) @@ -1273,9 +1270,6 @@ static void balloon_up(struct work_struct *dummy) long avail_pages; unsigned long floor; - /* The host balloons pages in 2M granularity. */ - WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0); - /* * We will attempt 2M allocations. However, if we fail to * allocate 2M chunks, we will go back to PAGE_SIZE allocations. @@ -1285,14 +1279,13 @@ static void balloon_up(struct work_struct *dummy) avail_pages = si_mem_available(); floor = compute_balloon_floor(); - /* Refuse to balloon below the floor, keep the 2M granularity. */ + /* Refuse to balloon below the floor. */ if (avail_pages < num_pages || avail_pages - num_pages < floor) { pr_warn("Balloon request will be partially fulfilled. %s\n", avail_pages < num_pages ? "Not enough memory." : "Balloon floor reached."); num_pages = avail_pages > floor ? 
(avail_pages - floor) : 0; - num_pages -= num_pages % PAGES_IN_2M; } while (!done) { diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index 08fa4a5de644..bb9ba3f7c794 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c @@ -346,9 +346,61 @@ int hv_fcopy_init(struct hv_util_service *srv) return 0; } +static void hv_fcopy_cancel_work(void) +{ + cancel_delayed_work_sync(&fcopy_timeout_work); + cancel_work_sync(&fcopy_send_work); +} + +int hv_fcopy_pre_suspend(void) +{ + struct vmbus_channel *channel = fcopy_transaction.recv_channel; + struct hv_fcopy_hdr *fcopy_msg; + + /* + * Fake a CANCEL_FCOPY message for the user space daemon in case the + * daemon is in the middle of copying some file. It doesn't matter if + * there is already a message pending to be delivered to the user + * space since we force fcopy_transaction.state to be HVUTIL_READY, so + * the user space daemon's write() will fail with EINVAL (see + * fcopy_on_msg()), and the daemon will reset the device by closing + * and re-opening it. + */ + fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL); + if (!fcopy_msg) + return -ENOMEM; + + tasklet_disable(&channel->callback_event); + + fcopy_msg->operation = CANCEL_FCOPY; + + hv_fcopy_cancel_work(); + + /* We don't care about the return value. */ + hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL); + + kfree(fcopy_msg); + + fcopy_transaction.state = HVUTIL_READY; + + /* tasklet_enable() will be called in hv_fcopy_pre_resume(). */ + return 0; +} + +int hv_fcopy_pre_resume(void) +{ + struct vmbus_channel *channel = fcopy_transaction.recv_channel; + + tasklet_enable(&channel->callback_event); + + return 0; +} + void hv_fcopy_deinit(void) { fcopy_transaction.state = HVUTIL_DEVICE_DYING; - cancel_delayed_work_sync(&fcopy_timeout_work); + + hv_fcopy_cancel_work(); + hvutil_transport_destroy(hvt); } diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index ae7c028dc5a8..e74b144b8f3d 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -758,11 +758,50 @@ hv_kvp_init(struct hv_util_service *srv) return 0; } -void hv_kvp_deinit(void) +static void hv_kvp_cancel_work(void) { - kvp_transaction.state = HVUTIL_DEVICE_DYING; cancel_delayed_work_sync(&kvp_host_handshake_work); cancel_delayed_work_sync(&kvp_timeout_work); cancel_work_sync(&kvp_sendkey_work); +} + +int hv_kvp_pre_suspend(void) +{ + struct vmbus_channel *channel = kvp_transaction.recv_channel; + + tasklet_disable(&channel->callback_event); + + /* + * If there is a pending transtion, it's unnecessary to tell the host + * that the transaction will fail, because that is implied when + * util_suspend() calls vmbus_close() later. + */ + hv_kvp_cancel_work(); + + /* + * Forece the state to READY to handle the ICMSGTYPE_NEGOTIATE message + * later. The user space daemon may go out of order and its write() + * may fail with EINVAL: this doesn't matter since the daemon will + * reset the device by closing and re-opening it. 
+ */ + kvp_transaction.state = HVUTIL_READY; + return 0; +} + +int hv_kvp_pre_resume(void) +{ + struct vmbus_channel *channel = kvp_transaction.recv_channel; + + tasklet_enable(&channel->callback_event); + + return 0; +} + +void hv_kvp_deinit(void) +{ + kvp_transaction.state = HVUTIL_DEVICE_DYING; + + hv_kvp_cancel_work(); + hvutil_transport_destroy(hvt); } diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index 03b6454268b3..1c75b38f0d6d 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c @@ -379,10 +379,61 @@ hv_vss_init(struct hv_util_service *srv) return 0; } -void hv_vss_deinit(void) +static void hv_vss_cancel_work(void) { - vss_transaction.state = HVUTIL_DEVICE_DYING; cancel_delayed_work_sync(&vss_timeout_work); cancel_work_sync(&vss_handle_request_work); +} + +int hv_vss_pre_suspend(void) +{ + struct vmbus_channel *channel = vss_transaction.recv_channel; + struct hv_vss_msg *vss_msg; + + /* + * Fake a THAW message for the user space daemon in case the daemon + * has frozen the file systems. It doesn't matter if there is already + * a message pending to be delivered to the user space since we force + * vss_transaction.state to be HVUTIL_READY, so the user space daemon's + * write() will fail with EINVAL (see vss_on_msg()), and the daemon + * will reset the device by closing and re-opening it. + */ + vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL); + if (!vss_msg) + return -ENOMEM; + + tasklet_disable(&channel->callback_event); + + vss_msg->vss_hdr.operation = VSS_OP_THAW; + + /* Cancel any possible pending work. */ + hv_vss_cancel_work(); + + /* We don't care about the return value. */ + hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL); + + kfree(vss_msg); + + vss_transaction.state = HVUTIL_READY; + + /* tasklet_enable() will be called in hv_vss_pre_resume(). */ + return 0; +} + +int hv_vss_pre_resume(void) +{ + struct vmbus_channel *channel = vss_transaction.recv_channel; + + tasklet_enable(&channel->callback_event); + + return 0; +} + +void hv_vss_deinit(void) +{ + vss_transaction.state = HVUTIL_DEVICE_DYING; + + hv_vss_cancel_work(); + hvutil_transport_destroy(hvt); } diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 296f9098c9e4..92ee0fe4c919 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c @@ -24,6 +24,10 @@ #define SD_MAJOR 3 #define SD_MINOR 0 +#define SD_MINOR_1 1 +#define SD_MINOR_2 2 +#define SD_VERSION_3_1 (SD_MAJOR << 16 | SD_MINOR_1) +#define SD_VERSION_3_2 (SD_MAJOR << 16 | SD_MINOR_2) #define SD_VERSION (SD_MAJOR << 16 | SD_MINOR) #define SD_MAJOR_1 1 @@ -50,8 +54,10 @@ static int sd_srv_version; static int ts_srv_version; static int hb_srv_version; -#define SD_VER_COUNT 2 +#define SD_VER_COUNT 4 static const int sd_versions[] = { + SD_VERSION_3_2, + SD_VERSION_3_1, SD_VERSION, SD_VERSION_1 }; @@ -75,18 +81,56 @@ static const int fw_versions[] = { UTIL_WS2K8_FW_VERSION }; +/* + * Send the "hibernate" udev event in a thread context. 
+ */ +struct hibernate_work_context { + struct work_struct work; + struct hv_device *dev; +}; + +static struct hibernate_work_context hibernate_context; +static bool hibernation_supported; + +static void send_hibernate_uevent(struct work_struct *work) +{ + char *uevent_env[2] = { "EVENT=hibernate", NULL }; + struct hibernate_work_context *ctx; + + ctx = container_of(work, struct hibernate_work_context, work); + + kobject_uevent_env(&ctx->dev->device.kobj, KOBJ_CHANGE, uevent_env); + + pr_info("Sent hibernation uevent\n"); +} + +static int hv_shutdown_init(struct hv_util_service *srv) +{ + struct vmbus_channel *channel = srv->channel; + + INIT_WORK(&hibernate_context.work, send_hibernate_uevent); + hibernate_context.dev = channel->device_obj; + + hibernation_supported = hv_is_hibernation_supported(); + + return 0; +} + static void shutdown_onchannelcallback(void *context); static struct hv_util_service util_shutdown = { .util_cb = shutdown_onchannelcallback, + .util_init = hv_shutdown_init, }; static int hv_timesync_init(struct hv_util_service *srv); +static int hv_timesync_pre_suspend(void); static void hv_timesync_deinit(void); static void timesync_onchannelcallback(void *context); static struct hv_util_service util_timesynch = { .util_cb = timesync_onchannelcallback, .util_init = hv_timesync_init, + .util_pre_suspend = hv_timesync_pre_suspend, .util_deinit = hv_timesync_deinit, }; @@ -98,18 +142,24 @@ static struct hv_util_service util_heartbeat = { static struct hv_util_service util_kvp = { .util_cb = hv_kvp_onchannelcallback, .util_init = hv_kvp_init, + .util_pre_suspend = hv_kvp_pre_suspend, + .util_pre_resume = hv_kvp_pre_resume, .util_deinit = hv_kvp_deinit, }; static struct hv_util_service util_vss = { .util_cb = hv_vss_onchannelcallback, .util_init = hv_vss_init, + .util_pre_suspend = hv_vss_pre_suspend, + .util_pre_resume = hv_vss_pre_resume, .util_deinit = hv_vss_deinit, }; static struct hv_util_service util_fcopy = { .util_cb = hv_fcopy_onchannelcallback, .util_init = hv_fcopy_init, + .util_pre_suspend = hv_fcopy_pre_suspend, + .util_pre_resume = hv_fcopy_pre_resume, .util_deinit = hv_fcopy_deinit, }; @@ -118,17 +168,27 @@ static void perform_shutdown(struct work_struct *dummy) orderly_poweroff(true); } +static void perform_restart(struct work_struct *dummy) +{ + orderly_reboot(); +} + /* * Perform the shutdown operation in a thread context. */ static DECLARE_WORK(shutdown_work, perform_shutdown); +/* + * Perform the restart operation in a thread context. + */ +static DECLARE_WORK(restart_work, perform_restart); + static void shutdown_onchannelcallback(void *context) { struct vmbus_channel *channel = context; + struct work_struct *work = NULL; u32 recvlen; u64 requestid; - bool execute_shutdown = false; u8 *shut_txf_buf = util_shutdown.recv_buffer; struct shutdown_msg_data *shutdown_msg; @@ -157,19 +217,37 @@ static void shutdown_onchannelcallback(void *context) sizeof(struct vmbuspipe_hdr) + sizeof(struct icmsg_hdr)]; + /* + * shutdown_msg->flags can be 0(shut down), 2(reboot), + * or 4(hibernate). It may bitwise-OR 1, which means + * performing the request by force. Linux always tries + * to perform the request by force. 
+ */ switch (shutdown_msg->flags) { case 0: case 1: icmsghdrp->status = HV_S_OK; - execute_shutdown = true; - + work = &shutdown_work; pr_info("Shutdown request received -" " graceful shutdown initiated\n"); break; + case 2: + case 3: + icmsghdrp->status = HV_S_OK; + work = &restart_work; + pr_info("Restart request received -" + " graceful restart initiated\n"); + break; + case 4: + case 5: + pr_info("Hibernation request received\n"); + icmsghdrp->status = hibernation_supported ? + HV_S_OK : HV_E_FAIL; + if (hibernation_supported) + work = &hibernate_context.work; + break; default: icmsghdrp->status = HV_E_FAIL; - execute_shutdown = false; - pr_info("Shutdown request received -" " Invalid request\n"); break; @@ -184,8 +262,8 @@ static void shutdown_onchannelcallback(void *context) VM_PKT_DATA_INBAND, 0); } - if (execute_shutdown == true) - schedule_work(&shutdown_work); + if (work) + schedule_work(work); } /* @@ -441,6 +519,44 @@ static int util_remove(struct hv_device *dev) return 0; } +/* + * When we're in util_suspend(), all the userspace processes have been frozen + * (refer to hibernate() -> freeze_processes()). The userspace is thawed only + * after the whole resume procedure, including util_resume(), finishes. + */ +static int util_suspend(struct hv_device *dev) +{ + struct hv_util_service *srv = hv_get_drvdata(dev); + int ret = 0; + + if (srv->util_pre_suspend) { + ret = srv->util_pre_suspend(); + if (ret) + return ret; + } + + vmbus_close(dev->channel); + + return 0; +} + +static int util_resume(struct hv_device *dev) +{ + struct hv_util_service *srv = hv_get_drvdata(dev); + int ret = 0; + + if (srv->util_pre_resume) { + ret = srv->util_pre_resume(); + if (ret) + return ret; + } + + ret = vmbus_open(dev->channel, 4 * HV_HYP_PAGE_SIZE, + 4 * HV_HYP_PAGE_SIZE, NULL, 0, srv->util_cb, + dev->channel); + return ret; +} + static const struct hv_vmbus_device_id id_table[] = { /* Shutdown guid */ { HV_SHUTDOWN_GUID, @@ -477,6 +593,8 @@ static struct hv_driver util_drv = { .id_table = id_table, .probe = util_probe, .remove = util_remove, + .suspend = util_suspend, + .resume = util_resume, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, @@ -546,11 +664,23 @@ static int hv_timesync_init(struct hv_util_service *srv) return 0; } +static void hv_timesync_cancel_work(void) +{ + cancel_work_sync(&adj_time_work); +} + +static int hv_timesync_pre_suspend(void) +{ + hv_timesync_cancel_work(); + return 0; +} + static void hv_timesync_deinit(void) { if (hv_ptp_clock) ptp_clock_unregister(hv_ptp_clock); - cancel_work_sync(&adj_time_work); + + hv_timesync_cancel_work(); } static int __init init_hyperv_utils(void) diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index 20edcfd3b96c..f5fa3b3c9baf 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -352,14 +352,20 @@ void vmbus_on_msg_dpc(unsigned long data); int hv_kvp_init(struct hv_util_service *srv); void hv_kvp_deinit(void); +int hv_kvp_pre_suspend(void); +int hv_kvp_pre_resume(void); void hv_kvp_onchannelcallback(void *context); int hv_vss_init(struct hv_util_service *srv); void hv_vss_deinit(void); +int hv_vss_pre_suspend(void); +int hv_vss_pre_resume(void); void hv_vss_onchannelcallback(void *context); int hv_fcopy_init(struct hv_util_service *srv); void hv_fcopy_deinit(void); +int hv_fcopy_pre_suspend(void); +int hv_fcopy_pre_resume(void); void hv_fcopy_onchannelcallback(void *context); void vmbus_initiate_unload(bool crash); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 
4ef5a66df680..029378c27421 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1033,6 +1033,10 @@ void vmbus_on_msg_dpc(unsigned long data) } entry = &channel_message_table[hdr->msgtype]; + + if (!entry->message_handler) + goto msg_handled; + if (entry->handler_type == VMHT_BLOCKING) { ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx == NULL) diff --git a/drivers/input/serio/hyperv-keyboard.c b/drivers/input/serio/hyperv-keyboard.c index e486a8a74c40..df4e9f6f4529 100644 --- a/drivers/input/serio/hyperv-keyboard.c +++ b/drivers/input/serio/hyperv-keyboard.c @@ -259,6 +259,8 @@ static int hv_kbd_connect_to_vsp(struct hv_device *hv_dev) u32 proto_status; int error; + reinit_completion(&kbd_dev->wait_event); + request = &kbd_dev->protocol_req; memset(request, 0, sizeof(struct synth_kbd_protocol_request)); request->header.type = __cpu_to_le32(SYNTH_KBD_PROTOCOL_REQUEST); @@ -380,6 +382,29 @@ static int hv_kbd_remove(struct hv_device *hv_dev) return 0; } +static int hv_kbd_suspend(struct hv_device *hv_dev) +{ + vmbus_close(hv_dev->channel); + + return 0; +} + +static int hv_kbd_resume(struct hv_device *hv_dev) +{ + int ret; + + ret = vmbus_open(hv_dev->channel, + KBD_VSC_SEND_RING_BUFFER_SIZE, + KBD_VSC_RECV_RING_BUFFER_SIZE, + NULL, 0, + hv_kbd_on_channel_callback, + hv_dev); + if (ret == 0) + ret = hv_kbd_connect_to_vsp(hv_dev); + + return ret; +} + static const struct hv_vmbus_device_id id_table[] = { /* Keyboard guid */ { HV_KBD_GUID, }, @@ -393,6 +418,8 @@ static struct hv_driver hv_kbd_drv = { .id_table = id_table, .probe = hv_kbd_probe, .remove = hv_kbd_remove, + .suspend = hv_kbd_suspend, + .resume = hv_kbd_resume, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index aa9541bf964b..f65991a67af2 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -2215,6 +2215,7 @@ config FB_HYPERV select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB_DEFERRED_IO + select DMA_CMA if HAVE_DMA_CONTIGUOUS && CMA help This framebuffer driver supports Microsoft Hyper-V Synthetic Video. diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c index afe9fd751cd5..f47d50e560c0 100644 --- a/drivers/video/fbdev/hyperv_fb.c +++ b/drivers/video/fbdev/hyperv_fb.c @@ -31,6 +31,16 @@ * "set-vmvideo" command. For example * set-vmvideo -vmname name -horizontalresolution:1920 \ * -verticalresolution:1200 -resolutiontype single + * + * Gen 1 VMs also support direct using VM's physical memory for framebuffer. + * It could improve the efficiency and performance for framebuffer and VM. + * This requires to allocate contiguous physical memory from Linux kernel's + * CMA memory allocator. To enable this, supply a kernel parameter to give + * enough memory space to CMA allocator for framebuffer. For example: + * cma=130m + * This gives 130MB memory to CMA allocator that can be allocated to + * framebuffer. For reference, 8K resolution (7680x4320) takes about + * 127MB memory. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -228,7 +238,6 @@ struct synthvid_msg { } __packed; - /* FB driver definitions and structures */ #define HVFB_WIDTH 1152 /* default screen width */ #define HVFB_HEIGHT 864 /* default screen height */ @@ -258,12 +267,15 @@ struct hvfb_par { /* If true, the VSC notifies the VSP on every framebuffer change */ bool synchronous_fb; + /* If true, need to copy from deferred IO mem to framebuffer mem */ + bool need_docopy; + struct notifier_block hvfb_panic_nb; /* Memory for deferred IO and frame buffer itself */ unsigned char *dio_vp; unsigned char *mmio_vp; - unsigned long mmio_pp; + phys_addr_t mmio_pp; /* Dirty rectangle, protected by delayed_refresh_lock */ int x1, y1, x2, y2; @@ -434,7 +446,7 @@ static void synthvid_deferred_io(struct fb_info *p, maxy = max_t(int, maxy, y2); /* Copy from dio space to mmio address */ - if (par->fb_ready) + if (par->fb_ready && par->need_docopy) hvfb_docopy(par, start, PAGE_SIZE); } @@ -751,12 +763,12 @@ static void hvfb_update_work(struct work_struct *w) return; /* Copy the dirty rectangle to frame buffer memory */ - for (j = y1; j < y2; j++) { - hvfb_docopy(par, - j * info->fix.line_length + - (x1 * screen_depth / 8), - (x2 - x1) * screen_depth / 8); - } + if (par->need_docopy) + for (j = y1; j < y2; j++) + hvfb_docopy(par, + j * info->fix.line_length + + (x1 * screen_depth / 8), + (x2 - x1) * screen_depth / 8); /* Refresh */ if (par->fb_ready && par->update) @@ -801,7 +813,8 @@ static int hvfb_on_panic(struct notifier_block *nb, par = container_of(nb, struct hvfb_par, hvfb_panic_nb); par->synchronous_fb = true; info = par->info; - hvfb_docopy(par, 0, dio_fb_size); + if (par->need_docopy) + hvfb_docopy(par, 0, dio_fb_size); synthvid_update(info, 0, 0, INT_MAX, INT_MAX); return NOTIFY_DONE; @@ -940,6 +953,62 @@ static void hvfb_get_option(struct fb_info *info) return; } +/* + * Allocate enough contiguous physical memory. + * Return physical address if succeeded or -1 if failed. 
+ */ +static phys_addr_t hvfb_get_phymem(struct hv_device *hdev, + unsigned int request_size) +{ + struct page *page = NULL; + dma_addr_t dma_handle; + void *vmem; + phys_addr_t paddr = 0; + unsigned int order = get_order(request_size); + + if (request_size == 0) + return -1; + + if (order < MAX_ORDER) { + /* Call alloc_pages if the size is less than 2^MAX_ORDER */ + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -1; + + paddr = (page_to_pfn(page) << PAGE_SHIFT); + } else { + /* Allocate from CMA */ + hdev->device.coherent_dma_mask = DMA_BIT_MASK(64); + + vmem = dma_alloc_coherent(&hdev->device, + round_up(request_size, PAGE_SIZE), + &dma_handle, + GFP_KERNEL | __GFP_NOWARN); + + if (!vmem) + return -1; + + paddr = virt_to_phys(vmem); + } + + return paddr; +} + +/* Release contiguous physical memory */ +static void hvfb_release_phymem(struct hv_device *hdev, + phys_addr_t paddr, unsigned int size) +{ + unsigned int order = get_order(size); + + if (order < MAX_ORDER) + __free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order); + else + dma_free_coherent(&hdev->device, + round_up(size, PAGE_SIZE), + phys_to_virt(paddr), + paddr); +} + /* Get framebuffer memory from Hyper-V video pci space */ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) @@ -949,22 +1018,61 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) void __iomem *fb_virt; int gen2vm = efi_enabled(EFI_BOOT); resource_size_t pot_start, pot_end; + phys_addr_t paddr; int ret; - dio_fb_size = - screen_width * screen_height * screen_depth / 8; + info->apertures = alloc_apertures(1); + if (!info->apertures) + return -ENOMEM; - if (gen2vm) { - pot_start = 0; - pot_end = -1; - } else { + if (!gen2vm) { pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT, - PCI_DEVICE_ID_HYPERV_VIDEO, NULL); + PCI_DEVICE_ID_HYPERV_VIDEO, NULL); if (!pdev) { pr_err("Unable to find PCI Hyper-V video\n"); + kfree(info->apertures); return -ENODEV; } + info->apertures->ranges[0].base = pci_resource_start(pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(pdev, 0); + + /* + * For Gen 1 VM, we can directly use the contiguous memory + * from VM. If we succeed, deferred IO happens directly + * on this allocated framebuffer memory, avoiding extra + * memory copy. + */ + paddr = hvfb_get_phymem(hdev, screen_fb_size); + if (paddr != (phys_addr_t) -1) { + par->mmio_pp = paddr; + par->mmio_vp = par->dio_vp = __va(paddr); + + info->fix.smem_start = paddr; + info->fix.smem_len = screen_fb_size; + info->screen_base = par->mmio_vp; + info->screen_size = screen_fb_size; + + par->need_docopy = false; + goto getmem_done; + } + pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n"); + } else { + info->apertures->ranges[0].base = screen_info.lfb_base; + info->apertures->ranges[0].size = screen_info.lfb_size; + } + + /* + * Cannot use the contiguous physical memory. + * Allocate mmio space for framebuffer. 
+ */ + dio_fb_size = + screen_width * screen_height * screen_depth / 8; + + if (gen2vm) { + pot_start = 0; + pot_end = -1; + } else { if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) || pci_resource_len(pdev, 0) < screen_fb_size) { pr_err("Resource not available or (0x%lx < 0x%lx)\n", @@ -993,20 +1101,6 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) if (par->dio_vp == NULL) goto err3; - info->apertures = alloc_apertures(1); - if (!info->apertures) - goto err4; - - if (gen2vm) { - info->apertures->ranges[0].base = screen_info.lfb_base; - info->apertures->ranges[0].size = screen_info.lfb_size; - remove_conflicting_framebuffers(info->apertures, - KBUILD_MODNAME, false); - } else { - info->apertures->ranges[0].base = pci_resource_start(pdev, 0); - info->apertures->ranges[0].size = pci_resource_len(pdev, 0); - } - /* Physical address of FB device */ par->mmio_pp = par->mem->start; /* Virtual address of FB device */ @@ -1017,13 +1111,15 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info) info->screen_base = par->dio_vp; info->screen_size = dio_fb_size; +getmem_done: + remove_conflicting_framebuffers(info->apertures, + KBUILD_MODNAME, false); if (!gen2vm) pci_dev_put(pdev); + kfree(info->apertures); return 0; -err4: - vfree(par->dio_vp); err3: iounmap(fb_virt); err2: @@ -1032,18 +1128,25 @@ err2: err1: if (!gen2vm) pci_dev_put(pdev); + kfree(info->apertures); return -ENOMEM; } /* Release the framebuffer */ -static void hvfb_putmem(struct fb_info *info) +static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info) { struct hvfb_par *par = info->par; - vfree(par->dio_vp); - iounmap(info->screen_base); - vmbus_free_mmio(par->mem->start, screen_fb_size); + if (par->need_docopy) { + vfree(par->dio_vp); + iounmap(info->screen_base); + vmbus_free_mmio(par->mem->start, screen_fb_size); + } else { + hvfb_release_phymem(hdev, info->fix.smem_start, + screen_fb_size); + } + par->mem = NULL; } @@ -1062,6 +1165,7 @@ static int hvfb_probe(struct hv_device *hdev, par = info->par; par->info = info; par->fb_ready = false; + par->need_docopy = true; init_completion(&par->wait); INIT_DELAYED_WORK(&par->dwork, hvfb_update_work); @@ -1147,7 +1251,7 @@ static int hvfb_probe(struct hv_device *hdev, error: fb_deferred_io_cleanup(info); - hvfb_putmem(info); + hvfb_putmem(hdev, info); error2: vmbus_close(hdev->channel); error1: @@ -1177,7 +1281,7 @@ static int hvfb_remove(struct hv_device *hdev) vmbus_close(hdev->channel); hv_set_drvdata(hdev, NULL); - hvfb_putmem(info); + hvfb_putmem(hdev, info); framebuffer_release(info); return 0; @@ -1194,6 +1298,7 @@ static int hvfb_suspend(struct hv_device *hdev) fb_set_suspend(info, 1); cancel_delayed_work_sync(&par->dwork); + cancel_delayed_work_sync(&info->deferred_work); par->update_saved = par->update; par->update = false; @@ -1227,6 +1332,7 @@ static int hvfb_resume(struct hv_device *hdev) par->fb_ready = true; par->update = par->update_saved; + schedule_delayed_work(&info->deferred_work, info->fbdefio->delay); schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY); /* 0 means do resume */ diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 26f3aeeae1ca..692c89ccf5df 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -425,6 +425,8 @@ enum vmbus_channel_message_type { CHANNELMSG_19 = 19, CHANNELMSG_20 = 20, CHANNELMSG_TL_CONNECT_REQUEST = 21, + CHANNELMSG_22 = 22, + CHANNELMSG_TL_CONNECT_RESULT = 23, CHANNELMSG_COUNT }; @@ -1433,6 +1435,8 @@ struct hv_util_service { void (*util_cb)(void *); 
int (*util_init)(struct hv_util_service *); void (*util_deinit)(void); + int (*util_pre_suspend)(void); + int (*util_pre_resume)(void); }; struct vmbuspipe_hdr { diff --git a/tools/hv/hv_fcopy_daemon.c b/tools/hv/hv_fcopy_daemon.c index aea2d91ab364..16d629b22c25 100644 --- a/tools/hv/hv_fcopy_daemon.c +++ b/tools/hv/hv_fcopy_daemon.c @@ -80,6 +80,8 @@ static int hv_start_fcopy(struct hv_start_fcopy *smsg) error = 0; done: + if (error) + target_fname[0] = '\0'; return error; } @@ -108,15 +110,29 @@ static int hv_copy_data(struct hv_do_fcopy *cpmsg) return ret; } +/* + * Reset target_fname to "" in the two below functions for hibernation: if + * the fcopy operation is aborted by hibernation, the daemon should remove the + * partially-copied file; to achieve this, the hv_utils driver always fakes a + * CANCEL_FCOPY message upon suspend, and later when the VM resumes back, + * the daemon calls hv_copy_cancel() to remove the file; if a file is copied + * successfully before suspend, hv_copy_finished() must reset target_fname to + * avoid that the file can be incorrectly removed upon resume, since the faked + * CANCEL_FCOPY message is spurious in this case. + */ static int hv_copy_finished(void) { close(target_fd); + target_fname[0] = '\0'; return 0; } static int hv_copy_cancel(void) { close(target_fd); - unlink(target_fname); + if (strlen(target_fname) > 0) { + unlink(target_fname); + target_fname[0] = '\0'; + } return 0; } @@ -131,7 +147,7 @@ void print_usage(char *argv[]) int main(int argc, char *argv[]) { - int fcopy_fd; + int fcopy_fd = -1; int error; int daemonize = 1, long_index = 0, opt; int version = FCOPY_CURRENT_VERSION; @@ -141,7 +157,7 @@ int main(int argc, char *argv[]) struct hv_do_fcopy copy; __u32 kernel_modver; } buffer = { }; - int in_handshake = 1; + int in_handshake; static struct option long_options[] = { {"help", no_argument, 0, 'h' }, @@ -170,6 +186,12 @@ int main(int argc, char *argv[]) openlog("HV_FCOPY", 0, LOG_USER); syslog(LOG_INFO, "starting; pid is:%d", getpid()); +reopen_fcopy_fd: + if (fcopy_fd != -1) + close(fcopy_fd); + /* Remove any possible partially-copied file on error */ + hv_copy_cancel(); + in_handshake = 1; fcopy_fd = open("/dev/vmbus/hv_fcopy", O_RDWR); if (fcopy_fd < 0) { @@ -196,7 +218,7 @@ int main(int argc, char *argv[]) len = pread(fcopy_fd, &buffer, sizeof(buffer), 0); if (len < 0) { syslog(LOG_ERR, "pread failed: %s", strerror(errno)); - exit(EXIT_FAILURE); + goto reopen_fcopy_fd; } if (in_handshake) { @@ -231,9 +253,14 @@ int main(int argc, char *argv[]) } + /* + * pwrite() may return an error due to the faked CANCEL_FCOPY + * message upon hibernation. Ignore the error by resetting the + * dev file, i.e. closing and re-opening it. 
+ */ if (pwrite(fcopy_fd, &error, sizeof(int), 0) != sizeof(int)) { syslog(LOG_ERR, "pwrite failed: %s", strerror(errno)); - exit(EXIT_FAILURE); + goto reopen_fcopy_fd; } } } diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c index e9ef4ca6a655..ee9c1bb2293e 100644 --- a/tools/hv/hv_kvp_daemon.c +++ b/tools/hv/hv_kvp_daemon.c @@ -76,7 +76,7 @@ enum { DNS }; -static int in_hand_shake = 1; +static int in_hand_shake; static char *os_name = ""; static char *os_major = ""; @@ -1360,7 +1360,7 @@ void print_usage(char *argv[]) int main(int argc, char *argv[]) { - int kvp_fd, len; + int kvp_fd = -1, len; int error; struct pollfd pfd; char *p; @@ -1400,14 +1400,6 @@ int main(int argc, char *argv[]) openlog("KVP", 0, LOG_USER); syslog(LOG_INFO, "KVP starting; pid is:%d", getpid()); - kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC); - - if (kvp_fd < 0) { - syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s", - errno, strerror(errno)); - exit(EXIT_FAILURE); - } - /* * Retrieve OS release information. */ @@ -1423,6 +1415,18 @@ int main(int argc, char *argv[]) exit(EXIT_FAILURE); } +reopen_kvp_fd: + if (kvp_fd != -1) + close(kvp_fd); + in_hand_shake = 1; + kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC); + + if (kvp_fd < 0) { + syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s", + errno, strerror(errno)); + exit(EXIT_FAILURE); + } + /* * Register ourselves with the kernel. */ @@ -1456,9 +1460,7 @@ int main(int argc, char *argv[]) if (len != sizeof(struct hv_kvp_msg)) { syslog(LOG_ERR, "read failed; error:%d %s", errno, strerror(errno)); - - close(kvp_fd); - return EXIT_FAILURE; + goto reopen_kvp_fd; } /* @@ -1617,13 +1619,17 @@ int main(int argc, char *argv[]) break; } - /* Send the value back to the kernel. */ + /* + * Send the value back to the kernel. Note: the write() may + * return an error due to hibernation; we can ignore the error + * by resetting the dev file, i.e. closing and re-opening it. 
+ */ kvp_done: len = write(kvp_fd, hv_msg, sizeof(struct hv_kvp_msg)); if (len != sizeof(struct hv_kvp_msg)) { syslog(LOG_ERR, "write failed; error: %d %s", errno, strerror(errno)); - exit(EXIT_FAILURE); + goto reopen_kvp_fd; } } diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c index 92902a88f671..dd111870beee 100644 --- a/tools/hv/hv_vss_daemon.c +++ b/tools/hv/hv_vss_daemon.c @@ -28,6 +28,8 @@ #include <stdbool.h> #include <dirent.h> +static bool fs_frozen; + /* Don't use syslog() in the function since that can cause write to disk */ static int vss_do_freeze(char *dir, unsigned int cmd) { @@ -155,18 +157,27 @@ static int vss_operate(int operation) continue; } error |= vss_do_freeze(ent->mnt_dir, cmd); - if (error && operation == VSS_OP_FREEZE) - goto err; + if (operation == VSS_OP_FREEZE) { + if (error) + goto err; + fs_frozen = true; + } } endmntent(mounts); if (root_seen) { error |= vss_do_freeze("/", cmd); - if (error && operation == VSS_OP_FREEZE) - goto err; + if (operation == VSS_OP_FREEZE) { + if (error) + goto err; + fs_frozen = true; + } } + if (operation == VSS_OP_THAW && !error) + fs_frozen = false; + goto out; err: save_errno = errno; @@ -175,6 +186,7 @@ err: endmntent(mounts); } vss_operate(VSS_OP_THAW); + fs_frozen = false; /* Call syslog after we thaw all filesystems */ if (ent) syslog(LOG_ERR, "FREEZE of %s failed; error:%d %s", @@ -196,13 +208,13 @@ void print_usage(char *argv[]) int main(int argc, char *argv[]) { - int vss_fd, len; + int vss_fd = -1, len; int error; struct pollfd pfd; int op; struct hv_vss_msg vss_msg[1]; int daemonize = 1, long_index = 0, opt; - int in_handshake = 1; + int in_handshake; __u32 kernel_modver; static struct option long_options[] = { @@ -232,6 +244,18 @@ int main(int argc, char *argv[]) openlog("Hyper-V VSS", 0, LOG_USER); syslog(LOG_INFO, "VSS starting; pid is:%d", getpid()); +reopen_vss_fd: + if (vss_fd != -1) + close(vss_fd); + if (fs_frozen) { + if (vss_operate(VSS_OP_THAW) || fs_frozen) { + syslog(LOG_ERR, "failed to thaw file system: err=%d", + errno); + exit(EXIT_FAILURE); + } + } + + in_handshake = 1; vss_fd = open("/dev/vmbus/hv_vss", O_RDWR); if (vss_fd < 0) { syslog(LOG_ERR, "open /dev/vmbus/hv_vss failed; error: %d %s", @@ -284,8 +308,7 @@ int main(int argc, char *argv[]) if (len != sizeof(struct hv_vss_msg)) { syslog(LOG_ERR, "read failed; error:%d %s", errno, strerror(errno)); - close(vss_fd); - return EXIT_FAILURE; + goto reopen_vss_fd; } op = vss_msg->vss_hdr.operation; @@ -312,14 +335,18 @@ int main(int argc, char *argv[]) default: syslog(LOG_ERR, "Illegal op:%d\n", op); } + + /* + * The write() may return an error due to the faked VSS_OP_THAW + * message upon hibernation. Ignore the error by resetting the + * dev file, i.e. closing and re-opening it. + */ vss_msg->error = error; len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg)); if (len != sizeof(struct hv_vss_msg)) { syslog(LOG_ERR, "write failed; error: %d %s", errno, strerror(errno)); - - if (op == VSS_OP_FREEZE) - vss_operate(VSS_OP_THAW); + goto reopen_vss_fd; } } |
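Finally, the hyperv_fb change for Gen 1 VMs comes down to one allocation strategy: back the framebuffer with ordinary system memory so deferred I/O can write to it directly (par->need_docopy = false), using the buddy allocator for requests below MAX_ORDER pages and CMA-backed dma_alloc_coherent() for larger ones; an 8K mode needs about 127 MB, hence the cma=130m boot-parameter suggestion in the driver comment. The helper, condensed from the hunk above (the _sketch suffix is mine; includes are what such a snippet would plausibly need):

#include <linux/dma-mapping.h>
#include <linux/hyperv.h>
#include <linux/io.h>
#include <linux/mm.h>

static phys_addr_t hvfb_get_phymem_sketch(struct hv_device *hdev,
					  unsigned int request_size)
{
	unsigned int order = get_order(request_size);
	dma_addr_t dma_handle;
	struct page *page;
	void *vmem;

	if (request_size == 0)
		return -1;

	if (order < MAX_ORDER) {
		/* Small enough for a single buddy-allocator allocation. */
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!page)
			return -1;
		return (phys_addr_t)page_to_pfn(page) << PAGE_SHIFT;
	}

	/* Larger framebuffers come from the CMA area (cma= on the cmdline). */
	hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
	vmem = dma_alloc_coherent(&hdev->device,
				  round_up(request_size, PAGE_SIZE),
				  &dma_handle, GFP_KERNEL | __GFP_NOWARN);
	if (!vmem)
		return -1;

	return virt_to_phys(vmem);
}

hvfb_getmem() tries this first on Gen 1 VMs and only falls back to the old MMIO plus deferred-IO copy path when it fails, which is also why hvfb_putmem() now takes the hv_device so it can free whichever kind of memory was used.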