Diffstat (limited to 'drivers/gpu/drm/i915/gvt')
 drivers/gpu/drm/i915/gvt/cfg_space.c     |   3
 drivers/gpu/drm/i915/gvt/cmd_parser.c    |  50
 drivers/gpu/drm/i915/gvt/display.c       |  51
 drivers/gpu/drm/i915/gvt/execlist.c      |  20
 drivers/gpu/drm/i915/gvt/firmware.c      |   9
 drivers/gpu/drm/i915/gvt/gtt.c           |   8
 drivers/gpu/drm/i915/gvt/gvt.c           |  21
 drivers/gpu/drm/i915/gvt/gvt.h           |  18
 drivers/gpu/drm/i915/gvt/handlers.c      | 427
 drivers/gpu/drm/i915/gvt/interrupt.c     |   5
 drivers/gpu/drm/i915/gvt/kvmgt.c         |  49
 drivers/gpu/drm/i915/gvt/mmio.h          |  19
 drivers/gpu/drm/i915/gvt/render.c        |  29
 drivers/gpu/drm/i915/gvt/sched_policy.c  | 242
 drivers/gpu/drm/i915/gvt/sched_policy.h  |   2
 drivers/gpu/drm/i915/gvt/scheduler.c     |   8
 drivers/gpu/drm/i915/gvt/scheduler.h     |   1
 drivers/gpu/drm/i915/gvt/vgpu.c          | 128
 18 files changed, 718 insertions(+), 372 deletions(-)
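The largest functional change in the diff below is the rewrite of the time-based scheduler in sched_policy.c: each vGPU now carries a weight (VGPU_MAX_WEIGHT / number-of-vGPUs for its type), and every GVT_TS_BALANCE_PERIOD_MS (100 ms) balance period each vGPU on the run queue is allocated a slice of the period proportional to weight / total_weight. The stand-alone C sketch that follows is an illustration only, not the kernel code; the names demo_vgpu, balance_timeslice and the 8/4/4 weight mix are invented here, while the period constant and the proportional-share arithmetic are taken from gvt_balance_timeslice() in the patch.

/*
 * Illustration only -- mirrors the arithmetic in gvt_balance_timeslice():
 * every GVT_TS_BALANCE_PERIOD_MS, each vGPU on the run queue is handed a
 * share of the period proportional to its scheduling weight.
 */
#include <stdio.h>

#define GVT_TS_BALANCE_PERIOD_MS 100

struct demo_vgpu {
	const char *name;
	int weight;               /* corresponds to vgpu->sched_ctl.weight */
	long long allocated_ns;   /* refilled each balance period */
};

static void balance_timeslice(struct demo_vgpu *vgpus, int nr)
{
	long long period_ns = (long long)GVT_TS_BALANCE_PERIOD_MS * 1000000;
	int total_weight = 0;
	int i;

	for (i = 0; i < nr; i++)
		total_weight += vgpus[i].weight;

	for (i = 0; i < nr; i++)
		vgpus[i].allocated_ns =
			period_ns * vgpus[i].weight / total_weight;
}

int main(void)
{
	/* hypothetical mix: one weight-8 vGPU sharing with two weight-4 vGPUs */
	struct demo_vgpu vgpus[] = {
		{ "vgpu1", 8, 0 },
		{ "vgpu2", 4, 0 },
		{ "vgpu3", 4, 0 },
	};
	int i;

	balance_timeslice(vgpus, 3);
	for (i = 0; i < 3; i++)
		printf("%s: %lld ms of every %d ms period\n", vgpus[i].name,
		       vgpus[i].allocated_ns / 1000000, GVT_TS_BALANCE_PERIOD_MS);
	return 0;
}

With the 8/4/4 weights above the vGPUs receive 50, 25 and 25 ms per 100 ms period; in the actual patch, unused time is additionally carried forward within the balance window via left_ts, so a vGPU that was idle earlier in the window can catch up.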
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index b7d7721e72fa..40af17ec6312 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset, { int ret; - if (vgpu->failsafe) - return 0; - if (WARN_ON(bytes > 4)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 6f972afbdbc3..41b2c3aaa04a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id) { struct decode_info *d_info; - if (ring_id >= I915_NUM_ENGINES) - return INVALID_OP; - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; if (d_info == NULL) return INVALID_OP; @@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id) struct decode_info *d_info; int i; - if (ring_id >= I915_NUM_ENGINES) - return; - d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)]; if (d_info == NULL) return; @@ -1215,7 +1209,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s, if (!info->async_flip) return 0; - if (IS_SKYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0); tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & GENMASK(12, 10)) >> 10; @@ -1243,7 +1237,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip( set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12), info->surf_val << 12); - if (IS_SKYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0), info->stride_val); set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10), @@ -1267,7 +1261,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s, if (IS_BROADWELL(dev_priv)) return gen8_decode_mi_display_flip(s, info); - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return skl_decode_mi_display_flip(s, info); return -ENODEV; @@ -1278,7 +1272,9 @@ static int check_mi_display_flip(struct parser_exec_state *s, { struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) + if (IS_BROADWELL(dev_priv) + || IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv)) return gen8_check_mi_display_flip(s, info); return -ENODEV; } @@ -1289,7 +1285,9 @@ static int update_plane_mmio_from_mi_display_flip( { struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; - if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv)) + if (IS_BROADWELL(dev_priv) + || IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv)) return gen8_update_plane_mmio_from_mi_display_flip(s, info); return -ENODEV; } @@ -1569,7 +1567,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s) { struct intel_gvt *gvt = s->vgpu->gvt; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { /* BDW decides privilege based on address space */ if (cmd_val(s, 0) & (1 << 8)) return 0; @@ -2478,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) t1 = get_cycles(); - memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state)); + s_before_advance_custom = *s; if (info->handler) { ret = info->handler(s); @@ -2604,6 +2603,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 
unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail; struct parser_exec_state s; int ret = 0; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); /* ring base is page aligned */ if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE))) @@ -2618,14 +2620,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.buf_type = RING_BUFFER_INSTRUCTION; s.buf_addr_type = GTT_BUFFER; - s.vgpu = wa_ctx->workload->vgpu; - s.ring_id = wa_ctx->workload->ring_id; + s.vgpu = workload->vgpu; + s.ring_id = workload->ring_id; s.ring_start = wa_ctx->indirect_ctx.guest_gma; s.ring_size = ring_size; s.ring_head = gma_head; s.ring_tail = gma_tail; s.rb_va = wa_ctx->indirect_ctx.shadow_va; - s.workload = wa_ctx->workload; + s.workload = workload; ret = ip_gma_set(&s, gma_head); if (ret) @@ -2708,12 +2710,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ctx_size = wa_ctx->indirect_ctx.size; unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; - struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + struct intel_vgpu *vgpu = workload->vgpu; struct drm_i915_gem_object *obj; int ret = 0; void *map; - obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv, + obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv, roundup(ctx_size + CACHELINE_BYTES, PAGE_SIZE)); if (IS_ERR(obj)) @@ -2733,8 +2738,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) goto unmap_src; } - ret = copy_gma_to_hva(wa_ctx->workload->vgpu, - wa_ctx->workload->vgpu->gtt.ggtt_mm, + ret = copy_gma_to_hva(workload->vgpu, + workload->vgpu->gtt.ggtt_mm, guest_gma, guest_gma + ctx_size, map); if (ret < 0) { @@ -2772,7 +2777,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) { int ret; - struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + struct intel_vgpu *vgpu = workload->vgpu; if (wa_ctx->indirect_ctx.size == 0) return 0; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 5419ae6ec633..e0261fcc5b50 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -161,8 +161,9 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { #define DPCD_HEADER_SIZE 0xb +/* let the virtual display supports DP1.2 */ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { - 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void emulate_monitor_status_change(struct intel_vgpu *vgpu) @@ -172,26 +173,64 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | SDE_PORTE_HOTPLUG_SPT); + vgpu_vreg(vgpu, SKL_FUSE_STATUS) |= + SKL_FUSE_DOWNLOAD_STATUS | + SKL_FUSE_PG0_DIST_STATUS | + SKL_FUSE_PG1_DIST_STATUS | + SKL_FUSE_PG2_DIST_STATUS; + vgpu_vreg(vgpu, LCPLL1_CTL) |= + LCPLL_PLL_ENABLE | + LCPLL_PLL_LOCK; + vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE; + + } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { - vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; 
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_B << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; + vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_C << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; } if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &= + ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | + TRANS_DDI_PORT_MASK); + vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | + (PORT_D << TRANS_DDI_PORT_SHIFT) | + TRANS_DDI_FUNC_ENABLE); + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; } - if (IS_SKYLAKE(dev_priv) && + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT; } @@ -353,7 +392,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) clean_virtual_dp_monitor(vgpu, PORT_D); else clean_virtual_dp_monitor(vgpu, PORT_B); @@ -375,7 +414,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) intel_vgpu_init_i2c_edid(vgpu); - if (IS_SKYLAKE(dev_priv)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, resolution); else diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f1f426a97aa9..dca989eb2d42 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -56,8 +56,8 @@ static int context_switch_events[] = { static int ring_id_to_context_switch_event(int ring_id) { - if (WARN_ON(ring_id < RCS && ring_id > - ARRAY_SIZE(context_switch_events))) + if (WARN_ON(ring_id < RCS || + ring_id >= ARRAY_SIZE(context_switch_events))) return -EINVAL; return context_switch_events[ring_id]; @@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx) { - int ring_id = wa_ctx->workload->ring_id; - struct i915_gem_context *shadow_ctx = - wa_ctx->workload->vgpu->shadow_ctx; + struct intel_vgpu_workload *workload = container_of(wa_ctx, + struct intel_vgpu_workload, + wa_ctx); + int ring_id = workload->ring_id; + struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; struct drm_i915_gem_object *ctx_obj 
= shadow_ctx->engine[ring_id].state->obj; struct execlist_ring_context *shadow_ring_context; @@ -680,15 +682,12 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, CACHELINE_BYTES; workload->wa_ctx.per_ctx.guest_gma = per_ctx & PER_CTX_ADDR_MASK; - workload->wa_ctx.workload = workload; WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1)); } if (emulate_schedule_in) - memcpy(&workload->elsp_dwords, - &vgpu->execlist[ring_id].elsp_dwords, - sizeof(workload->elsp_dwords)); + workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords; gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n", workload, ring_id, head, tail, start, ctl); @@ -775,7 +774,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) _EL_OFFSET_STATUS_PTR); ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg); - ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7; + ctx_status_ptr.read_ptr = 0; + ctx_status_ptr.write_ptr = 0x7; vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; } diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c index 933a7c211a1c..dce8d15f706f 100644 --- a/drivers/gpu/drm/i915/gvt/firmware.c +++ b/drivers/gpu/drm/i915/gvt/firmware.c @@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) struct gvt_firmware_header *h; void *firmware; void *p; - unsigned long size; + unsigned long size, crc32_start; int i; int ret; - size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; + size = sizeof(*h) + info->mmio_size + info->cfg_space_size; firmware = vzalloc(size); if (!firmware) return -ENOMEM; @@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt) memcpy(gvt->firmware.mmio, p, info->mmio_size); + crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4; + h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start); + firmware_attr.size = size; firmware_attr.private = firmware; @@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt) firmware->mmio = mem; - sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state", + sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state", GVT_FIRMWARE_PATH, pdev->vendor, pdev->device, pdev->revision); diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index b832bea64e03..c6f0077f590d 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2224,7 +2224,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) gvt_dbg_core("init gtt\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { gvt->gtt.pte_ops = &gen8_gtt_pte_ops; gvt->gtt.gma_ops = &gen8_gtt_gma_ops; gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table; @@ -2293,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt) void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; + struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; u32 index; u32 offset; u32 num_entries; struct intel_gvt_gtt_entry e; + intel_runtime_pm_get(dev_priv); + memset(&e, 0, sizeof(struct intel_gvt_gtt_entry)); e.type = GTT_TYPE_GGTT_PTE; ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn); @@ -2313,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; for (offset = 0; offset < num_entries; offset++) ops->set_entry(NULL, &e, index + 
offset, false, 0, vgpu); + + intel_runtime_pm_put(dev_priv); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c index 3b9d59e457ba..7dea5e5d5567 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.c +++ b/drivers/gpu/drm/i915/gvt/gvt.c @@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = { .vgpu_create = intel_gvt_create_vgpu, .vgpu_destroy = intel_gvt_destroy_vgpu, .vgpu_reset = intel_gvt_reset_vgpu, + .vgpu_activate = intel_gvt_activate_vgpu, + .vgpu_deactivate = intel_gvt_deactivate_vgpu, }; /** @@ -106,7 +108,8 @@ static void init_device_info(struct intel_gvt *gvt) struct intel_gvt_device_info *info = &gvt->device_info; struct pci_dev *pdev = gvt->dev_priv->drm.pdev; - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { info->max_support_vgpus = 8; info->cfg_space_size = 256; info->mmio_size = 2 * 1024 * 1024; @@ -143,6 +146,11 @@ static int gvt_service_thread(void *data) intel_gvt_emulate_vblank(gvt); mutex_unlock(&gvt->lock); } + + if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED, + (void *)&gvt->service_request)) { + intel_gvt_schedule(gvt); + } } return 0; @@ -196,6 +204,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) idr_destroy(&gvt->vgpu_idr); + intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu); + kfree(dev_priv->gvt); dev_priv->gvt = NULL; } @@ -214,6 +224,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv) int intel_gvt_init_device(struct drm_i915_private *dev_priv) { struct intel_gvt *gvt; + struct intel_vgpu *vgpu; int ret; /* @@ -286,6 +297,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv) goto out_clean_types; } + vgpu = intel_gvt_create_idle_vgpu(gvt); + if (IS_ERR(vgpu)) { + ret = PTR_ERR(vgpu); + gvt_err("failed to create idle vgpu\n"); + goto out_clean_types; + } + gvt->idle_vgpu = vgpu; + gvt_dbg_core("gvt device initialization is done\n"); dev_priv->gvt = gvt; return 0; diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 6dfc48b63b71..930732e5c780 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -138,6 +138,10 @@ struct intel_vgpu_display { struct intel_vgpu_sbi sbi; }; +struct vgpu_sched_ctl { + int weight; +}; + struct intel_vgpu { struct intel_gvt *gvt; int id; @@ -147,6 +151,7 @@ struct intel_vgpu { bool failsafe; bool resetting; void *sched_data; + struct vgpu_sched_ctl sched_ctl; struct intel_vgpu_fence fence; struct intel_vgpu_gm gm; @@ -160,6 +165,7 @@ struct intel_vgpu { struct list_head workload_q_head[I915_NUM_ENGINES]; struct kmem_cache *workloads; atomic_t running_workload_num; + ktime_t last_ctx_submit_time; DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); struct i915_gem_context *shadow_ctx; @@ -215,6 +221,7 @@ struct intel_vgpu_type { unsigned int low_gm_size; unsigned int high_gm_size; unsigned int fence; + unsigned int weight; enum intel_vgpu_edid resolution; }; @@ -236,6 +243,7 @@ struct intel_gvt { DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); struct intel_vgpu_type *types; unsigned int num_types; + struct intel_vgpu *idle_vgpu; struct task_struct *service_thread; wait_queue_head_t service_thread_wq; @@ -249,6 +257,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915) enum { INTEL_GVT_REQUEST_EMULATE_VBLANK = 0, + INTEL_GVT_REQUEST_SCHED = 1, }; static inline void intel_gvt_request_service(struct intel_gvt *gvt, @@ -322,6 +331,8 @@ struct intel_vgpu_creation_params 
{ __u64 resolution; __s32 primary; __u64 vgpu_id; + + __u32 weight; }; int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, @@ -376,13 +387,16 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, int intel_gvt_init_vgpu_types(struct intel_gvt *gvt); void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt); +struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt); +void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu); struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_type *type); void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, unsigned int engine_mask); void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); - +void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu); +void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu); /* validating GM functions */ #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ @@ -449,6 +463,8 @@ struct intel_gvt_ops { struct intel_vgpu_type *); void (*vgpu_destroy)(struct intel_vgpu *); void (*vgpu_reset)(struct intel_vgpu *); + void (*vgpu_activate)(struct intel_vgpu *); + void (*vgpu_deactivate)(struct intel_vgpu *); }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 6da9ae1618e3..0ad1a508e2af 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt) return D_BDW; else if (IS_SKYLAKE(gvt->dev_priv)) return D_SKL; + else if (IS_KABYLAKE(gvt->dev_priv)) + return D_KBL; return 0; } @@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, old = vgpu_vreg(vgpu, offset); new = CALC_MODE_MASK_REG(old, *(u32 *)p_data); - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) { switch (offset) { case FORCEWAKE_RENDER_GEN9_REG: ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG; @@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, write_vreg(vgpu, offset, p_data, bytes); data = vgpu_vreg(vgpu, offset); - if (IS_SKYLAKE(vgpu->gvt->dev_priv) && - offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { + if ((IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) + && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) { /* SKL DPB/C/D aux ctl register changed */ return 0; } else if (IS_BROADWELL(vgpu->gvt->dev_priv) && @@ -1311,7 +1315,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, switch (cmd) { case GEN9_PCODE_READ_MEM_LATENCY: - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) { /** * "Read memory latency" command on gen9. 
* Below memory latency values are read @@ -1324,7 +1329,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset, } break; case SKL_PCODE_CDCLK_CONTROL: - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) *data0 = SKL_CDCLK_READY_FOR_CHANGE; break; case GEN6_PCODE_READ_RC6VIDS: @@ -1418,6 +1424,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data; if (execlist->elsp_dwords.index == 3) { + vgpu->last_ctx_submit_time = ktime_get(); ret = intel_vgpu_submit_execlist(vgpu, ring_id); if(ret) gvt_vgpu_err("fail submit workload on ring %d\n", @@ -2592,219 +2599,232 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); - MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + dp_aux_ch_ctl_mmio_write); + MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + dp_aux_ch_ctl_mmio_write); - MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); - MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); + MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS); + MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL, + skl_power_well_ctl_write); + MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write); MMIO_D(0xa210, D_SKL_PLUS); MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write); - MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write); - MMIO_D(0x45504, D_SKL); - MMIO_D(0x45520, D_SKL); - MMIO_D(0x46000, D_SKL); - MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write); - MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write); - MMIO_D(0x6C040, D_SKL); - MMIO_D(0x6C048, D_SKL); - MMIO_D(0x6C050, D_SKL); - MMIO_D(0x6C044, D_SKL); - MMIO_D(0x6C04C, D_SKL); - MMIO_D(0x6C054, D_SKL); - MMIO_D(0x6c058, D_SKL); - MMIO_D(0x6c05c, D_SKL); - MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL); - - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write); - - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write); - - MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write); - MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write); - - 
MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL); - - MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL); - MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL); - MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL); - - MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL); - MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL); - - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL); - 
MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL); - - MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL); - MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL); - - MMIO_D(0x70380, D_SKL); - MMIO_D(0x71380, D_SKL); - MMIO_D(0x72380, D_SKL); - MMIO_D(0x7039c, D_SKL); - - MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL); - MMIO_D(0x8f074, D_SKL); - MMIO_D(0x8f004, D_SKL); - MMIO_D(0x8f034, D_SKL); - - MMIO_D(0xb11c, D_SKL); - - MMIO_D(0x51000, D_SKL); - MMIO_D(0x6c00c, D_SKL); - - MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); - MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); - - MMIO_D(0xd08, D_SKL); - MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL); - MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write); + MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write); + MMIO_D(0x45504, D_SKL_PLUS); + MMIO_D(0x45520, D_SKL_PLUS); + MMIO_D(0x46000, D_SKL_PLUS); + MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write); + MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write); + MMIO_D(0x6C040, D_SKL | D_KBL); + MMIO_D(0x6C048, D_SKL | D_KBL); + MMIO_D(0x6C050, D_SKL | D_KBL); + MMIO_D(0x6C044, D_SKL | D_KBL); + MMIO_D(0x6C04C, D_SKL | D_KBL); + MMIO_D(0x6C054, D_SKL | D_KBL); + MMIO_D(0x6c058, D_SKL | D_KBL); + MMIO_D(0x6c05c, D_SKL | D_KBL); + MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL); + + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); + + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); + + MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write); + MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write); + + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); + 
MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL); + + MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL); + MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + 
MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL); + MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL); + + MMIO_D(0x70380, D_SKL_PLUS); + MMIO_D(0x71380, D_SKL_PLUS); + MMIO_D(0x72380, D_SKL_PLUS); + MMIO_D(0x7039c, D_SKL_PLUS); + + MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL); + MMIO_D(0x8f074, D_SKL | D_KBL); + MMIO_D(0x8f004, D_SKL | D_KBL); + MMIO_D(0x8f034, D_SKL | D_KBL); + + MMIO_D(0xb11c, D_SKL | D_KBL); + + MMIO_D(0x51000, D_SKL | D_KBL); + MMIO_D(0x6c00c, D_SKL_PLUS); + + MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); + MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL); + + MMIO_D(0xd08, D_SKL_PLUS); + MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL); + MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); /* TRTT */ - MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write); - MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); + MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write); + MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write); - MMIO_D(0x45008, D_SKL); + MMIO_D(0x45008, D_SKL | D_KBL); - MMIO_D(0x46430, D_SKL); + MMIO_D(0x46430, D_SKL | D_KBL); - MMIO_D(0x46520, D_SKL); + MMIO_D(0x46520, D_SKL | D_KBL); - MMIO_D(0xc403c, D_SKL); - MMIO_D(0xb004, D_SKL); + MMIO_D(0xc403c, D_SKL | D_KBL); + MMIO_D(0xb004, D_SKL_PLUS); MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write); - MMIO_D(0x65900, D_SKL); - MMIO_D(0x1082c0, D_SKL); - MMIO_D(0x4068, D_SKL); - MMIO_D(0x67054, D_SKL); - MMIO_D(0x6e560, D_SKL); - MMIO_D(0x6e554, D_SKL); - MMIO_D(0x2b20, D_SKL); - MMIO_D(0x65f00, D_SKL); - MMIO_D(0x65f08, D_SKL); - MMIO_D(0x320f0, D_SKL); - - MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(0x70034, D_SKL); - MMIO_D(0x71034, D_SKL); - MMIO_D(0x72034, D_SKL); - - MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL); - MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL); - MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL); - MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL); - MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL); - MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); - - MMIO_D(0x44500, D_SKL); + MMIO_D(0x65900, D_SKL_PLUS); + MMIO_D(0x1082c0, D_SKL | D_KBL); + MMIO_D(0x4068, D_SKL | D_KBL); + 
MMIO_D(0x67054, D_SKL | D_KBL); + MMIO_D(0x6e560, D_SKL | D_KBL); + MMIO_D(0x6e554, D_SKL | D_KBL); + MMIO_D(0x2b20, D_SKL | D_KBL); + MMIO_D(0x65f00, D_SKL | D_KBL); + MMIO_D(0x65f08, D_SKL | D_KBL); + MMIO_D(0x320f0, D_SKL | D_KBL); + + MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); + MMIO_D(0x70034, D_SKL_PLUS); + MMIO_D(0x71034, D_SKL_PLUS); + MMIO_D(0x72034, D_SKL_PLUS); + + MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS); + MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS); + MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS); + MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS); + MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS); + MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS); + + MMIO_D(0x44500, D_SKL_PLUS); MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); - MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS, + MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); + + MMIO_D(0x4ab8, D_KBL); + MMIO_D(0x940c, D_SKL_PLUS); + MMIO_D(0x2248, D_SKL_PLUS | D_KBL); + MMIO_D(0x4ab0, D_SKL | D_KBL); + MMIO_D(0x20d4, D_SKL | D_KBL); + return 0; } @@ -2881,7 +2901,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt) ret = init_broadwell_mmio_info(gvt); if (ret) goto err; - } else if (IS_SKYLAKE(dev_priv)) { + } else if (IS_SKYLAKE(dev_priv) + || IS_KABYLAKE(dev_priv)) { ret = init_broadwell_mmio_info(gvt); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 92bb247e3478..9d6812f0957f 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -580,7 +580,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C); - } else if (IS_SKYLAKE(gvt->dev_priv)) { + } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) { SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT); SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT); @@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt) gvt_dbg_core("init irq framework\n"); - if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { + if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv)) { irq->ops = &gen8_irq_ops; irq->irq_map = gen8_irq_map; } else { diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d641214578a7..1ae0b4083ce1 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -295,10 +295,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev, return 0; return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" - "fence: %d\nresolution: %s\n", + "fence: %d\nresolution: %s\n" + "weight: %d\n", BYTES_TO_MB(type->low_gm_size), BYTES_TO_MB(type->high_gm_size), - type->fence, vgpu_edid_str(type->resolution)); + type->fence, vgpu_edid_str(type->resolution), + type->weight); } static MDEV_TYPE_ATTR_RO(available_instances); @@ -544,6 +546,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) if (ret) goto undo_group; + intel_gvt_ops->vgpu_activate(vgpu); + atomic_set(&vgpu->vdev.released, 0); return ret; @@ -569,6 +573,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu) if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1)) return; + 
intel_gvt_ops->vgpu_deactivate(vgpu); + ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY, &vgpu->vdev.iommu_notifier); WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret); @@ -1146,8 +1152,40 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, return 0; } +static ssize_t +vgpu_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct mdev_device *mdev = mdev_from_dev(dev); + + if (mdev) { + struct intel_vgpu *vgpu = (struct intel_vgpu *) + mdev_get_drvdata(mdev); + return sprintf(buf, "%d\n", vgpu->id); + } + return sprintf(buf, "\n"); +} + +static DEVICE_ATTR_RO(vgpu_id); + +static struct attribute *intel_vgpu_attrs[] = { + &dev_attr_vgpu_id.attr, + NULL +}; + +static const struct attribute_group intel_vgpu_group = { + .name = "intel_vgpu", + .attrs = intel_vgpu_attrs, +}; + +static const struct attribute_group *intel_vgpu_groups[] = { + &intel_vgpu_group, + NULL, +}; + static const struct mdev_parent_ops intel_vgpu_ops = { .supported_type_groups = intel_vgpu_type_groups, + .mdev_attr_groups = intel_vgpu_groups, .create = intel_vgpu_create, .remove = intel_vgpu_remove, @@ -1340,13 +1378,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev) static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) { - struct intel_vgpu *vgpu = info->vgpu; - - if (!info) { - gvt_vgpu_err("kvmgt_guest_info invalid\n"); - return false; - } - kvm_page_track_unregister_notifier(info->kvm, &info->track_node); kvm_put_kvm(info->kvm); kvmgt_protect_table_destroy(info); diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index a3a027025cd0..7edd66f38ef9 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h @@ -44,20 +44,21 @@ struct intel_vgpu; #define D_HSW (1 << 2) #define D_BDW (1 << 3) #define D_SKL (1 << 4) +#define D_KBL (1 << 5) -#define D_GEN9PLUS (D_SKL) -#define D_GEN8PLUS (D_BDW | D_SKL) -#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL) -#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL) +#define D_GEN9PLUS (D_SKL | D_KBL) +#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL) +#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL) +#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) -#define D_SKL_PLUS (D_SKL) -#define D_BDW_PLUS (D_BDW | D_SKL) -#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL) -#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL) +#define D_SKL_PLUS (D_SKL | D_KBL) +#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL) +#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL) +#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) #define D_PRE_BDW (D_SNB | D_IVB | D_HSW) #define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW) -#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL) +#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL) struct intel_gvt_mmio_info { u32 offset; diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 0beb83563b08..c6e7972ac21d 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c @@ -44,7 +44,7 @@ struct render_mmio { u32 value; }; -static struct render_mmio gen8_render_mmio_list[] = { +static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = { {RCS, _MMIO(0x229c), 0xffff, false}, {RCS, _MMIO(0x2248), 0x0, false}, {RCS, _MMIO(0x2098), 0x0, false}, @@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = { {BCS, _MMIO(0x22028), 0x0, false}, }; -static struct render_mmio gen9_render_mmio_list[] = { +static struct render_mmio 
gen9_render_mmio_list[] __cacheline_aligned = { {RCS, _MMIO(0x229c), 0xffff, false}, {RCS, _MMIO(0x2248), 0x0, false}, {RCS, _MMIO(0x2098), 0x0, false}, @@ -126,6 +126,18 @@ static struct render_mmio gen9_render_mmio_list[] = { {VCS2, _MMIO(0x1c028), 0xffff, false}, {VECS, _MMIO(0x1a028), 0xffff, false}, + + {RCS, _MMIO(0x7304), 0xffff, true}, + {RCS, _MMIO(0x2248), 0x0, false}, + {RCS, _MMIO(0x940c), 0x0, false}, + {RCS, _MMIO(0x4ab8), 0x0, false}, + + {RCS, _MMIO(0x4ab0), 0x0, false}, + {RCS, _MMIO(0x20d4), 0x0, false}, + + {RCS, _MMIO(0xb004), 0x0, false}, + {RCS, _MMIO(0x20a0), 0x0, false}, + {RCS, _MMIO(0x20e4), 0xffff, false}, }; static u32 gen9_render_mocs[I915_NUM_ENGINES][64]; @@ -159,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) */ fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ | FW_REG_WRITE); - if (ring_id == RCS && IS_SKYLAKE(dev_priv)) + if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) fw |= FORCEWAKE_RENDER; intel_uncore_forcewake_get(dev_priv, fw); @@ -192,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id) if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (!IS_SKYLAKE(dev_priv)) - return; - offset.reg = regs[ring_id]; for (i = 0; i < 64; i++) { gen9_render_mocs[ring_id][i] = I915_READ(offset); @@ -230,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id) if (WARN_ON(ring_id >= ARRAY_SIZE(regs))) return; - if (!IS_SKYLAKE(dev_priv)) - return; - offset.reg = regs[ring_id]; for (i = 0; i < 64; i++) { vgpu_vreg(vgpu, offset) = I915_READ(offset); @@ -265,7 +271,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id) u32 inhibit_mask = _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { + if (IS_SKYLAKE(vgpu->gvt->dev_priv) + || IS_KABYLAKE(vgpu->gvt->dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); load_mocs(vgpu, ring_id); @@ -312,7 +319,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id) u32 v; int i, array_size; - if (IS_SKYLAKE(dev_priv)) { + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { mmio = gen9_render_mmio_list; array_size = ARRAY_SIZE(gen9_render_mmio_list); restore_mocs(vgpu, ring_id); diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 34b9acdf3479..79ba4b3440aa 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -47,19 +47,92 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu) return false; } +struct vgpu_sched_data { + struct list_head lru_list; + struct intel_vgpu *vgpu; + + ktime_t sched_in_time; + ktime_t sched_out_time; + ktime_t sched_time; + ktime_t left_ts; + ktime_t allocated_ts; + + struct vgpu_sched_ctl sched_ctl; +}; + +struct gvt_sched_data { + struct intel_gvt *gvt; + struct hrtimer timer; + unsigned long period; + struct list_head lru_runq_head; +}; + +static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu) +{ + ktime_t delta_ts; + struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data; + + delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time; + + vgpu_data->sched_time += delta_ts; + vgpu_data->left_ts -= delta_ts; +} + +#define GVT_TS_BALANCE_PERIOD_MS 100 +#define GVT_TS_BALANCE_STAGE_NUM 10 + +static void gvt_balance_timeslice(struct gvt_sched_data *sched_data) +{ + struct vgpu_sched_data *vgpu_data; + struct list_head *pos; + static uint64_t stage_check; + 
int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM; + + /* The timeslice accumulation reset at stage 0, which is + * allocated again without adding previous debt. + */ + if (stage == 0) { + int total_weight = 0; + ktime_t fair_timeslice; + + list_for_each(pos, &sched_data->lru_runq_head) { + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); + total_weight += vgpu_data->sched_ctl.weight; + } + + list_for_each(pos, &sched_data->lru_runq_head) { + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); + fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) * + vgpu_data->sched_ctl.weight / + total_weight; + + vgpu_data->allocated_ts = fair_timeslice; + vgpu_data->left_ts = vgpu_data->allocated_ts; + } + } else { + list_for_each(pos, &sched_data->lru_runq_head) { + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); + + /* timeslice for next 100ms should add the left/debt + * slice of previous stages. + */ + vgpu_data->left_ts += vgpu_data->allocated_ts; + } + } +} + static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; enum intel_engine_id i; struct intel_engine_cs *engine; + struct vgpu_sched_data *vgpu_data; + ktime_t cur_time; /* no target to schedule */ if (!scheduler->next_vgpu) return; - gvt_dbg_sched("try to schedule next vgpu %d\n", - scheduler->next_vgpu->id); - /* * after the flag is set, workload dispatch thread will * stop dispatching workload for current vgpu @@ -68,14 +141,18 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) /* still have uncompleted workload? */ for_each_engine(engine, gvt->dev_priv, i) { - if (scheduler->current_workload[i]) { - gvt_dbg_sched("still have running workload\n"); + if (scheduler->current_workload[i]) return; - } } - gvt_dbg_sched("switch to next vgpu %d\n", - scheduler->next_vgpu->id); + cur_time = ktime_get(); + if (scheduler->current_vgpu) { + vgpu_data = scheduler->current_vgpu->sched_data; + vgpu_data->sched_out_time = cur_time; + vgpu_update_timeslice(scheduler->current_vgpu); + } + vgpu_data = scheduler->next_vgpu->sched_data; + vgpu_data->sched_in_time = cur_time; /* switch current vgpu */ scheduler->current_vgpu = scheduler->next_vgpu; @@ -88,97 +165,106 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt) wake_up(&scheduler->waitq[i]); } -struct tbs_vgpu_data { - struct list_head list; - struct intel_vgpu *vgpu; - /* put some per-vgpu sched stats here */ -}; - -struct tbs_sched_data { - struct intel_gvt *gvt; - struct delayed_work work; - unsigned long period; - struct list_head runq_head; -}; - -#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) - -static void tbs_sched_func(struct work_struct *work) +static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data) { - struct tbs_sched_data *sched_data = container_of(work, - struct tbs_sched_data, work.work); - struct tbs_vgpu_data *vgpu_data; - - struct intel_gvt *gvt = sched_data->gvt; - struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - + struct vgpu_sched_data *vgpu_data; struct intel_vgpu *vgpu = NULL; - struct list_head *pos, *head; - - mutex_lock(&gvt->lock); - - /* no vgpu or has already had a target */ - if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu) - goto out; - - if (scheduler->current_vgpu) { - vgpu_data = scheduler->current_vgpu->sched_data; - head = &vgpu_data->list; - } else { - head = &sched_data->runq_head; - } + struct list_head *head = &sched_data->lru_runq_head; + struct list_head 
*pos; /* search a vgpu with pending workload */ list_for_each(pos, head) { - if (pos == &sched_data->runq_head) - continue; - vgpu_data = container_of(pos, struct tbs_vgpu_data, list); + vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list); if (!vgpu_has_pending_workload(vgpu_data->vgpu)) continue; - vgpu = vgpu_data->vgpu; - break; + /* Return the vGPU only if it has time slice left */ + if (vgpu_data->left_ts > 0) { + vgpu = vgpu_data->vgpu; + break; + } } + return vgpu; +} + +/* in nanosecond */ +#define GVT_DEFAULT_TIME_SLICE 1000000 + +static void tbs_sched_func(struct gvt_sched_data *sched_data) +{ + struct intel_gvt *gvt = sched_data->gvt; + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; + struct vgpu_sched_data *vgpu_data; + struct intel_vgpu *vgpu = NULL; + static uint64_t timer_check; + + if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS)) + gvt_balance_timeslice(sched_data); + + /* no active vgpu or has already had a target */ + if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu) + goto out; + + vgpu = find_busy_vgpu(sched_data); if (vgpu) { scheduler->next_vgpu = vgpu; - gvt_dbg_sched("pick next vgpu %d\n", vgpu->id); + + /* Move the last used vGPU to the tail of lru_list */ + vgpu_data = vgpu->sched_data; + list_del_init(&vgpu_data->lru_list); + list_add_tail(&vgpu_data->lru_list, + &sched_data->lru_runq_head); + } else { + scheduler->next_vgpu = gvt->idle_vgpu; } out: - if (scheduler->next_vgpu) { - gvt_dbg_sched("try to schedule next vgpu %d\n", - scheduler->next_vgpu->id); + if (scheduler->next_vgpu) try_to_schedule_next_vgpu(gvt); - } +} - /* - * still have vgpu on runq - * or last schedule haven't finished due to running workload - */ - if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu) - schedule_delayed_work(&sched_data->work, sched_data->period); +void intel_gvt_schedule(struct intel_gvt *gvt) +{ + struct gvt_sched_data *sched_data = gvt->scheduler.sched_data; + mutex_lock(&gvt->lock); + tbs_sched_func(sched_data); mutex_unlock(&gvt->lock); } +static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data) +{ + struct gvt_sched_data *data; + + data = container_of(timer_data, struct gvt_sched_data, timer); + + intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED); + + hrtimer_add_expires_ns(&data->timer, data->period); + + return HRTIMER_RESTART; +} + static int tbs_sched_init(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - struct tbs_sched_data *data; + struct gvt_sched_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - INIT_LIST_HEAD(&data->runq_head); - INIT_DELAYED_WORK(&data->work, tbs_sched_func); + INIT_LIST_HEAD(&data->lru_runq_head); + hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + data->timer.function = tbs_timer_fn; data->period = GVT_DEFAULT_TIME_SLICE; data->gvt = gvt; scheduler->sched_data = data; + return 0; } @@ -186,25 +272,28 @@ static void tbs_sched_clean(struct intel_gvt *gvt) { struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; - struct tbs_sched_data *data = scheduler->sched_data; + struct gvt_sched_data *data = scheduler->sched_data; + + hrtimer_cancel(&data->timer); - cancel_delayed_work(&data->work); kfree(data); scheduler->sched_data = NULL; } static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) { - struct tbs_vgpu_data *data; + struct vgpu_sched_data *data; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; + data->sched_ctl.weight = 
vgpu->sched_ctl.weight; data->vgpu = vgpu; - INIT_LIST_HEAD(&data->list); + INIT_LIST_HEAD(&data->lru_list); vgpu->sched_data = data; + return 0; } @@ -216,21 +305,24 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) { - struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data; + struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data; + struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - if (!list_empty(&vgpu_data->list)) + if (!list_empty(&vgpu_data->lru_list)) return; - list_add_tail(&vgpu_data->list, &sched_data->runq_head); - schedule_delayed_work(&sched_data->work, 0); + list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head); + + if (!hrtimer_active(&sched_data->timer)) + hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(), + sched_data->period), HRTIMER_MODE_ABS); } static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) { - struct tbs_vgpu_data *vgpu_data = vgpu->sched_data; + struct vgpu_sched_data *vgpu_data = vgpu->sched_data; - list_del_init(&vgpu_data->list); + list_del_init(&vgpu_data->lru_list); } static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h index bb8b9097e41a..ba00a5f7455f 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.h +++ b/drivers/gpu/drm/i915/gvt/sched_policy.h @@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops { void (*stop_schedule)(struct intel_vgpu *vgpu); }; +void intel_gvt_schedule(struct intel_gvt *gvt); + int intel_gvt_init_sched_policy(struct intel_gvt *gvt); void intel_gvt_clean_sched_policy(struct intel_gvt *gvt); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d9a735310e75..bada32b33237 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -279,11 +279,8 @@ static struct intel_vgpu_workload *pick_next_workload( goto out; } - if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) { - gvt_dbg_sched("ring id %d stop - no available workload\n", - ring_id); + if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) goto out; - } /* * still have current workload, maybe the workload disptacher @@ -453,7 +450,8 @@ static int workload_thread(void *priv) struct intel_vgpu_workload *workload = NULL; struct intel_vgpu *vgpu = NULL; int ret; - bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); + bool need_force_wake = IS_SKYLAKE(gvt->dev_priv) + || IS_KABYLAKE(gvt->dev_priv); DEFINE_WAIT_FUNC(wait, woken_wake_function); kfree(p); diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 2833dfa8c9ae..2cd725c0573e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -67,7 +67,6 @@ struct shadow_per_ctx { }; struct intel_shadow_wa_ctx { - struct intel_vgpu_workload *workload; struct shadow_indirect_ctx indirect_ctx; struct shadow_per_ctx per_ctx; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 41cfa5ccae84..6e3cbd8caec2 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -64,18 +64,28 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); } +#define VGPU_MAX_WEIGHT 16 +#define VGPU_WEIGHT(vgpu_num) \ + (VGPU_MAX_WEIGHT / (vgpu_num)) + static struct { unsigned int low_mm; unsigned 
int high_mm; unsigned int fence; + + /* A vGPU with a weight of 8 will get twice as much GPU as a vGPU + * with a weight of 4 on a contended host; different vGPU types are + * assigned different weights. Legal weights range from 1 to 16. + */ + unsigned int weight; enum intel_vgpu_edid edid; char *name; } vgpu_types[] = { /* Fixed vGPU type table */ - { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, - { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, - { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, - { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, + { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" }, + { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" }, + { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" }, + { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" }, }; /** @@ -120,6 +130,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) gvt->types[i].low_gm_size = vgpu_types[i].low_mm; gvt->types[i].high_gm_size = vgpu_types[i].high_mm; gvt->types[i].fence = vgpu_types[i].fence; + + if (vgpu_types[i].weight < 1 || + vgpu_types[i].weight > VGPU_MAX_WEIGHT) + return -EINVAL; + + gvt->types[i].weight = vgpu_types[i].weight; gvt->types[i].resolution = vgpu_types[i].edid; gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, high_avail / vgpu_types[i].high_mm); @@ -131,11 +147,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt) sprintf(gvt->types[i].name, "GVTg_V5_%s", vgpu_types[i].name); - gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n", + gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n", i, gvt->types[i].name, gvt->types[i].avail_instance, gvt->types[i].low_gm_size, gvt->types[i].high_gm_size, gvt->types[i].fence, + gvt->types[i].weight, vgpu_edid_str(gvt->types[i].resolution)); } @@ -179,20 +196,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt) } /** - * intel_gvt_destroy_vgpu - destroy a virtual GPU + * intel_gvt_activate_vgpu - activate a virtual GPU + * @vgpu: virtual GPU + * - * This function is called when user wants to destroy a virtual GPU. + * This function is called when user wants to activate a virtual GPU. + * + */ -void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) +void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu) +{ + mutex_lock(&vgpu->gvt->lock); + vgpu->active = true; + mutex_unlock(&vgpu->gvt->lock); +} + +/** + * intel_gvt_deactivate_vgpu - deactivate a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to deactivate a virtual GPU. + * All virtual GPU runtime information will be destroyed. + * + */ +void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; mutex_lock(&gvt->lock); vgpu->active = false; - idr_remove(&gvt->vgpu_idr, vgpu->id); if (atomic_read(&vgpu->running_workload_num)) { mutex_unlock(&gvt->lock); @@ -201,6 +232,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) } intel_vgpu_stop_schedule(vgpu); + + mutex_unlock(&gvt->lock); +} + +/** + * intel_gvt_destroy_vgpu - destroy a virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to destroy a virtual GPU. 
+ + */ +void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) +{ + struct intel_gvt *gvt = vgpu->gvt; + + mutex_lock(&gvt->lock); + + WARN(vgpu->active, "vGPU is still active!\n"); + + idr_remove(&gvt->vgpu_idr, vgpu->id); intel_vgpu_clean_sched_policy(vgpu); intel_vgpu_clean_gvt_context(vgpu); intel_vgpu_clean_execlist(vgpu); @@ -216,6 +267,59 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu) mutex_unlock(&gvt->lock); } +#define IDLE_VGPU_IDR 0 + +/** + * intel_gvt_create_idle_vgpu - create an idle virtual GPU + * @gvt: GVT device + * + * This function is called when user wants to create an idle virtual GPU. + * + * Returns: + * pointer to intel_vgpu, error pointer if failed. + */ +struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt) +{ + struct intel_vgpu *vgpu; + enum intel_engine_id i; + int ret; + + vgpu = vzalloc(sizeof(*vgpu)); + if (!vgpu) + return ERR_PTR(-ENOMEM); + + vgpu->id = IDLE_VGPU_IDR; + vgpu->gvt = gvt; + + for (i = 0; i < I915_NUM_ENGINES; i++) + INIT_LIST_HEAD(&vgpu->workload_q_head[i]); + + ret = intel_vgpu_init_sched_policy(vgpu); + if (ret) + goto out_free_vgpu; + + vgpu->active = false; + + return vgpu; + +out_free_vgpu: + vfree(vgpu); + return ERR_PTR(ret); +} + +/** + * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU + * @vgpu: virtual GPU + * + * This function is called when user wants to destroy an idle virtual GPU. + * + */ +void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) +{ + intel_vgpu_clean_sched_policy(vgpu); + vfree(vgpu); +} + static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct intel_vgpu_creation_params *param) { @@ -232,13 +336,15 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, mutex_lock(&gvt->lock); - ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL); + ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU, + GFP_KERNEL); if (ret < 0) goto out_free_vgpu; vgpu->id = ret; vgpu->handle = param->handle; vgpu->gvt = gvt; + vgpu->sched_ctl.weight = param->weight; bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); intel_vgpu_init_cfg_space(vgpu, param->primary); @@ -277,7 +383,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_shadow_ctx; - vgpu->active = true; mutex_unlock(&gvt->lock); return vgpu; @@ -325,6 +430,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, param.low_gm_sz = type->low_gm_size; param.high_gm_sz = type->high_gm_size; param.fence_sz = type->fence; + param.weight = type->weight; param.resolution = type->resolution; /* XXX current param based on MB */
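
Note on the weight-based policy added above: each vGPU is granted a share of every 100ms balance period (GVT_TS_BALANCE_PERIOD_MS, see gvt_balance_timeslice()) in proportion to its sched_ctl.weight, and leftover time or debt is carried across the following stages until the stage counter wraps back to 0, at which point the slices are recomputed without the old debt. The stand-alone C sketch below mirrors that arithmetic only; demo_vgpu, rebalance() and top_up() are illustrative names invented here and are not part of the patch.

/*
 * Sketch of the weight-based time-slice balancing, user-space only.
 * The constants mirror the GVT_TS_* macros used by the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define BALANCE_PERIOD_MS 100	/* like GVT_TS_BALANCE_PERIOD_MS */
#define BALANCE_STAGE_NUM 10	/* like GVT_TS_BALANCE_STAGE_NUM */
#define NSEC_PER_MSEC 1000000LL

struct demo_vgpu {
	const char *name;
	unsigned int weight;	/* 1..16, see VGPU_WEIGHT() */
	int64_t allocated_ts;	/* ns granted per balance period */
	int64_t left_ts;	/* ns still usable; may go negative (debt) */
};

/* Stage 0: recompute fair slices from the weights, dropping old debt. */
static void rebalance(struct demo_vgpu *v, int n)
{
	unsigned int total_weight = 0;
	int i;

	for (i = 0; i < n; i++)
		total_weight += v[i].weight;

	for (i = 0; i < n; i++) {
		v[i].allocated_ts = BALANCE_PERIOD_MS * NSEC_PER_MSEC *
				    v[i].weight / total_weight;
		v[i].left_ts = v[i].allocated_ts;
	}
}

/* Later stages: top the slice up, carrying leftover or debt forward. */
static void top_up(struct demo_vgpu *v, int n)
{
	int i;

	for (i = 0; i < n; i++)
		v[i].left_ts += v[i].allocated_ts;
}

int main(void)
{
	/* Weights as VGPU_WEIGHT(4) = 4 and VGPU_WEIGHT(2) = 8 would give. */
	struct demo_vgpu vgpus[] = {
		{ "GVTg_V5_4", 4, 0, 0 },
		{ "GVTg_V5_2", 8, 0, 0 },
	};
	int stage, i;

	for (stage = 0; stage < BALANCE_STAGE_NUM; stage++) {
		if (stage == 0)
			rebalance(vgpus, 2);
		else
			top_up(vgpus, 2);
	}

	for (i = 0; i < 2; i++)
		printf("%s gets ~%lld ms of every %d ms\n", vgpus[i].name,
		       (long long)(vgpus[i].allocated_ts / NSEC_PER_MSEC),
		       BALANCE_PERIOD_MS);
	return 0;
}

With weights 4 and 8 the sketch prints roughly 33 ms and 66 ms per 100 ms period, i.e. the 1:2 ratio the weight table is meant to enforce; in the kernel the consumed time is subtracted from left_ts when a vGPU is scheduled out (sched_in_time/sched_out_time).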
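Scheduling ticks themselves come from a 1ms hrtimer (GVT_DEFAULT_TIME_SLICE is 1000000 ns) whose callback raises INTEL_GVT_REQUEST_SCHED; tbs_sched_func() then walks the LRU run queue, picks the first vGPU that both has pending work and has time slice left, moves it to the tail of the list, and falls back to the idle vGPU when nothing qualifies. A rough user-space approximation of that selection step follows; node and pick_next are made-up names used only for illustration.

/*
 * Approximation of the find_busy_vgpu()/tbs_sched_func() pick logic.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct node {
	const char *name;
	bool has_pending_workload;
	int64_t left_ts;	/* ns left in the current balance period */
	struct node *next;	/* LRU order: head was scheduled longest ago */
};

static const char *pick_next(const struct node *lru_head,
			     const char *idle_vgpu)
{
	const struct node *n;

	for (n = lru_head; n; n = n->next) {
		if (!n->has_pending_workload)
			continue;
		if (n->left_ts > 0)	/* only vGPUs with budget left */
			return n->name;
	}
	return idle_vgpu;		/* nothing runnable: run the idle vGPU */
}

int main(void)
{
	struct node c = { "vgpu3", true, -500000, NULL };	/* in debt */
	struct node b = { "vgpu2", false, 2000000, &c };	/* no work */
	struct node a = { "vgpu1", true, 1000000, &b };		/* runnable */

	printf("next vgpu: %s\n", pick_next(&a, "idle vgpu"));	/* vgpu1 */
	return 0;
}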