diff options
Diffstat (limited to 'drivers')
86 files changed, 737 insertions, 266 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index c7fd0c47b254..1102de76d876 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -195,19 +195,32 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t sdma_engine_reg_base[2] = { - SOC15_REG_OFFSET(SDMA0, 0, - mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, - SOC15_REG_OFFSET(SDMA1, 0, - mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL - }; - uint32_t retval = sdma_engine_reg_base[engine_id] + uint32_t sdma_engine_reg_base = 0; + uint32_t sdma_rlc_reg_offset; + + switch (engine_id) { + default: + dev_warn(adev->dev, + "Invalid sdma engine id (%d), using engine id 0\n", + engine_id); + fallthrough; + case 0: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0, + mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + case 1: + sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0, + mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL; + break; + } + + sdma_rlc_reg_offset = sdma_engine_reg_base + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, - queue_id, retval); + queue_id, sdma_rlc_reg_offset); - return retval; + return sdma_rlc_reg_offset; } static inline struct v9_mqd *get_mqd(void *mqd) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index bcce4c0be462..1bedb416eebd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1243,7 +1243,6 @@ void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev, if (!obj || !obj->ent) return; - debugfs_remove(obj->ent); obj->ent = NULL; put_obj(obj); } @@ -1257,7 +1256,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_remove(adev, &obj->head); } - debugfs_remove_recursive(con->dir); con->dir = NULL; } /* debugfs end */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 134cc36e30c5..0739e259bf91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -462,7 +462,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev, unsigned int pages; int i, r; - *sgt = kmalloc(sizeof(*sg), GFP_KERNEL); + *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL); if (!*sgt) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index cb9d60a4e05e..b95f22262a90 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -691,6 +691,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_UTCL1_CNTL1, 0x30000000, 0x30000000) }; static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c index fa0bca3e1f73..5d2505956f84 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c @@ -135,6 +135,12 @@ static void gfxhub_v2_1_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + /* These registers are not accessible to 
VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + /* Setup L2 cache */ tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -190,6 +196,12 @@ static void gfxhub_v2_1_enable_system_domain(struct amdgpu_device *adev) static void gfxhub_v2_1_disable_identity_aperture(struct amdgpu_device *adev) { + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF); WREG32_SOC15(GC, 0, mmGCVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32, @@ -326,6 +338,13 @@ void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 757fa8e83f5b..c79fc54bc3c4 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -134,6 +134,12 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev) { uint32_t tmp; + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + /* Setup L2 cache */ tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1); @@ -189,6 +195,12 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev) { + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. + */ + if (amdgpu_sriov_vf(adev)) + return; + WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32, 0xFFFFFFFF); @@ -318,6 +330,13 @@ void mmhub_v2_0_gart_disable(struct amdgpu_device *adev) void mmhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { u32 tmp; + + /* These registers are not accessible to VF-SRIOV. + * The PF will program them instead. 
+ */ + if (amdgpu_sriov_vf(adev)) + return; + tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_L2_PROTECTION_FAULT_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index d488d250805d..e16874f30d5d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -179,12 +179,11 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) } break; case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: err = psp_init_ta_microcode(&adev->psp, chip_name); if (err) return err; break; - case CHIP_NAVY_FLOUNDER: - break; default: BUG(); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e4b33c67b634..df9338257ae0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2196,6 +2196,7 @@ void amdgpu_dm_update_connector_after_detect( drm_connector_update_edid_property(connector, aconnector->edid); + drm_add_edid_modes(connector, aconnector->edid); if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 078b7e344185..2d5c7daaee23 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1108,6 +1108,18 @@ static enum bp_result bios_parser_enable_disp_power_gating( action); } +static enum bp_result bios_parser_enable_lvtma_control( + struct dc_bios *dcb, + uint8_t uc_pwr_on) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + + if (!bp->cmd_tbl.enable_lvtma_control) + return BP_RESULT_FAILURE; + + return bp->cmd_tbl.enable_lvtma_control(bp, uc_pwr_on); +} + static bool bios_parser_is_accelerated_mode( struct dc_bios *dcb) { @@ -2208,7 +2220,9 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_board_layout_info = bios_get_board_layout_info, .pack_data_tables = bios_parser_pack_data_tables, - .get_atom_dc_golden_table = bios_get_atom_dc_golden_table + .get_atom_dc_golden_table = bios_get_atom_dc_golden_table, + + .enable_lvtma_control = bios_parser_enable_lvtma_control }; static bool bios_parser2_construct( diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index bed91572f82a..eb3ae5c3677c 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -904,6 +904,33 @@ static unsigned int get_smu_clock_info_v3_1(struct bios_parser *bp, uint8_t id) return 0; } +/****************************************************************************** + ****************************************************************************** + ** + ** LVTMA CONTROL + ** + ****************************************************************************** + *****************************************************************************/ + +static enum bp_result enable_lvtma_control( + struct bios_parser *bp, + uint8_t uc_pwr_on); + +static void init_enable_lvtma_control(struct bios_parser *bp) +{ + /* TODO add switch for table vrsion */ + bp->cmd_tbl.enable_lvtma_control = enable_lvtma_control; + +} + +static enum bp_result enable_lvtma_control( + struct bios_parser *bp, + uint8_t uc_pwr_on) +{ + enum bp_result result = BP_RESULT_FAILURE; + return result; +} + void 
dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) { init_dig_encoder_control(bp); @@ -919,4 +946,5 @@ void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp) init_set_dce_clock(bp); init_get_smu_clock_info(bp); + init_enable_lvtma_control(bp); } diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h index 7a2af24dfe60..7bdce013cde5 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.h +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.h @@ -94,7 +94,8 @@ struct cmd_tbl { struct bp_set_dce_clock_parameters *bp_params); unsigned int (*get_smu_clock_info)( struct bios_parser *bp, uint8_t id); - + enum bp_result (*enable_lvtma_control)(struct bios_parser *bp, + uint8_t uc_pwr_on); }; void dal_firmware_parser_init_cmd_tbl(struct bios_parser *bp); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 4bd6e03a7ef3..117d8aaf2a9b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3286,12 +3286,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) core_link_set_avmute(pipe_ctx, true); } + dc->hwss.blank_stream(pipe_ctx); #if defined(CONFIG_DRM_AMD_DC_HDCP) update_psp_stream_config(pipe_ctx, true); #endif - dc->hwss.blank_stream(pipe_ctx); - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index d06d07042a12..0811f941f430 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -136,6 +136,10 @@ struct dc_vbios_funcs { enum bp_result (*get_atom_dc_golden_table)( struct dc_bios *dcb); + + enum bp_result (*enable_lvtma_control)( + struct dc_bios *bios, + uint8_t uc_pwr_on); }; struct bios_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h index 70ec691e14d2..99c68ca9c7e0 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h @@ -49,7 +49,7 @@ #define DCN_PANEL_CNTL_REG_LIST()\ DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \ DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \ - DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \ + DCN_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \ SR(BL_PWM_CNTL), \ SR(BL_PWM_CNTL2), \ SR(BL_PWM_PERIOD_CNTL), \ diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 49380ed3aeae..45c9e9027886 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -842,6 +842,17 @@ void dce110_edp_power_control( cntl.coherent = false; cntl.lanes_number = LANE_COUNT_FOUR; cntl.hpd_sel = link->link_enc->hpd_source; + + if (ctx->dc->ctx->dmub_srv && + ctx->dc->debug.dmub_command_table) { + if (cntl.action == TRANSMITTER_CONTROL_POWER_ON) + bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_POWER_ON); + else + bp_result = ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_POWER_OFF); + } + bp_result = link_transmitter_control(ctx->dc_bios, &cntl); if (!power_up) @@ -919,8 +930,21 @@ void dce110_edp_backlight_control( /*edp 1.2*/ if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) edp_receiver_ready_T7(link); + + if 
(ctx->dc->ctx->dmub_srv && + ctx->dc->debug.dmub_command_table) { + if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON) + ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_LCD_BLON); + else + ctx->dc_bios->funcs->enable_lvtma_control(ctx->dc_bios, + LVTMA_CONTROL_LCD_BLOFF); + } + link_transmitter_control(ctx->dc_bios, &cntl); + + if (enable && link->dpcd_sink_ext_caps.bits.oled) msleep(OLED_POST_T7_DELAY); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 07b2f9399671..842abb4c475b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -121,35 +121,35 @@ void enc1_update_generic_info_packet( switch (packet_index) { case 0: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC0_FRAME_UPDATE, 1); + AFMT_GENERIC0_IMMEDIATE_UPDATE, 1); break; case 1: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC1_FRAME_UPDATE, 1); + AFMT_GENERIC1_IMMEDIATE_UPDATE, 1); break; case 2: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC2_FRAME_UPDATE, 1); + AFMT_GENERIC2_IMMEDIATE_UPDATE, 1); break; case 3: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC3_FRAME_UPDATE, 1); + AFMT_GENERIC3_IMMEDIATE_UPDATE, 1); break; case 4: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC4_FRAME_UPDATE, 1); + AFMT_GENERIC4_IMMEDIATE_UPDATE, 1); break; case 5: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC5_FRAME_UPDATE, 1); + AFMT_GENERIC5_IMMEDIATE_UPDATE, 1); break; case 6: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC6_FRAME_UPDATE, 1); + AFMT_GENERIC6_IMMEDIATE_UPDATE, 1); break; case 7: REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, - AFMT_GENERIC7_FRAME_UPDATE, 1); + AFMT_GENERIC7_IMMEDIATE_UPDATE, 1); break; default: break; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index ed385b1477be..30eae7459d50 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -281,7 +281,14 @@ struct dcn10_stream_enc_registers { SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\ + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\ SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\ @@ -345,7 +352,14 @@ struct dcn10_stream_enc_registers { type AFMT_GENERIC2_FRAME_UPDATE;\ type AFMT_GENERIC3_FRAME_UPDATE;\ type AFMT_GENERIC4_FRAME_UPDATE;\ + type AFMT_GENERIC0_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC1_IMMEDIATE_UPDATE;\ + type 
AFMT_GENERIC2_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC3_IMMEDIATE_UPDATE;\ type AFMT_GENERIC4_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC5_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC6_IMMEDIATE_UPDATE;\ + type AFMT_GENERIC7_IMMEDIATE_UPDATE;\ type AFMT_GENERIC5_FRAME_UPDATE;\ type AFMT_GENERIC6_FRAME_UPDATE;\ type AFMT_GENERIC7_FRAME_UPDATE;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 66180b4332f1..c8cfd3ba1c15 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1457,8 +1457,8 @@ static void dcn20_update_dchubp_dpp( /* Any updates are handled in dc interface, just need to apply existing for plane enable */ if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || - pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport) - && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + pipe_ctx->update_flags.bits.scaler || viewport_changed == true) && + pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { dc->hwss.set_cursor_position(pipe_ctx); dc->hwss.set_cursor_attribute(pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index bf0044f7417e..dcbf28dd72d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -167,7 +167,9 @@ LE_SF(DCIO_SOFT_RESET, UNIPHYB_SOFT_RESET, mask_sh),\ LE_SF(DCIO_SOFT_RESET, UNIPHYC_SOFT_RESET, mask_sh),\ LE_SF(DCIO_SOFT_RESET, UNIPHYD_SOFT_RESET, mask_sh),\ - LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh) + LE_SF(DCIO_SOFT_RESET, UNIPHYE_SOFT_RESET, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh) #define LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)\ LINK_ENCODER_MASK_SH_LIST_DCN10(mask_sh),\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 790baf552695..9140b3fc767a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3141,7 +3141,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co int vlevel = 0; int pipe_split_from[MAX_PIPES]; int pipe_cnt = 0; - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL); + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC); DC_LOGGER_INIT(dc->ctx->logger); BW_VAL_TRACE_COUNT(); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h index 8e9fd59ccde8..2fbf879cd327 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h @@ -61,7 +61,10 @@ DPCS_DCN2_MASK_SH_LIST(mask_sh),\ LE_SF(DPCSTX0_DPCSTX_TX_CNTL, DPCS_TX_DATA_ORDER_INVERT_18_BIT, mask_sh),\ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL0, RDPCS_PHY_TX_VBOOST_LVL, mask_sh),\ - LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh) + LE_SF(RDPCSTX0_RDPCSTX_CLOCK_CNTL, RDPCS_TX_CLK_EN, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, mask_sh),\ + LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh) + 
void dcn30_link_encoder_construct( struct dcn20_link_encoder *enc20, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 653a571e366d..ebe0cc5b833b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -491,6 +491,7 @@ static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { [id] = {\ LE_DCN3_REG_LIST(id), \ UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ } static const struct dce110_aux_registers_shift aux_shift = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c index b54814f11b74..2beb284f89b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c @@ -63,6 +63,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN30_MAX_DSC_IMAGE_WIDTH 5184 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3984,6 +3985,9 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l } else if (v->PlaneRequiredDISPCLKWithoutODMCombine > v->MaxDispclkRoundedDownToDFSGranularity) { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1; + } else if (v->DSCEnabled[k] && (v->HActive[k] > DCN30_MAX_DSC_IMAGE_WIDTH)) { + v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_2to1; + v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithODMCombine2To1; } else { v->ODMCombineEnablePerState[i][k] = dm_odm_combine_mode_disabled; v->PlaneRequiredDISPCLK = v->PlaneRequiredDISPCLKWithoutODMCombine; diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index c30437ae8395..21011edea337 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -101,6 +101,13 @@ enum bp_pipe_control_action { ASIC_PIPE_INIT }; +enum bp_lvtma_control_action { + LVTMA_CONTROL_LCD_BLOFF = 2, + LVTMA_CONTROL_LCD_BLON = 3, + LVTMA_CONTROL_POWER_ON = 12, + LVTMA_CONTROL_POWER_OFF = 13 +}; + struct bp_encoder_control { enum bp_encoder_control_action action; enum engine_id engine_id; diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h index 89ef9f6860e5..16df2a485dd0 100644 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg); */ static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2) { + if (arg1.value == 0) + return arg2.value == 0 ? 
dc_fixpt_one : dc_fixpt_zero; + return dc_fixpt_exp( dc_fixpt_mul( dc_fixpt_log(arg1), diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 81820f3d6b3b..d988533d4af5 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -324,22 +324,44 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. + * - Delta for CEIL: delta_from_mid_point_in_us_1 + * - Delta for FLOOR: delta_from_mid_point_in_us_2 */ - if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && - (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || - mid_point_frames_floor < 2)) { + if ((last_render_time_in_us / mid_point_frames_ceil) < in_out_vrr->min_duration_in_us) { + /* Check for out of range. + * If using CEIL produces a value that is out of range, + * then we are forced to use FLOOR. + */ + frames_to_insert = mid_point_frames_floor; + } else if (mid_point_frames_floor < 2) { + /* Check if FLOOR would result in non-LFC. In this case + * choose to use CEIL + */ + frames_to_insert = mid_point_frames_ceil; + } else if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + /* If choosing CEIL results in a frame duration that is + * closer to the mid point of the range. + * Choose CEIL + */ frames_to_insert = mid_point_frames_ceil; - delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - - delta_from_mid_point_in_us_1; } else { + /* If choosing FLOOR results in a frame duration that is + * closer to the mid point of the range. + * Choose FLOOR + */ frames_to_insert = mid_point_frames_floor; - delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 - - delta_from_mid_point_in_us_2; } /* Prefer current frame multiplier when BTR is enabled unless it drifts * too far from the midpoint */ + if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - + delta_from_mid_point_in_us_1; + } else { + delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 - + delta_from_mid_point_in_us_2; + } if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 6c991de8f371..fb962b9ceffb 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -2204,14 +2204,17 @@ static const struct throttling_logging_label { }; static void arcturus_log_thermal_throttling_event(struct smu_context *smu) { + int ret; int throttler_idx, throtting_events = 0, buf_idx = 0; struct amdgpu_device *adev = smu->adev; uint32_t throttler_status; char log_buf[256]; - arcturus_get_smu_metrics_data(smu, - METRICS_THROTTLER_STATUS, - &throttler_status); + ret = arcturus_get_smu_metrics_data(smu, + METRICS_THROTTLER_STATUS, + &throttler_status); + if (ret) + return; memset(log_buf, 0, sizeof(log_buf)); for (throttler_idx = 0; throttler_idx < ARRAY_SIZE(logging_label); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 3b8839641770..ea70d736f6a8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -979,10 +979,7 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) { struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); - uint64_t features_enabled; - int i; - bool enabled; - int ret = 0; + int i, ret = 0; PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, @@ -990,17 +987,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr) "[DisableAllSMUFeatures] Failed to disable all smu features!", return ret); - ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); - PP_ASSERT_WITH_CODE(!ret, - "[DisableAllSMUFeatures] Failed to get enabled smc features!", - return ret); - - for (i = 0; i < GNLD_FEATURES_MAX; i++) { - enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? - true : false; - data->smu_features[i].enabled = enabled; - data->smu_features[i].supported = enabled; - } + for (i = 0; i < GNLD_FEATURES_MAX; i++) + data->smu_features[i].enabled = 0; return 0; } @@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr) data->uvd_power_gated = true; data->vce_power_gated = true; - - if (data->smu_features[GNLD_DPM_UVD].enabled) - data->uvd_power_gated = false; - - if (data->smu_features[GNLD_DPM_VCE].enabled) - data->vce_power_gated = false; } static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr) @@ -3230,10 +3212,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf) static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks) { - uint64_t features_enabled; - uint64_t features_to_enable; - uint64_t features_to_disable; - int ret = 0; + struct vega20_hwmgr *data = + (struct vega20_hwmgr *)(hwmgr->backend); + uint64_t features_enabled, features_to_enable, features_to_disable; + int i, ret = 0; + bool enabled; if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX)) return -EINVAL; @@ -3262,6 +3245,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe return ret; } + /* Update the cached feature enablement state */ + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled); + if (ret) + return ret; + + for (i = 0; i < GNLD_FEATURES_MAX; i++) { + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ? 
+ true : false; + data->smu_features[i].enabled = enabled; + } + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 729ec6e0d43a..b2ec3a5141cc 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -14930,7 +14930,7 @@ static int intel_atomic_check(struct drm_device *dev, if (any_ms && !check_digital_port_conflicts(state)) { drm_dbg_kms(&dev_priv->drm, "rejecting conflicting digital port configuration\n"); - ret = EINVAL; + ret = -EINVAL; goto fail; } diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 3644752cc5ec..5a5cfe25085b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -2044,9 +2044,12 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability); static int i915_lpsp_capability_show(struct seq_file *m, void *data) { struct drm_connector *connector = m->private; - struct intel_encoder *encoder = - intel_attached_encoder(to_intel_connector(connector)); struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_encoder *encoder; + + encoder = intel_attached_encoder(to_intel_connector(connector)); + if (!encoder) + return -ENODEV; if (connector->status != connector_status_connected) return -ENODEV; diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 0c713e83274d..e0fcb89c736b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -4147,6 +4147,12 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { + .name = "TC cold off", + .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, + .ops = &tgl_tc_cold_off_ops, + .id = DISP_PW_ID_NONE, + }, + { .name = "AUX A", .domains = TGL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_aux_power_well_ops, @@ -4332,12 +4338,6 @@ static const struct i915_power_well_desc tgl_power_wells[] = { .hsw.irq_pipe_mask = BIT(PIPE_D), }, }, - { - .name = "TC cold off", - .domains = TGL_TC_COLD_OFF_POWER_DOMAINS, - .ops = &tgl_tc_cold_off_ops, - .id = DISP_PW_ID_NONE, - }, }; static const struct i915_power_well_desc rkl_power_wells[] = { @@ -5240,10 +5240,10 @@ struct buddy_page_mask { }; static const struct buddy_page_mask tgl_buddy_page_masks[] = { - { .num_channels = 1, .type = INTEL_DRAM_LPDDR4, .page_mask = 0xE }, { .num_channels = 1, .type = INTEL_DRAM_DDR4, .page_mask = 0xF }, { .num_channels = 2, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x1C }, { .num_channels = 2, .type = INTEL_DRAM_DDR4, .page_mask = 0x1F }, + { .num_channels = 4, .type = INTEL_DRAM_LPDDR4, .page_mask = 0x38 }, {} }; diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c index 072725a448db..ad86c5eb5bba 100644 --- a/drivers/gpu/drm/i915/gvt/cfg_space.c +++ b/drivers/gpu/drm/i915/gvt/cfg_space.c @@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, { u8 *cfg_base = vgpu_cfg_space(vgpu); u8 mask, new, old; + pci_power_t pwr; int i = 0; for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) { @@ -91,6 +92,15 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, /* For other configuration space directly copy as it is. 
*/ if (i < bytes) memcpy(cfg_base + off + i, src + i, bytes - i); + + if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) { + pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off]) + & PCI_PM_CTRL_STATE_MASK); + if (pwr == PCI_D3hot) + vgpu->d3_entered = true; + gvt_dbg_core("vgpu-%d power status changed to %d\n", + vgpu->id, pwr); + } } /** @@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; u16 *gmch_ctl; + u8 next; memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, info->cfg_space_size); @@ -401,6 +412,19 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, pci_resource_len(gvt->gt->i915->drm.pdev, 2); memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4); + + /* PM Support */ + vgpu->cfg_space.pmcsr_off = 0; + if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) { + next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST]; + do { + if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) { + vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL; + break; + } + next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT]; + } while (next); + } } /** diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 210016192ce7..a3a4305eda01 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return create_scratch_page_tree(vgpu); } -static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) +void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu) { struct list_head *pos, *n; struct intel_vgpu_mm *mm; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 320b8d6ad92f..52d0d88abd86 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes); +void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu); + #endif /* _GVT_GTT_H_ */ diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index a4a6db6b7f90..ff7f2515a6fe 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar { struct intel_vgpu_cfg_space { unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE]; struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM]; + u32 pmcsr_off; }; #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space) @@ -198,6 +199,8 @@ struct intel_vgpu { struct intel_vgpu_submission submission; struct radix_tree_root page_track_tree; u32 hws_pga[I915_NUM_ENGINES]; + /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */ + bool d3_entered; struct dentry *debugfs; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 7d361623ff67..8fa9b31a2484 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu) intel_gvt_deactivate_vgpu(vgpu); mutex_lock(&vgpu->vgpu_lock); + vgpu->d3_entered = false; intel_vgpu_clean_workloads(vgpu, ALL_ENGINES); intel_vgpu_dmabuf_cleanup(vgpu); mutex_unlock(&vgpu->vgpu_lock); @@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL); 
idr_init(&vgpu->object_idr); intel_vgpu_init_cfg_space(vgpu, param->primary); + vgpu->d3_entered = false; ret = intel_vgpu_init_mmio(vgpu); if (ret) @@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, /* full GPU reset or device model level reset */ if (engine_mask == ALL_ENGINES || dmlr) { intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0); - intel_vgpu_invalidate_ppgtt(vgpu); + if (engine_mask == ALL_ENGINES) + intel_vgpu_invalidate_ppgtt(vgpu); /*fence will not be reset during virtual reset */ if (dmlr) { - intel_vgpu_reset_gtt(vgpu); + if(!vgpu->d3_entered) { + intel_vgpu_invalidate_ppgtt(vgpu); + intel_vgpu_destroy_all_ppgtt_mm(vgpu); + } + intel_vgpu_reset_ggtt(vgpu, true); intel_vgpu_reset_resource(vgpu); } @@ -572,7 +579,14 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, intel_vgpu_reset_cfg_space(vgpu); /* only reset the failsafe mode when dmlr reset */ vgpu->failsafe = false; - vgpu->pv_notified = false; + /* + * PCI_D0 is set before dmlr, so reset d3_entered here + * after done using. + */ + if(vgpu->d3_entered) + vgpu->d3_entered = false; + else + vgpu->pv_notified = false; } } diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 28bc5f13ae52..056994224c6b 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -445,8 +445,6 @@ static void i915_pmu_event_destroy(struct perf_event *event) container_of(event->pmu, typeof(*i915), pmu.base); drm_WARN_ON(&i915->drm, event->parent); - - module_put(THIS_MODULE); } static int @@ -538,10 +536,8 @@ static int i915_pmu_event_init(struct perf_event *event) if (ret) return ret; - if (!event->parent) { - __module_get(THIS_MODULE); + if (!event->parent) event->destroy = i915_pmu_event_destroy; - } return 0; } @@ -1130,6 +1126,7 @@ void i915_pmu_register(struct drm_i915_private *i915) if (!pmu->base.attr_groups) goto err_attr; + pmu->base.module = THIS_MODULE; pmu->base.task_ctx_nr = perf_invalid_context; pmu->base.event_init = i915_pmu_event_init; pmu->base.add = i915_pmu_event_add; diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c index 939a6caebb03..632b912b0bc9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_buddy.c +++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c @@ -8,8 +8,6 @@ #include "../i915_selftest.h" #include "i915_random.h" -#define SZ_8G (1ULL << 33) - static void __igt_dump_block(struct i915_buddy_mm *mm, struct i915_buddy_block *block, bool buddy) @@ -281,18 +279,22 @@ static int igt_check_mm(struct i915_buddy_mm *mm) static void igt_mm_config(u64 *size, u64 *chunk_size) { I915_RND_STATE(prng); - u64 s, ms; + u32 s, ms; /* Nothing fancy, just try to get an interesting bit pattern */ prandom_seed_state(&prng, i915_selftest.random_seed); - s = i915_prandom_u64_state(&prng) & (SZ_8G - 1); - ms = BIT_ULL(12 + (prandom_u32_state(&prng) % ilog2(s >> 12))); - s = max(s & -ms, ms); + /* Let size be a random number of pages up to 8 GB (2M pages) */ + s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng); + /* Let the chunk size be a random power of 2 less than size */ + ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng)); + /* Round size down to the chunk size */ + s &= -ms; - *chunk_size = ms; - *size = s; + /* Convert from pages to bytes */ + *chunk_size = (u64)ms << 12; + *size = (u64)s << 12; } static int igt_buddy_alloc_smoke(void *arg) diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c 
b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index b9810bf156c3..f127e633f7ca 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -78,6 +78,7 @@ static void mock_device_release(struct drm_device *dev) drm_mode_config_cleanup(&i915->drm); out: + i915_params_free(&i915->params); put_device(&i915->drm.pdev->dev); i915->drm.pdev = NULL; } @@ -165,6 +166,8 @@ struct drm_i915_private *mock_gem_device(void) i915->drm.pdev = pdev; drmm_add_final_kfree(&i915->drm, i915); + i915_params_copy(&i915->params, &i915_modparams); + intel_runtime_pm_init_early(&i915->runtime_pm); /* Using the global GTT may ask questions about KMS users, so prepare */ diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 7a2430e34e00..c8da7adc6b30 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -179,6 +179,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, vfpriv->ctx_id, buflist, out_fence); + dma_fence_put(&out_fence->f); virtio_gpu_notify(vgdev); return 0; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 2cdd3cd9ce75..e83651b7747d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -79,6 +79,7 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) } sg_free_table(shmem->pages); + kfree(shmem->pages); shmem->pages = NULL; drm_gem_shmem_unpin(&bo->base.base); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index ef0cd2998671..c36b4d2b61e0 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2751,7 +2751,7 @@ static int __init ib_core_init(void) ret = addr_init(); if (ret) { - pr_warn("Could't init IB address resolution\n"); + pr_warn("Couldn't init IB address resolution\n"); goto err_ibnl; } diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index dad0df8a2467..17ac8b7c5710 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -821,7 +821,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event, struct ib_event event; unsigned int flags; - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) { + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR && + rdma_is_kernel_res(&qp->ib_qp.res)) { flags = bnxt_re_lock_cqs(qp); bnxt_qplib_add_flush_qp(&qp->qplib_qp); bnxt_re_unlock_cqs(qp, flags); diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c index 9af82ff933d7..73d197e21730 100644 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c @@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe) case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_RDMA_WRITE: + case IB_WR_RDMA_WRITE_WITH_IMM: switch (prev->wr.opcode) { case IB_WR_TID_RDMA_WRITE: req = wqe_to_tid_req(prev); diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index da9888deff8c..6edcbdcd8f43 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -65,8 +65,6 @@ #define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 #define HNS_ROCE_MIN_CQE_CNT 16 -#define HNS_ROCE_RESERVED_SGE 1 - #define HNS_ROCE_MAX_IRQ_NUM 128 #define 
HNS_ROCE_SGE_IN_WQE 2 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 07b4c85d341d..aeb3a6fa7d47 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -535,7 +535,7 @@ static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); - dev_dbg(dev, "ext SDB threshold: epmty: 0x%x, ful: 0x%x\n", + dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n", ext_sdb_alept, ext_sdb_alful); } diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index d2968594664b..4cda95ed1fbe 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -633,7 +633,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); - if (unlikely(wr->num_sge >= hr_qp->rq.max_gs)) { + if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n", wr->num_sge, hr_qp->rq.max_gs); ret = -EINVAL; @@ -653,7 +653,6 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, if (wr->num_sge < hr_qp->rq.max_gs) { dseg->lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); dseg->addr = 0; - dseg->len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); } /* rq support inline data */ @@ -787,8 +786,8 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, } if (wr->num_sge < srq->max_gs) { - dseg[i].len = cpu_to_le32(HNS_ROCE_INVALID_SGE_LENGTH); - dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY); + dseg[i].len = 0; + dseg[i].lkey = cpu_to_le32(0x100); dseg[i].addr = 0; } @@ -5070,7 +5069,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) attr->srq_limit = limit_wl; attr->max_wr = srq->wqe_cnt - 1; - attr->max_sge = srq->max_gs - HNS_ROCE_RESERVED_SGE; + attr->max_sge = srq->max_gs; out: hns_roce_free_cmd_mailbox(hr_dev, mailbox); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 1fb1c583d0f8..ac29be43b6bd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -92,9 +92,7 @@ #define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000 #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2 -#define HNS_ROCE_INVALID_LKEY 0x0 -#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 - +#define HNS_ROCE_INVALID_LKEY 0x100 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 #define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index e94ca130ff5e..c063c450c715 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -386,8 +386,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, return -EINVAL; } - hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge) + - HNS_ROCE_RESERVED_SGE); + hr_qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE) hr_qp->rq.wqe_shift = ilog2(hr_dev->caps.max_rq_desc_sz); @@ -402,7 +401,7 @@ static int set_rq_size(struct hns_roce_dev *hr_dev, struct ib_qp_cap *cap, hr_qp->rq_inl_buf.wqe_cnt = 0; cap->max_recv_wr = cnt; - cap->max_recv_sge = hr_qp->rq.max_gs - HNS_ROCE_RESERVED_SGE; + cap->max_recv_sge = 
hr_qp->rq.max_gs; return 0; } diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index f40a000e94ee..b9e2dbd372b6 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c @@ -297,7 +297,7 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, spin_lock_init(&srq->lock); srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1); - srq->max_gs = init_attr->attr.max_sge + HNS_ROCE_RESERVED_SGE; + srq->max_gs = init_attr->attr.max_sge; if (udata) { ret = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd)); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index c9abe1c01e4e..662e7fc7f628 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -120,7 +120,7 @@ static void usnic_ib_qp_grp_modify_active_to_err(struct usnic_ib_dev *us_ibdev) IB_QPS_ERR, NULL); if (status) { - usnic_err("Failed to transistion qp grp %u from %s to %s\n", + usnic_err("Failed to transition qp grp %u from %s to %s\n", qp_grp->grp_id, usnic_ib_qp_grp_state_to_string (cur_state), diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 4959f5df21bd..5141d49a046b 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size, if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) && !gfpflags_allow_blocking(gfp) && !coherent) - cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, - gfp); + page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr, + gfp, NULL); else cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs); if (!cpu_addr) diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 31e43a2197a3..cddaa43a9d52 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -130,7 +130,7 @@ static inline struct bonding *__get_bond_by_port(struct port *port) /** * __get_first_agg - get the first aggregator in the bond - * @bond: the bond we're looking at + * @port: the port we're looking at * * Return the aggregator of the first slave in @bond, or %NULL if it can't be * found. @@ -1626,7 +1626,7 @@ static int agg_device_up(const struct aggregator *agg) /** * ad_agg_selection_logic - select an aggregation group for a team - * @aggregator: the aggregator we're looking at + * @agg: the aggregator we're looking at * @update_slave_arr: Does slave array need update? * * It is assumed that only one aggregator may be selected for a team. @@ -1810,7 +1810,7 @@ static void ad_initialize_agg(struct aggregator *aggregator) /** * ad_initialize_port - initialize a given port's parameters - * @aggregator: the aggregator we're looking at + * @port: the port we're looking at * @lacp_fast: boolean. whether fast periodic should be used */ static void ad_initialize_port(struct port *port, int lacp_fast) @@ -1967,6 +1967,7 @@ static void ad_marker_response_received(struct bond_marker *marker, /** * bond_3ad_initiate_agg_selection - initate aggregator selection * @bond: bonding struct + * @timeout: timeout value to set * * Set the aggregation selection timer, to initiate an agg selection in * the very near future. 
Called during first initialization, and during @@ -2259,7 +2260,7 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond) /** * bond_3ad_state_machine_handler - handle state machines timeout - * @bond: bonding struct to work on + * @work: work context to fetch bonding struct to work on from * * The state machine handling concept in this module is to check every tick * which state machine should operate any function. The execution order is @@ -2500,7 +2501,7 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave) /** * bond_3ad_handle_link_change - handle a slave's link status change indication * @slave: slave struct to work on - * @status: whether the link is now up or down + * @link: whether the link is now up or down * * Handle reselection of aggregator (if needed) for this port. */ @@ -2551,7 +2552,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link) /** * bond_3ad_set_carrier - set link state for bonding master - * @bond - bonding structure + * @bond: bonding structure * * if we have an active aggregator, we're up, if not, we're down. * Presumes that we cannot have an active aggregator if there are @@ -2664,7 +2665,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, /** * bond_3ad_update_lacp_rate - change the lacp rate - * @bond - bonding struct + * @bond: bonding struct * * When modify lacp_rate parameter via sysfs, * update actor_oper_port_state of each port. diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 095ea51d1853..4e1b7deb724b 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1206,8 +1206,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav /** * alb_set_mac_address - * @bond: - * @addr: + * @bond: bonding we're working on + * @addr: MAC address to set * * In TLB mode all slaves are configured to the bond's hw address, but set * their dev_addr field to different addresses (based on their permanent hw diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5ad43aaf76e5..415a37e44cae 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -322,6 +322,7 @@ netdev_tx_t bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, /** * bond_vlan_rx_add_vid - Propagates adding an id to slaves * @bond_dev: bonding net device that got called + * @proto: network protocol ID * @vid: vlan id being added */ static int bond_vlan_rx_add_vid(struct net_device *bond_dev, @@ -355,6 +356,7 @@ unwind: /** * bond_vlan_rx_kill_vid - Propagates deleting an id to slaves * @bond_dev: bonding net device that got called + * @proto: network protocol ID * @vid: vlan id being removed */ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, @@ -948,7 +950,7 @@ static bool bond_should_notify_peers(struct bonding *bond) /** * change_active_interface - change the active slave into the specified one * @bond: our bonding struct - * @new: the new slave to make the active one + * @new_active: the new slave to make the active one * * Set the new slave to the bond's settings and unset them on the old * curr_active_slave. 
@@ -2205,7 +2207,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev, int ret; ret = __bond_release_one(bond_dev, slave_dev, false, true); - if (ret == 0 && !bond_has_slaves(bond)) { + if (ret == 0 && !bond_has_slaves(bond) && + bond_dev->reg_state != NETREG_UNREGISTERING) { bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; netdev_info(bond_dev, "Destroying bond\n"); bond_remove_proc_entry(bond); @@ -4552,13 +4555,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev) return ret; } +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed) +{ + if (speed == 0 || speed == SPEED_UNKNOWN) + speed = slave->speed; + else + speed = min(speed, slave->speed); + + return speed; +} + static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, struct ethtool_link_ksettings *cmd) { struct bonding *bond = netdev_priv(bond_dev); - unsigned long speed = 0; struct list_head *iter; struct slave *slave; + u32 speed = 0; cmd->base.duplex = DUPLEX_UNKNOWN; cmd->base.port = PORT_OTHER; @@ -4570,8 +4583,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev, */ bond_for_each_slave(bond, slave, iter) { if (bond_slave_can_tx(slave)) { - if (slave->speed != SPEED_UNKNOWN) - speed += slave->speed; + if (slave->speed != SPEED_UNKNOWN) { + if (BOND_MODE(bond) == BOND_MODE_BROADCAST) + speed = bond_mode_bcast_speed(slave, + speed); + else + speed += slave->speed; + } if (cmd->base.duplex == DUPLEX_UNKNOWN && slave->duplex != DUPLEX_UNKNOWN) cmd->base.duplex = slave->duplex; diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index ef1c3151fbb2..bd0ada4e81b0 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -951,7 +951,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev) static void update_stats(struct net_device *dev) { unsigned int ioaddr = dev->base_addr; - u8 rx, tx, up; + u8 up; pr_debug("%s: updating the statistics.\n", dev->name); @@ -972,8 +972,8 @@ static void update_stats(struct net_device *dev) dev->stats.tx_packets += (up&0x30) << 4; /* Rx packets */ inb(ioaddr + 7); /* Tx deferrals */ inb(ioaddr + 8); - rx = inw(ioaddr + 10); - tx = inw(ioaddr + 12); + /* rx */ inw(ioaddr + 10); + /* tx */ inw(ioaddr + 12); EL3WINDOW(4); /* BadSSD */ inb(ioaddr + 12); diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index aeae7966a082..08db4c9da2fa 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -898,6 +898,7 @@ static int ax_close(struct net_device *dev) /** * axnet_tx_timeout - handle transmit time out condition * @dev: network device which has apparently fallen asleep + * @txqueue: unused * * Called by kernel when device never acknowledges a transmit has * completed (or failed) - i.e. never posted a Tx related interrupt. 
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9934421814b4..fb37816a74db 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3715,11 +3715,11 @@ failed_mii_init: failed_irq: failed_init: fec_ptp_stop(pdev); - if (fep->reg_phy) - regulator_disable(fep->reg_phy); failed_reset: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); + if (fep->reg_phy) + regulator_disable(fep->reg_phy); failed_regulator: clk_disable_unprepare(fep->clk_ahb); failed_clk_ahb: diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index a62ddd626929..c0c8efe42fce 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -981,7 +981,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes { #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04 #define I40E_AQC_SET_VSI_DEFAULT 0x08 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000 __le16 seid; __le16 vlan_tag; #define I40E_AQC_SET_VSI_VLAN_VALID 0x8000 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index afad5e9f80e0..6ab52cbd697a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1967,6 +1967,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags, } /** + * i40e_is_aq_api_ver_ge + * @aq: pointer to AdminQ info containing HW API version to compare + * @maj: API major value + * @min: API minor value + * + * Assert whether current HW API version is greater/equal than provided. 
+ **/
+static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
+				  u16 min)
+{
+	return (aq->api_maj_ver > maj ||
+		(aq->api_maj_ver == maj && aq->api_min_ver >= min));
+}
+
+/**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
  * @vsi_ctx: pointer to a vsi context struct
@@ -2091,18 +2106,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 
 	if (set) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
-		if (rx_only_promisc &&
-		    (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
-		     (hw->aq.api_maj_ver > 1)))
-			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+		if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
 	}
 
 	cmd->promiscuous_flags = cpu_to_le16(flags);
 
 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
-	if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
-	    (hw->aq.api_maj_ver > 1))
-		cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
+	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+		cmd->valid_flags |=
+			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 
 	cmd->seid = cpu_to_le16(seid);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
@@ -2199,11 +2212,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-	if (enable)
+	if (enable) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+		if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+			flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
+	}
 
 	cmd->promiscuous_flags = cpu_to_le16(flags);
 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
+		cmd->valid_flags |=
+			cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
 	cmd->seid = cpu_to_le16(seid);
 	cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b5399357a667..2e433fdbf2c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -15463,6 +15463,9 @@ static void i40e_remove(struct pci_dev *pdev)
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
 	i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
 
+	while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+		usleep_range(1000, 2000);
+
 	/* no more scheduling of any task */
 	set_bit(__I40E_SUSPENDED, pf->state);
 	set_bit(__I40E_DOWN, pf->state);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 7a6f2a0d413f..9593aa4eea36 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5142,6 +5142,8 @@ static int igc_probe(struct pci_dev *pdev,
 	device_set_wakeup_enable(&adapter->pdev->dev,
 				 adapter->flags & IGC_FLAG_WOL_SUPPORTED);
 
+	igc_ptp_init(adapter);
+
 	/* reset the hardware with the new settings */
 	igc_reset(adapter);
 
@@ -5158,9 +5160,6 @@ static int igc_probe(struct pci_dev *pdev,
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
-	/* do hw tstamp init after resetting */
-	igc_ptp_init(adapter);
-
 	/* Check if Media Autosense is enabled */
 	adapter->ei = *ei;
 
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index e67d4655b47e..36c999250fcc 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -496,8 +496,6 @@ void igc_ptp_init(struct igc_adapter *adapter)
 	adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
 	adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
 
-	igc_ptp_reset(adapter);
-
 	adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
 						&adapter->pdev->dev);
 	if (IS_ERR(adapter->ptp_clock)) {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 5975521a4c86..93c4cf7fedbf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1226,8 +1226,8 @@ int otx2_config_npa(struct otx2_nic *pfvf)
 	if (!hw->pool_cnt)
 		return -EINVAL;
 
-	qset->pool = devm_kzalloc(pfvf->dev, sizeof(struct otx2_pool) *
-				  hw->pool_cnt, GFP_KERNEL);
+	qset->pool = devm_kcalloc(pfvf->dev, hw->pool_cnt,
+				  sizeof(struct otx2_pool), GFP_KERNEL);
 	if (!qset->pool)
 		return -ENOMEM;
 
diff --git a/drivers/net/ethernet/sfc/ef100_rx.c b/drivers/net/ethernet/sfc/ef100_rx.c
index 13ba1a4f66fc..012925e878f4 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.c
+++ b/drivers/net/ethernet/sfc/ef100_rx.c
@@ -31,6 +31,11 @@
 #define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH \
 	ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH
 
+bool ef100_rx_buf_hash_valid(const u8 *prefix)
+{
+	return PREFIX_FIELD(prefix, RSS_HASH_VALID);
+}
+
 static bool check_fcs(struct efx_channel *channel, u32 *prefix)
 {
 	u16 rxclass;
diff --git a/drivers/net/ethernet/sfc/ef100_rx.h b/drivers/net/ethernet/sfc/ef100_rx.h
index f2f266863966..fe45b36458d1 100644
--- a/drivers/net/ethernet/sfc/ef100_rx.h
+++ b/drivers/net/ethernet/sfc/ef100_rx.h
@@ -14,6 +14,7 @@
 
 #include "net_driver.h"
 
+bool ef100_rx_buf_hash_valid(const u8 *prefix);
 void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event);
 void ef100_rx_write(struct efx_rx_queue *rx_queue);
 void __ef100_rx_packet(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index a9808e86068d..daf0c00c1242 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -45,6 +45,14 @@ static inline void efx_rx_flush_packet(struct efx_channel *channel)
 			   __ef100_rx_packet, __efx_rx_packet,
 			   channel);
 }
+static inline bool efx_rx_buf_hash_valid(struct efx_nic *efx, const u8 *prefix)
+{
+	if (efx->type->rx_buf_hash_valid)
+		return INDIRECT_CALL_1(efx->type->rx_buf_hash_valid,
+				       ef100_rx_buf_hash_valid,
+				       prefix);
+	return true;
+}
 
 /* Maximum number of TCP segments we support for soft-TSO */
 #define EFX_TSO_MAX_SEGS	100
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 7bb7ecb480ae..dcb741d8bd11 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1265,6 +1265,7 @@ struct efx_udp_tunnel {
 * @rx_write: Write RX descriptors and doorbell
 * @rx_defer_refill: Generate a refill reminder event
 * @rx_packet: Receive the queued RX buffer on a channel
+ * @rx_buf_hash_valid: Determine whether the RX prefix contains a valid hash
 * @ev_probe: Allocate resources for event queue
 * @ev_init: Initialise event queue on the NIC
 * @ev_fini: Deinitialise event queue on the NIC
@@ -1409,6 +1410,7 @@ struct efx_nic_type {
 	void (*rx_write)(struct efx_rx_queue *rx_queue);
 	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
 	void (*rx_packet)(struct efx_channel *channel);
+	bool (*rx_buf_hash_valid)(const u8 *prefix);
 	int (*ev_probe)(struct efx_channel *channel);
 	int (*ev_init)(struct efx_channel *channel);
 	void (*ev_fini)(struct efx_channel *channel);
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index fb77c7bbe4af..ef9bca92b0b7 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -525,7 +525,8 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 		return;
 	}
 
-	if (efx->net_dev->features & NETIF_F_RXHASH)
+	if (efx->net_dev->features & NETIF_F_RXHASH &&
+	    efx_rx_buf_hash_valid(efx, eh))
 		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
 			     PKT_HASH_TYPE_L3);
 	if (csum) {
diff --git a/drivers/net/fddi/skfp/cfm.c b/drivers/net/fddi/skfp/cfm.c
index e9bf42996de8..4eea3408034b 100644
--- a/drivers/net/fddi/skfp/cfm.c
+++ b/drivers/net/fddi/skfp/cfm.c
@@ -36,10 +36,6 @@
 #define KERNEL
 #include "h/smtstate.h"
 
-#ifndef lint
-static const char ID_sccs[] = "@(#)cfm.c	2.18	98/10/06	(C) SK " ;
-#endif
-
 /*
  * FSM Macros
 */
@@ -208,7 +204,6 @@ void cfm(struct s_smc *smc, int event)
 {
 	int	state ;		/* remember last state */
 	int	cond ;
-	int	oldstate ;
 
 	/* We will do the following: */
 	/*  - compute the variable WC_Flag for every port (This is where */
@@ -222,7 +217,6 @@ void cfm(struct s_smc *smc, int event)
 	/*  - change the portstates */
 	cem_priv_state (smc, event);
 
-	oldstate = smc->mib.fddiSMTCF_State ;
 	do {
 		DB_CFM("CFM : state %s%s event %s",
 			smc->mib.fddiSMTCF_State & AFLAG ? "ACTIONS " : "",
@@ -250,18 +244,11 @@ void cfm(struct s_smc *smc, int event)
 	if (cond != smc->mib.fddiSMTPeerWrapFlag)
 		smt_srf_event(smc,SMT_COND_SMT_PEER_WRAP,0,cond) ;
 
-#if	0
 	/*
-	 * Don't send ever MAC_PATH_CHANGE events. Our MAC is hard-wired
+	 * Don't ever send MAC_PATH_CHANGE events. Our MAC is hard-wired
 	 * to the primary path.
 	 */
-	/*
-	 * path change
-	 */
-	if (smc->mib.fddiSMTCF_State != oldstate) {
-		smt_srf_event(smc,SMT_EVENT_MAC_PATH_CHANGE,INDEX_MAC,0) ;
-	}
-#endif
+
 #endif	/* no SLIM_SMT */
 
 	/*
diff --git a/drivers/net/fddi/skfp/fplustm.c b/drivers/net/fddi/skfp/fplustm.c
index 02966d141948..4cbb145c74ab 100644
--- a/drivers/net/fddi/skfp/fplustm.c
+++ b/drivers/net/fddi/skfp/fplustm.c
@@ -21,10 +21,6 @@
 #include <linux/bitrev.h>
 #include <linux/etherdevice.h>
 
-#ifndef lint
-static const char ID_sccs[] = "@(#)fplustm.c	1.32	99/02/23	(C) SK " ;
-#endif
-
 #ifndef UNUSED
 #ifdef lint
 #define UNUSED(x)	(x) = (x)
diff --git a/drivers/net/fddi/skfp/hwmtm.c b/drivers/net/fddi/skfp/hwmtm.c
index 3412e0fb0ac4..107039056511 100644
--- a/drivers/net/fddi/skfp/hwmtm.c
+++ b/drivers/net/fddi/skfp/hwmtm.c
@@ -10,10 +10,6 @@
 *
 ******************************************************************************/
 
-#ifndef lint
-static char const ID_sccs[] = "@(#)hwmtm.c	1.40	99/05/31	(C) SK" ;
-#endif
-
 #define	HWMTM
 
 #ifndef	FDDI
diff --git a/drivers/net/fddi/skfp/smt.c b/drivers/net/fddi/skfp/smt.c
index b8c59d803ce6..774a6e3b0a67 100644
--- a/drivers/net/fddi/skfp/smt.c
+++ b/drivers/net/fddi/skfp/smt.c
@@ -20,10 +20,6 @@
 #define KERNEL
 #include "h/smtstate.h"
 
-#ifndef lint
-static const char ID_sccs[] = "@(#)smt.c	2.43	98/11/23	(C) SK " ;
-#endif
-
 /*
 * FC in SMbuf
 */
@@ -1561,7 +1557,7 @@ u_long smt_get_tid(struct s_smc *smc)
 	return tid & 0x3fffffffL;
 }
 
-
+#ifdef	LITTLE_ENDIAN
 /*
 * table of parameter lengths
 */
@@ -1641,6 +1637,7 @@ static const struct smt_pdef {
 } ;
 
 #define N_SMT_PLEN	ARRAY_SIZE(smt_pdef)
+#endif
 
 int smt_check_para(struct s_smc *smc, struct smt_header *sm,
 		   const u_short list[])
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 15e87c097b0b..5bca94c99006 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
 	kfree(port);
 }
 
+#define IPVLAN_ALWAYS_ON_OFLOADS \
+	(NETIF_F_SG | NETIF_F_HW_CSUM | \
+	 NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
+
+#define IPVLAN_ALWAYS_ON \
+	(IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
+
 #define IPVLAN_FEATURES \
-	(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
+	(NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_ALL_TSO | NETIF_F_GSO_ROBUST | \
	 NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
 
+	/* NETIF_F_GSO_ENCAP_ALL NETIF_F_GSO_SOFTWARE Newly added */
+
 #define IPVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
@@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
 	dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
 		     (phy_dev->state & IPVLAN_STATE_MASK);
 	dev->features = phy_dev->features & IPVLAN_FEATURES;
-	dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
+	dev->features |= IPVLAN_ALWAYS_ON;
+	dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
+	dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
 	dev->hw_enc_features |= dev->features;
 	dev->gso_max_size = phy_dev->gso_max_size;
 	dev->gso_max_segs = phy_dev->gso_max_segs;
@@ -227,7 +238,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
 {
 	struct ipvl_dev *ipvlan = netdev_priv(dev);
 
-	return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+	features |= NETIF_F_ALL_FOR_ALL;
+	features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
+	features = netdev_increment_features(ipvlan->phy_dev->features,
+					     features, features);
+	features |= IPVLAN_ALWAYS_ON;
+	features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
+
+	return features;
 }
 
 static void ipvlan_change_rx_flags(struct net_device *dev, int change)
@@ -734,10 +752,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
 
 	case NETDEV_FEAT_CHANGE:
 		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-			ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
 			ipvlan->dev->gso_max_size = dev->gso_max_size;
 			ipvlan->dev->gso_max_segs = dev->gso_max_segs;
-			netdev_features_change(ipvlan->dev);
+			netdev_update_features(ipvlan->dev);
 		}
 		break;
 
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 7bcee41905cf..3ca4daf63389 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -295,14 +295,13 @@ static int dlci_close(struct net_device *dev)
 {
 	struct dlci_local	*dlp;
 	struct frad_local	*flp;
-	int			err;
 
 	netif_stop_queue(dev);
 
 	dlp = netdev_priv(dev);
 
 	flp = netdev_priv(dlp->slave);
-	err = (*flp->deactivate)(dlp->slave, dev);
+	(*flp->deactivate)(dlp->slave, dev);
 
 	return 0;
 }
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index dfc16770458d..386ed2aa31fd 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -230,6 +230,7 @@ static void hdlc_setup_dev(struct net_device *dev)
 	dev->max_mtu		 = HDLC_MAX_MTU;
 	dev->type		 = ARPHRD_RAWHDLC;
 	dev->hard_header_len	 = 16;
+	dev->needed_headroom	 = 0;
 	dev->addr_len		 = 0;
 	dev->header_ops		 = &hdlc_null_ops;
 }
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index f70336bb6f52..f52b9fed0593 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -107,8 +107,14 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	int result;
 
+	/* There should be a pseudo header of 1 byte added by upper layers.
+	 * Check to make sure it is there before reading it.
+	 */
+	if (skb->len < 1) {
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
 
-	/* X.25 to LAPB */
 	switch (skb->data[0]) {
 	case X25_IFACE_DATA:	/* Data to be transmitted */
 		skb_pull(skb, 1);
@@ -294,6 +300,15 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
 			return result;
 
 		memcpy(&state(hdlc)->settings, &new_settings, size);
+
+		/* There's no header_ops so hard_header_len should be 0. */
+		dev->hard_header_len = 0;
+		/* When transmitting data:
+		 * first we'll remove a pseudo header of 1 byte,
+		 * then we'll prepend an LAPB header of at most 3 bytes.
+		 */
+		dev->needed_headroom = 3 - 1;
+
 		dev->type = ARPHRD_X25;
 		call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
 		netif_dormant_off(dev);
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 9d7fb45b1786..9668ea04cc80 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -893,8 +893,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
	 * have OPP table for the device, while others don't and
	 * opp_set_rate() just needs to behave like clk_set_rate().
	 */
-	if (!_get_opp_count(opp_table))
-		return 0;
+	if (!_get_opp_count(opp_table)) {
+		ret = 0;
+		goto put_opp_table;
+	}
 
 	if (!opp_table->required_opp_tables && !opp_table->regulators &&
 	    !opp_table->paths) {
@@ -905,7 +907,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 
 		ret = _set_opp_bw(opp_table, NULL, dev, true);
 		if (ret)
-			return ret;
+			goto put_opp_table;
 
 		if (opp_table->regulator_enabled) {
 			regulator_disable(opp_table->regulators[0]);
@@ -932,10 +934,13 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 
 	/* Return early if nothing to do */
 	if (old_freq == freq) {
-		dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
-			__func__, freq);
-		ret = 0;
-		goto put_opp_table;
+		if (!opp_table->required_opp_tables && !opp_table->regulators &&
+		    !opp_table->paths) {
+			dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+				__func__, freq);
+			ret = 0;
+			goto put_opp_table;
+		}
 	}
 
 	/*
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 64ebed129dbf..f357f9a32b3a 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -556,13 +556,14 @@ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
 		return -1;
 
 	for (i = 0; i < num_clients; i++) {
-		if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
-		    clients[i]->dma_ops == &dma_virt_ops) {
+#ifdef CONFIG_DMA_VIRT_OPS
+		if (clients[i]->dma_ops == &dma_virt_ops) {
 			if (verbose)
 				dev_warn(clients[i],
 					 "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
 			return -1;
 		}
+#endif
 
 		pci_client = find_parent_pci_dev(clients[i]);
 		if (!pci_client) {
@@ -842,9 +843,10 @@ static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
	 * this should never happen because it will be prevented
	 * by the check in pci_p2pdma_distance_many()
	 */
-	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
-			 dev->dma_ops == &dma_virt_ops))
+#ifdef CONFIG_DMA_VIRT_OPS
+	if (WARN_ON_ONCE(dev->dma_ops == &dma_virt_ops))
 		return 0;
+#endif
 
 	for_each_sg(sg, s, nents, i) {
 		paddr = sg_phys(s);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index c3008e423f59..c6ea760ea5f0 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -1017,4 +1017,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
 
 endif # SPI_SLAVE
 
+config SPI_DYNAMIC
+	def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+
 endif # SPI
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 4c643dfc7fbb..d4b33b358a31 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -13,6 +13,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of_platform.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/spi/spi.h>
@@ -441,7 +442,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
 {
 	u32 div, mbrdiv;
 
-	div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
+	/* Ensure spi->clk_rate is even */
+	div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
 
	/*
	 * SPI framework set xfer->speed_hz to master->max_speed_hz if
@@ -467,20 +469,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
 /**
 * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
 * @spi: pointer to the spi controller data structure
+ * @xfer_len: length of the message to be transferred
 */
-static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
+static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
 {
-	u32 fthlv, half_fifo;
+	u32 fthlv, half_fifo, packet;
 
 	/* data packet should not exceed 1/2 of fifo space */
 	half_fifo = (spi->fifo_size / 2);
 
+	/* data_packet should not exceed transfer length */
+	if (half_fifo > xfer_len)
+		packet = xfer_len;
+	else
+		packet = half_fifo;
+
 	if (spi->cur_bpw <= 8)
-		fthlv = half_fifo;
+		fthlv = packet;
 	else if (spi->cur_bpw <= 16)
-		fthlv = half_fifo / 2;
+		fthlv = packet / 2;
 	else
-		fthlv = half_fifo / 4;
+		fthlv = packet / 4;
 
 	/* align packet size with data registers access */
 	if (spi->cur_bpw > 8)
@@ -488,6 +497,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
 	else
 		fthlv -= (fthlv % 4); /* multiple of 4 */
 
+	if (!fthlv)
+		fthlv = 1;
+
 	return fthlv;
 }
@@ -966,13 +978,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 	if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
 		stm32h7_spi_read_rxfifo(spi, false);
 
-	writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
+	writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
 
 	spin_unlock_irqrestore(&spi->lock, flags);
 
 	if (end) {
-		spi_finalize_current_transfer(master);
 		stm32h7_spi_disable(spi);
+		spi_finalize_current_transfer(master);
 	}
 
 	return IRQ_HANDLED;
@@ -1393,7 +1405,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
 	cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
 		     STM32H7_SPI_CFG1_DSIZE;
 
-	spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
+	spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
 	fthlv = spi->cur_fthlv - 1;
 
 	cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
@@ -1585,39 +1597,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
 	unsigned long flags;
 	unsigned int comm_type;
 	int nb_words, ret = 0;
+	int mbr;
 
 	spin_lock_irqsave(&spi->lock, flags);
 
-	if (spi->cur_bpw != transfer->bits_per_word) {
-		spi->cur_bpw = transfer->bits_per_word;
-		spi->cfg->set_bpw(spi);
-	}
-
-	if (spi->cur_speed != transfer->speed_hz) {
-		int mbr;
+	spi->cur_xferlen = transfer->len;
 
-		/* Update spi->cur_speed with real clock speed */
-		mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
-					    spi->cfg->baud_rate_div_min,
-					    spi->cfg->baud_rate_div_max);
-		if (mbr < 0) {
-			ret = mbr;
-			goto out;
-		}
+	spi->cur_bpw = transfer->bits_per_word;
+	spi->cfg->set_bpw(spi);
 
-		transfer->speed_hz = spi->cur_speed;
-		stm32_spi_set_mbr(spi, mbr);
+	/* Update spi->cur_speed with real clock speed */
+	mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
+				    spi->cfg->baud_rate_div_min,
+				    spi->cfg->baud_rate_div_max);
+	if (mbr < 0) {
+		ret = mbr;
+		goto out;
 	}
 
+	transfer->speed_hz = spi->cur_speed;
+	stm32_spi_set_mbr(spi, mbr);
+
 	comm_type = stm32_spi_communication_type(spi_dev, transfer);
-	if (spi->cur_comm != comm_type) {
-		ret = spi->cfg->set_mode(spi, comm_type);
+	ret = spi->cfg->set_mode(spi, comm_type);
+	if (ret < 0)
+		goto out;
 
-		if (ret < 0)
-			goto out;
-
-		spi->cur_comm = comm_type;
-	}
+	spi->cur_comm = comm_type;
 
 	if (spi->cfg->set_data_idleness)
 		spi->cfg->set_data_idleness(spi, transfer->len);
@@ -1635,8 +1641,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
 		goto out;
 	}
 
-	spi->cur_xferlen = transfer->len;
-
 	dev_dbg(spi->dev, "transfer communication mode set to %d\n",
 		spi->cur_comm);
 	dev_dbg(spi->dev,
@@ -1996,6 +2000,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
 
 	pm_runtime_disable(&pdev->dev);
 
+	pinctrl_pm_select_sleep_state(&pdev->dev);
+
 	return 0;
 }
 
@@ -2007,13 +2013,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)
 
 	clk_disable_unprepare(spi->clk);
 
-	return 0;
+	return pinctrl_pm_select_sleep_state(dev);
 }
 
 static int stm32_spi_runtime_resume(struct device *dev)
 {
 	struct spi_master *master = dev_get_drvdata(dev);
 	struct stm32_spi *spi = spi_master_get_devdata(master);
+	int ret;
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret)
+		return ret;
 
 	return clk_prepare_enable(spi->clk);
 }
@@ -2043,10 +2054,23 @@ static int stm32_spi_resume(struct device *dev)
 		return ret;
 
 	ret = spi_master_resume(master);
-	if (ret)
+	if (ret) {
 		clk_disable_unprepare(spi->clk);
+		return ret;
+	}
 
-	return ret;
+	ret = pm_runtime_get_sync(dev);
+	if (ret) {
+		dev_err(dev, "Unable to power device:%d\n", ret);
+		return ret;
+	}
+
+	spi->cfg->config(spi);
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return 0;
 }
 #endif
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 6626587e77b4..dc12af018350 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
 */
 static DEFINE_MUTEX(board_lock);
 
+/*
+ * Prevents addition of devices with same chip select and
+ * addition of devices below an unregistering controller.
+ */
+static DEFINE_MUTEX(spi_add_lock);
+
 /**
 * spi_alloc_device - Allocate a new SPI device
 * @ctlr: Controller to which device is connected
@@ -554,7 +560,6 @@ static int spi_dev_check(struct device *dev, void *data)
 */
 int spi_add_device(struct spi_device *spi)
 {
-	static DEFINE_MUTEX(spi_add_lock);
 	struct spi_controller *ctlr = spi->controller;
 	struct device *dev = ctlr->dev.parent;
 	int status;
@@ -582,6 +587,13 @@ int spi_add_device(struct spi_device *spi)
 		goto done;
 	}
 
+	/* Controller may unregister concurrently */
+	if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
+	    !device_is_registered(&ctlr->dev)) {
+		status = -ENODEV;
+		goto done;
+	}
+
 	/* Descriptors take precedence */
 	if (ctlr->cs_gpiods)
 		spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
@@ -2795,6 +2807,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 	struct spi_controller *found;
 	int id = ctlr->bus_num;
 
+	/* Prevent addition of new devices, unregister existing ones */
+	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+		mutex_lock(&spi_add_lock);
+
 	device_for_each_child(&ctlr->dev, NULL, __unregister);
 
 	/* First make sure that this controller was ever added */
@@ -2815,6 +2831,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
 	if (found == ctlr)
 		idr_remove(&spi_master_idr, id);
 	mutex_unlock(&board_lock);
+
+	if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
+		mutex_unlock(&spi_add_lock);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_controller);
 
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 86a02aff8735..61ca8ab165dc 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -33,12 +33,14 @@
 
 struct vfio_pci_ioeventfd {
 	struct list_head	next;
+	struct vfio_pci_device	*vdev;
 	struct virqfd		*virqfd;
 	void __iomem		*addr;
 	uint64_t		data;
 	loff_t			pos;
 	int			bar;
 	int			count;
+	bool			test_mem;
 };
 
 struct vfio_pci_irq_ctx {
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 916b184df3a5..9e353c484ace 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -37,17 +37,70 @@
 #define vfio_ioread8	ioread8
 #define vfio_iowrite8	iowrite8
 
+#define VFIO_IOWRITE(size) \
+static int vfio_pci_iowrite##size(struct vfio_pci_device *vdev, \
+			bool test_mem, u##size val, void __iomem *io) \
+{ \
+	if (test_mem) { \
+		down_read(&vdev->memory_lock); \
+		if (!__vfio_pci_memory_enabled(vdev)) { \
+			up_read(&vdev->memory_lock); \
+			return -EIO; \
+		} \
+	} \
+ \
+	vfio_iowrite##size(val, io); \
+ \
+	if (test_mem) \
+		up_read(&vdev->memory_lock); \
+ \
+	return 0; \
+}
+
+VFIO_IOWRITE(8)
+VFIO_IOWRITE(16)
+VFIO_IOWRITE(32)
+#ifdef iowrite64
+VFIO_IOWRITE(64)
+#endif
+
+#define VFIO_IOREAD(size) \
+static int vfio_pci_ioread##size(struct vfio_pci_device *vdev, \
+			bool test_mem, u##size *val, void __iomem *io) \
+{ \
+	if (test_mem) { \
+		down_read(&vdev->memory_lock); \
+		if (!__vfio_pci_memory_enabled(vdev)) { \
+			up_read(&vdev->memory_lock); \
+			return -EIO; \
+		} \
+	} \
+ \
+	*val = vfio_ioread##size(io); \
+ \
+	if (test_mem) \
+		up_read(&vdev->memory_lock); \
+ \
+	return 0; \
+}
+
+VFIO_IOREAD(8)
+VFIO_IOREAD(16)
+VFIO_IOREAD(32)
+
 /*
 * Read or write from an __iomem region (MMIO or I/O port) with an excluded
 * range which is inaccessible. The excluded range drops writes and fills
 * reads with -1. This is intended for handling MSI-X vector tables and
 * leftover space for ROM BARs.
 */
-static ssize_t do_io_rw(void __iomem *io, char __user *buf,
+static ssize_t do_io_rw(struct vfio_pci_device *vdev, bool test_mem,
+			void __iomem *io, char __user *buf,
 			loff_t off, size_t count, size_t x_start,
 			size_t x_end, bool iswrite)
 {
 	ssize_t done = 0;
+	int ret;
 
 	while (count) {
 		size_t fillable, filled;
@@ -66,9 +119,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 			if (copy_from_user(&val, buf, 4))
 				return -EFAULT;
 
-			vfio_iowrite32(val, io + off);
+			ret = vfio_pci_iowrite32(vdev, test_mem,
+						 val, io + off);
+			if (ret)
+				return ret;
 		} else {
-			val = vfio_ioread32(io + off);
+			ret = vfio_pci_ioread32(vdev, test_mem,
+						&val, io + off);
+			if (ret)
+				return ret;
 
 			if (copy_to_user(buf, &val, 4))
 				return -EFAULT;
@@ -82,9 +141,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 			if (copy_from_user(&val, buf, 2))
 				return -EFAULT;
 
-			vfio_iowrite16(val, io + off);
+			ret = vfio_pci_iowrite16(vdev, test_mem,
+						 val, io + off);
+			if (ret)
+				return ret;
 		} else {
-			val = vfio_ioread16(io + off);
+			ret = vfio_pci_ioread16(vdev, test_mem,
+						&val, io + off);
+			if (ret)
+				return ret;
 
 			if (copy_to_user(buf, &val, 2))
 				return -EFAULT;
@@ -98,9 +163,15 @@ static ssize_t do_io_rw(void __iomem *io, char __user *buf,
 			if (copy_from_user(&val, buf, 1))
 				return -EFAULT;
 
-			vfio_iowrite8(val, io + off);
+			ret = vfio_pci_iowrite8(vdev, test_mem,
+						val, io + off);
+			if (ret)
+				return ret;
 		} else {
-			val = vfio_ioread8(io + off);
+			ret = vfio_pci_ioread8(vdev, test_mem,
+						&val, io + off);
+			if (ret)
+				return ret;
 
 			if (copy_to_user(buf, &val, 1))
 				return -EFAULT;
@@ -178,14 +249,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 
 	count = min(count, (size_t)(end - pos));
 
-	if (res->flags & IORESOURCE_MEM) {
-		down_read(&vdev->memory_lock);
-		if (!__vfio_pci_memory_enabled(vdev)) {
-			up_read(&vdev->memory_lock);
-			return -EIO;
-		}
-	}
-
 	if (bar == PCI_ROM_RESOURCE) {
		/*
		 * The ROM can fill less space than the BAR, so we start the
@@ -213,7 +276,8 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 		x_end = vdev->msix_offset + vdev->msix_size;
 	}
 
-	done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);
+	done = do_io_rw(vdev, res->flags & IORESOURCE_MEM, io, buf, pos,
+			count, x_start, x_end, iswrite);
 
 	if (done >= 0)
 		*ppos += done;
@@ -221,9 +285,6 @@ ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
 	if (bar == PCI_ROM_RESOURCE)
 		pci_unmap_rom(pdev, io);
 out:
-	if (res->flags & IORESOURCE_MEM)
-		up_read(&vdev->memory_lock);
-
 	return done;
 }
 
@@ -278,7 +339,12 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
 		return ret;
 	}
 
-	done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);
+	/*
+	 * VGA MMIO is a legacy, non-BAR resource that hopefully allows
+	 * probing, so we don't currently worry about access in relation
+	 * to the memory enable bit in the command register.
+	 */
+	done = do_io_rw(vdev, false, iomem, buf, off, count, 0, 0, iswrite);
 
 	vga_put(vdev->pdev, rsrc);
 
@@ -296,17 +362,21 @@ static int vfio_pci_ioeventfd_handler(void *opaque, void *unused)
 
 	switch (ioeventfd->count) {
 	case 1:
-		vfio_iowrite8(ioeventfd->data, ioeventfd->addr);
+		vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem,
+				  ioeventfd->data, ioeventfd->addr);
 		break;
 	case 2:
-		vfio_iowrite16(ioeventfd->data, ioeventfd->addr);
+		vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem,
+				   ioeventfd->data, ioeventfd->addr);
 		break;
 	case 4:
-		vfio_iowrite32(ioeventfd->data, ioeventfd->addr);
+		vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem,
+				   ioeventfd->data, ioeventfd->addr);
 		break;
 #ifdef iowrite64
 	case 8:
-		vfio_iowrite64(ioeventfd->data, ioeventfd->addr);
+		vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem,
+				   ioeventfd->data, ioeventfd->addr);
 		break;
 #endif
 	}
@@ -378,11 +448,13 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset,
 		goto out_unlock;
 	}
 
+	ioeventfd->vdev = vdev;
 	ioeventfd->addr = vdev->barmap[bar] + pos;
 	ioeventfd->data = data;
 	ioeventfd->pos = pos;
 	ioeventfd->bar = bar;
 	ioeventfd->count = count;
+	ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM;
 
 	ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler,
 				 NULL, NULL, &ioeventfd->virqfd, fd);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6990fc711a80..c992973cc2d5 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -1424,13 +1424,16 @@ static int vfio_bus_type(struct device *dev, void *data)
 static int vfio_iommu_replay(struct vfio_iommu *iommu,
 			     struct vfio_domain *domain)
 {
-	struct vfio_domain *d;
+	struct vfio_domain *d = NULL;
 	struct rb_node *n;
 	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	int ret;
 
 	/* Arbitrarily pick the first domain in the list for lookups */
-	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
+	if (!list_empty(&iommu->domain_list))
+		d = list_first_entry(&iommu->domain_list,
+				     struct vfio_domain, next);
+
 	n = rb_first(&iommu->dma_list);
 
 	for (; n; n = rb_next(n)) {
@@ -1448,6 +1451,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 				phys_addr_t p;
 				dma_addr_t i;
 
+				if (WARN_ON(!d)) { /* mapped w/o a domain?! */
+					ret = -EINVAL;
+					goto unwind;
+				}
+
 				phys = iommu_iova_to_phys(d->domain, iova);
 
 				if (WARN_ON(!phys)) {
@@ -1477,7 +1485,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 				if (npage <= 0) {
 					WARN_ON(!npage);
 					ret = (int)npage;
-					return ret;
+					goto unwind;
 				}
 
 				phys = pfn << PAGE_SHIFT;
@@ -1486,14 +1494,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 			ret = iommu_map(domain->domain, iova, phys,
 					size, dma->prot | domain->prot);
-			if (ret)
-				return ret;
+			if (ret) {
+				if (!dma->iommu_mapped)
+					vfio_unpin_pages_remote(dma, iova,
+							phys >> PAGE_SHIFT,
+							size >> PAGE_SHIFT,
+							true);
+				goto unwind;
+			}
 
 			iova += size;
 		}
+	}
+
+	/* All dmas are now mapped, defer to second tree walk for unwind */
+	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
+		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+
 		dma->iommu_mapped = true;
 	}
 
 	return 0;
+
+unwind:
+	for (; n; n = rb_prev(n)) {
+		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
+		dma_addr_t iova;
+
+		if (dma->iommu_mapped) {
+			iommu_unmap(domain->domain, dma->iova, dma->size);
+			continue;
+		}
+
+		iova = dma->iova;
+		while (iova < dma->iova + dma->size) {
+			phys_addr_t phys, p;
+			size_t size;
+			dma_addr_t i;
+
+			phys = iommu_iova_to_phys(domain->domain, iova);
+			if (!phys) {
+				iova += PAGE_SIZE;
+				continue;
+			}
+
+			size = PAGE_SIZE;
+			p = phys + size;
+			i = iova + size;
+			while (i < dma->iova + dma->size &&
+			       p == iommu_iova_to_phys(domain->domain, i)) {
+				size += PAGE_SIZE;
+				p += PAGE_SIZE;
+				i += PAGE_SIZE;
+			}
+
+			iommu_unmap(domain->domain, iova, size);
+			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
+						size >> PAGE_SHIFT, true);
+		}
+	}
+
+	return ret;
 }
 
 /*
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 65491ae74808..e57c00824965 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
 	info->apertures->ranges[0].base = efifb_fix.smem_start;
 	info->apertures->ranges[0].size = size_remap;
 
-	if (efi_enabled(EFI_BOOT) &&
+	if (efi_enabled(EFI_MEMMAP) &&
 	    !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
 		if ((efifb_fix.smem_start + efifb_fix.smem_len) >
 		    (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
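The vfio_iommu_replay() unwind above walks the dma rb-tree backwards and tears mappings down in physically contiguous runs: starting from one IOVA, it keeps extending the range while iommu_iova_to_phys() returns contiguous addresses, then issues a single unmap (and unpin) for the whole run. A standalone C sketch of that coalescing loop, with iova_to_phys() as a hypothetical stand-in for the IOMMU lookup and a toy mapping containing one hole:

/*
 * Standalone sketch (hypothetical helpers, not the kernel code) of the
 * range-coalescing idea in the vfio_iommu_replay() unwind path above.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Hypothetical stand-in for iommu_iova_to_phys(); 0 means "not mapped". */
static uint64_t iova_to_phys(uint64_t iova)
{
	/* Toy identity-with-offset mapping with a hole at page 3. */
	if (iova >= 3 * PAGE_SIZE && iova < 4 * PAGE_SIZE)
		return 0;
	return 0x100000ULL + iova;
}

static void unwind_range(uint64_t start, uint64_t len)
{
	uint64_t iova = start;

	while (iova < start + len) {
		uint64_t phys = iova_to_phys(iova);
		uint64_t size;

		if (!phys) {			/* hole: nothing to unmap */
			iova += PAGE_SIZE;
			continue;
		}
		/* Extend the run while the next page translates contiguously. */
		size = PAGE_SIZE;
		while (iova + size < start + len &&
		       iova_to_phys(iova + size) == phys + size)
			size += PAGE_SIZE;

		printf("unmap iova=%#llx size=%#llx\n",
		       (unsigned long long)iova, (unsigned long long)size);
		iova += size;
	}
}

int main(void)
{
	unwind_range(0, 8 * PAGE_SIZE);	/* two runs, split by the hole */
	return 0;
}

For an 8-page range with page 3 unmapped this prints two runs (3 pages, then 4 pages), the same batching the kernel loop achieves before calling iommu_unmap() and vfio_unpin_pages_remote(); ranges that were already iommu_mapped are instead unmapped wholesale, as the hunk shows.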