author    Harry Wentland <harry.wentland@amd.com>    2015-07-31 21:34:39 -0400
committer Alex Deucher <alexander.deucher@amd.com>   2015-09-21 17:45:15 -0400
commit    fde684f4488014c34748198e6c113b8105766575 (patch)
tree      04b0b4bd7d8618b9931b48e7f87b4779b6598c77
parent    f8846a938eff8610d3a0e42863890fbd0dee0b41 (diff)
amd/dal: HW Sequencer

The HW Sequencer is responsible for programming sequences and other
DCE specific operations. It is stateless and doesn't program HW by
itself. It calls graphics objects (encoder, controller, etc) to do HW
register and VBIOS command table programming.

                            SW Layer
 /===============================================================\
 | Timing                                      Asic              |
 | Service                                     Capability        |
 |                                                                |
 | Display         Display                     Adapter           |
 | Path            Capability                  Service           |
 | Service                                                        |
 |---------------------------------------------------------------|
 | GPIO      IRQ       I2cAux     HW           BIOS              |
 | Service   Manager              Sequencer    Parser            |
 |                                                                |
 | Connector   Encoder   Audio   GPU   Controller                |
 \===============================================================/
                            HW Layer

Signed-off-by: Harry Wentland <harry.wentland@amd.com>
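For readers new to DAL, the sketch below (not part of the patch) illustrates the delegation pattern the message describes: a sequencer step keeps no state of its own and only calls into the graphics objects owned by the display path, which in turn do the register and VBIOS programming. The helper name program_timing_for_path() is hypothetical; the dal_* calls and types are the ones introduced by this series.

    static void program_timing_for_path(
    	struct hw_sequencer *hws,
    	const struct hw_path_mode *path_mode,
    	struct hw_crtc_timing *timing)
    {
    	/* look up the controller object attached to this display path */
    	struct controller *crtc =
    		dal_display_path_get_controller(path_mode->display_path);

    	/* the controller, not the sequencer, programs the HW registers */
    	dal_controller_program_timing_generator(crtc, timing);
    }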
-rw-r--r--  drivers/gpu/drm/amd/dal/Makefile                                      |    3
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/Makefile                         |   22
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.c     |  711
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.h     |   34
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.c  |  114
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.h  |   34
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_path_mode_set.c               |  106
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.c                   | 3629
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.h                   |  190
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_adjustments.c       |  556
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_helpers.c           |  594
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.c        | 1198
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.h        |  112
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.c                |  107
-rw-r--r--  drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.h                |   73
-rw-r--r--  drivers/gpu/drm/amd/dal/include/hw_adjustment_set.h                   |   50
-rw-r--r--  drivers/gpu/drm/amd/dal/include/hw_sequencer_interface.h              |  391
17 files changed, 7923 insertions, 1 deletions
diff --git a/drivers/gpu/drm/amd/dal/Makefile b/drivers/gpu/drm/amd/dal/Makefile
index fe54c46125ad..d36b8982aef8 100644
--- a/drivers/gpu/drm/amd/dal/Makefile
+++ b/drivers/gpu/drm/amd/dal/Makefile
@@ -8,7 +8,8 @@ AMDDALPATH = $(RELATIVE_AMD_DAL_PATH)
subdir-ccflags-y += -I$(AMDDALPATH)/ -I$(AMDDALPATH)/include -DDAL_CZ_BRINGUP
DAL_LIBS = adapter amdgpu_dm audio asic_capability basics bios connector \
- controller dcs display_path encoder gpio gpu i2caux irq timing_service
+ controller dcs display_path encoder gpio gpu hw_sequencer i2caux irq \
+ timing_service
AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DAL_PATH)/,$(DAL_LIBS)))
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/Makefile b/drivers/gpu/drm/amd/dal/hw_sequencer/Makefile
new file mode 100644
index 000000000000..d367c7f460a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the 'hw_sequencer' sub-component of DAL.
+# It provides the HW programming sequence implementations.
+
+HWS = hw_sequencer.o hw_path_mode_set.o hw_sequencer_helpers.o \
+ hw_sync_control.o hw_sequencer_adjustments.o hw_sequencer_parameters.o
+
+AMD_DAL_HWS = $(addprefix $(AMDDALPATH)/hw_sequencer/,$(HWS))
+
+AMD_DAL_FILES += $(AMD_DAL_HWS)
+
+###############################################################################
+# DCE 11x family
+###############################################################################
+ifdef CONFIG_DRM_AMD_DAL_DCE11_0
+HWS_DCE110 = hw_sequencer_dce110.o hw_sync_control_dce110.o
+
+AMD_DAL_HWS_DCE110 = $(addprefix \
+ $(AMDDALPATH)/hw_sequencer/dce110/,$(HWS_DCE110))
+
+AMD_DAL_FILES += $(AMD_DAL_HWS_DCE110)
+endif
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.c b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.c
new file mode 100644
index 000000000000..9e90c6535a29
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.c
@@ -0,0 +1,711 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/logger_interface.h"
+#include "include/bandwidth_manager_interface.h"
+#include "include/connector_interface.h"
+#include "include/controller_interface.h"
+#include "include/display_path_interface.h"
+#include "include/display_path_types.h"
+#include "include/display_clock_interface.h"
+#include "include/adapter_service_interface.h"
+#include "include/dcs_interface.h"
+
+#include "hw_sequencer_dce110.h"
+#include "hw_sync_control_dce110.h"
+
+
+/******************/
+/* Implementation */
+/******************/
+
+enum hwss_result dal_hw_sequencer_dce110_enable_link(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in)
+{
+ enum hwss_result ret;
+
+ if ((in->link_idx == ASIC_LINK_INDEX) &&
+ (in->link_settings.link_rate == LINK_RATE_HIGH2)) {
+
+ struct display_path *display_path = in->display_path;
+ struct controller *controller =
+ dal_display_path_get_controller(display_path);
+ struct display_clock *disp_clk =
+ dal_controller_get_display_clock(controller);
+ enum clocks_state current_clock_state =
+ dal_display_clock_get_min_clocks_state(
+ disp_clk);
+
+ if (current_clock_state < CLOCKS_STATE_NOMINAL) {
+ dal_display_clock_set_min_clocks_state(
+ disp_clk, CLOCKS_STATE_NOMINAL);
+ }
+ }
+
+ ret = dal_hw_sequencer_enable_link_base(hws, in);
+
+ return ret;
+}
+
+static void setup_timing_and_blender(
+ struct hw_sequencer *hws,
+ struct controller *crtc,
+ const struct hw_path_mode *path_mode,
+ struct hw_crtc_timing *crtc_timing)
+{
+# if 0 /* TODOSTEREO */
+ struct crtc_mixer_params sm_params = { false };
+ struct controller *other_crtc;
+
+ other_crtc = dal_display_path_get_stereo_mixer_object(
+ path_mode->display_path);
+
+ /* TODO: Add blender/column/row interleave for stereo. Only disable
+ * case is supported for now */
+
+ switch (path_mode->mode.stereo_mixer_params.mode) {
+ case HW_STEREO_MIXER_MODE_ROW_INTERLEAVE:
+ case HW_STEREO_MIXER_MODE_COLUMN_INTERLEAVE:
+ case HW_STEREO_MIXER_MODE_PIXEL_INTERLEAVE:
+ sm_params.mode = path_mode->mode.stereo_mixer_params.mode;
+ sm_params.sub_sampling =
+ path_mode->mode.stereo_mixer_params.sub_sampling;
+ dal_controller_enable_stereo_mixer(crtc, &sm_params);
+
+			/* other pipe already enabled */
+ if (other_crtc)
+ dal_controller_program_blanking(
+ other_crtc,
+ crtc_timing);
+ break;
+ default: /* HWStereoMixerMode_Inactive */
+ dal_controller_disable_stereo_mixer(crtc);
+ break;
+ }
+#endif
+ /* build overscan parameters for current and other pipe */
+ dal_controller_program_timing_generator(crtc, crtc_timing);
+}
+
+
+
+static void set_display_mark(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ struct watermark_input_params *params,
+ uint32_t params_count)
+{
+ struct hw_global_objects objs = { NULL };
+ uint32_t display_clock;
+ struct dal_context *dal_context = hws->dal_context;
+
+ if (params_count == 0)
+ return;
+
+ dal_hw_sequencer_get_global_objects(path_set, &objs);
+
+ display_clock = dal_display_clock_get_clock(objs.dc);
+
+ dal_bandwidth_manager_program_watermark(
+ objs.bm, params_count, params,
+ display_clock);
+
+ dal_bandwidth_manager_program_display_mark(
+ objs.bm, params_count, params,
+ display_clock);
+
+ /* TODO: ProgramVBIEndSignal */
+ DAL_LOGGER_NOT_IMPL(LOG_MINOR_COMPONENT_HWSS,
+ "ProgramVBIEndSignal - %s()\n", __func__);
+}
+
+/*****************************************/
+/* Constructor, destructor, fcn pointers */
+/*****************************************/
+
+static void destruct(struct hw_sequencer *hws)
+{
+ if (hws->sync_control != NULL)
+ hws->sync_control->funcs->destroy(&hws->sync_control);
+}
+
+static void destroy(struct hw_sequencer **hws)
+{
+ destruct(*hws);
+
+ dal_free(*hws);
+
+ *hws = NULL;
+}
+
+void set_safe_displaymark(struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ struct watermark_input_params *wm_params,
+ uint32_t params_number)
+{
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(set, 0);
+ struct controller *controller =
+ dal_display_path_get_controller(path_mode->display_path);
+ struct display_clock *display_clock =
+ dal_controller_get_display_clock(controller);
+
+ if (!params_number)
+ return;
+
+ /* Set the stutter mark */
+ dal_bandwidth_manager_program_safe_display_mark(
+ dal_controller_get_bandwidth_manager(controller),
+ params_number,
+ wm_params,
+ dal_display_clock_get_clock(display_clock));
+}
+
+/*
+ * get_required_state_for_dp_link_for_all_paths
+ *
+ * @brief
+ * gets the required state for the DP link for all paths. For DCE11, the
+ * maximum required state is the low state.
+ *
+ * @param
+ * struct hw_path_mode_set *set - not used.
+ *
+ * @return
+ * clock state in enum clocks_state
+ */
+static enum clocks_state get_required_state_for_dp_link_for_all_paths(
+ struct hw_path_mode_set *set)
+{
+ return CLOCKS_STATE_ULTRA_LOW;
+}
+
+static uint32_t get_pixel_clock_for_single_path(
+ const struct hw_path_mode *path_mode)
+{
+ struct pixel_clk_params pixel_clk_params;
+ struct pll_settings pll_settings;
+
+ dal_memset(&pixel_clk_params, 0, sizeof(struct pixel_clk_params));
+ dal_memset(&pll_settings, 0, sizeof(struct pll_settings));
+
+ dal_hw_sequencer_get_pixel_clock_parameters(
+ path_mode, &pixel_clk_params);
+
+ dal_clock_source_get_pix_clk_dividers(
+ dal_display_path_get_clock_source(path_mode->display_path),
+ &pixel_clk_params,
+ &pll_settings);
+
+ return pll_settings.actual_pix_clk;
+}
+
+static uint32_t get_max_pixel_clock_for_all_paths(
+ struct hw_path_mode_set *set)
+{
+ uint32_t path_num = dal_hw_path_mode_set_get_paths_number(set);
+ uint32_t max_pixel_clock = 0;
+ uint32_t i;
+
+ for (i = 0; i < path_num; i++) {
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(set, i);
+ enum signal_type signal_type =
+ dal_hw_sequencer_get_asic_signal(path_mode);
+ uint32_t pixel_clock;
+
+		/* for DP/EDP, or DVO, there are no pixel clock requirements
+ * (leave as zero) */
+ if (dal_is_dp_signal(signal_type) ||
+ dal_is_cf_signal(signal_type))
+ continue;
+
+ pixel_clock = get_pixel_clock_for_single_path(path_mode);
+
+		/* update the max pixel clock found so far, if appropriate */
+ if (pixel_clock > max_pixel_clock)
+ max_pixel_clock = pixel_clock;
+ }
+
+ return max_pixel_clock;
+}
+
+static enum clocks_state get_required_clocks_state(
+ struct hw_sequencer *hws,
+ struct display_clock *display_clock,
+ struct hw_path_mode_set *path_set,
+ const struct minimum_clocks_calculation_result *min_clk)
+{
+ struct state_dependent_clocks required_state_dependent_clocks;
+ enum clocks_state clocks_required_state;
+ enum clocks_state dp_link_required_state;
+ enum clocks_state overall_required_state;
+
+ dal_memset(&required_state_dependent_clocks, 0,
+ sizeof(required_state_dependent_clocks));
+
+ required_state_dependent_clocks.display_clk_khz = min_clk->min_dclk_khz;
+ required_state_dependent_clocks.pixel_clk_khz =
+ get_max_pixel_clock_for_all_paths(path_set);
+
+ clocks_required_state = dal_display_clock_get_required_clocks_state(
+ display_clock, &required_state_dependent_clocks);
+
+ dp_link_required_state = get_required_state_for_dp_link_for_all_paths(
+ path_set);
+
+ /* overall required state is the max of required state for clocks
+ * (pixel, display clock) and the required state for DP link. */
+ overall_required_state =
+ clocks_required_state > dp_link_required_state ?
+ clocks_required_state : dp_link_required_state;
+
+	/* return the minimum clocks state that satisfies all requirements */
+ return overall_required_state;
+}
+
+void dal_hw_sequencer_dce110_apply_vce_timing_adjustment(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *vce_adj_timing_params)
+{
+ if (!vce_adj_timing_params || !vce_adj_timing_params->hw_crtc_timing
+ || !vce_adj_timing_params->hw_overscan) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_HWSS,
+ LOG_MINOR_COMPONENT_HWSS,
+ "Invalid input parameters");
+ return;
+ }
+
+ if (vce_adj_timing_params->vce_multi_instance) {
+ dal_hw_sequencer_extend_hblank(hws, vce_adj_timing_params);
+ } else if (vce_adj_timing_params->extend_vblank) {
+ if (vce_adj_timing_params->full_timing_adjustment) {
+ dal_hw_sequencer_wireless_full_timing_adjustment(
+ hws, vce_adj_timing_params);
+ } else {
+ dal_hw_sequencer_extend_vblank(
+ hws, vce_adj_timing_params);
+ }
+ }
+}
+
+static void set_display_clock_dfs_bypass(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ struct display_clock *display_clock,
+ uint32_t min_display_clock)
+{
+ /* check conditions to enter DFS bypass mode
+ * single embedded digital display (traditional LVDS, Travis-LVDS,
+ * Realtek-LVDS, eDP) + dispclk < 100MHz. */
+ bool request_bypass_active = false;
+ bool current_bypass_active = false;
+	bool single_embedded_display = true;
+ uint32_t paths_num = dal_hw_path_mode_set_get_paths_number(path_set);
+
+ uint32_t i = 0;
+ uint32_t active_path_num = 0;
+ uint32_t embed_active_path_num = 0;
+ uint32_t dfs_bypass_threshold;
+ struct display_clock_state disp_clk_state;
+
+ struct blank_stream_param blank_param = {0};
+ bool video_timing_unchanged = false;
+ bool embedded_dp_display = false;
+ uint32_t link_count = 0;
+ const struct hw_path_mode *path_mode = NULL;
+ struct display_path *display_path = NULL;
+ enum connector_id connector_id;
+ enum signal_type asic_signal;
+
+ for (i = 0; i < paths_num; i++) {
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(path_set, i);
+ if ((path_mode->action == HW_PATH_ACTION_SET) ||
+ (path_mode->action == HW_PATH_ACTION_EXISTING) ||
+ (path_mode->action == HW_PATH_ACTION_SET_ADJUSTMENT)) {
+ enum connector_id connector_id =
+ dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(
+ path_mode->display_path)).id;
+ active_path_num++;
+
+ if ((connector_id == CONNECTOR_ID_LVDS) ||
+ (connector_id == CONNECTOR_ID_EDP))
+ embed_active_path_num++;
+ }
+ }
+
+ if (!((active_path_num == 1) && (embed_active_path_num == 1)))
+		single_embedded_display = false;
+
+ dfs_bypass_threshold =
+ dal_display_clock_get_dfs_bypass_threshold(display_clock);
+
+	if (single_embedded_display && (dfs_bypass_threshold > 0)
+ && (min_display_clock < dfs_bypass_threshold)) {
+ request_bypass_active = true;
+ }
+
+	/* Check if bypass mode toggles -- get current state, then do XOR
+	 * with requested state */
+ disp_clk_state = dal_display_clock_get_clock_state(display_clock);
+ if (disp_clk_state.DFS_BYPASS_ACTIVE == 1)
+ current_bypass_active = true;
+
+ /* update display clock state with new bypass mode state */
+ disp_clk_state.DFS_BYPASS_ACTIVE = request_bypass_active;
+ dal_display_clock_set_clock_state(display_clock, disp_clk_state);
+
+	/* For embedded DP displays (eDP, Travis-LVDS, Realtek-LVDS), DPREFCLK
+	 * switches between the 500MHz DFS clock and the 100MHz PCIE bus
+	 * reference clock. Neither HW nor SW changes the DP video DTO at the
+	 * same time, so some DP receivers may show corruption on screen. To
+	 * avoid this, TX sends the DP idle pattern before switching DPREFCLK,
+	 * then changes the DTO, and then unblanks. */
+
+	/* check if eDP, Travis-LVDS or Realtek-LVDS exists; there should be
+	 * only one such path. Save the path info into blank_param */
+
+ for (i = 0; i < paths_num; i++) {
+ path_mode = dal_hw_path_mode_set_get_path_by_index(path_set, i);
+ display_path = path_mode->display_path;
+ connector_id = dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(
+ display_path)).id;
+ asic_signal = dal_hw_sequencer_get_asic_signal(path_mode);
+
+ video_timing_unchanged =
+ (path_mode->action == HW_PATH_ACTION_SET &&
+ path_mode->action_flags.TIMING_CHANGED == 0) ||
+ path_mode->action == HW_PATH_ACTION_EXISTING;
+ embedded_dp_display = ((connector_id == CONNECTOR_ID_LVDS) &&
+ (asic_signal == SIGNAL_TYPE_DISPLAY_PORT)) ||
+ (connector_id == CONNECTOR_ID_EDP);
+ link_count = dal_display_path_get_number_of_links(display_path);
+
+ if (embedded_dp_display) {
+ blank_param.display_path = display_path;
+ /* only used for unblank */
+ blank_param.timing = path_mode->mode.timing;
+ /* only used for unblank */
+ blank_param.link_settings = path_mode->link_settings;
+			/* there can be only one embedded display; once it is
+			 * found there is no need to continue the loop. */
+ break;
+ }
+ }
+
+ /* 1. Set DP idle pattern for embedded display path
+ * DISPCLK, DPREFCLK change, embedded DP video timing unchanged */
+ if ((request_bypass_active != current_bypass_active)
+ && (video_timing_unchanged || current_bypass_active == false) &&
+ embedded_dp_display) {
+ int32_t j;
+ union dcs_monitor_patch_flags patch_flags;
+
+ for (j = link_count - 1; j >= 0; --j) {
+ blank_param.link_idx = j;
+ dal_hw_sequencer_blank_stream(hws, &blank_param);
+ }
+
+ /* 2. Blank stream also powers off backlight. Backlight will
+ * be re-enabled later on in UnblankStream. Some panels
+ * cannot handle toggle of backlight high->low->high too
+		 * quickly. Therefore, we need a monitor patch here to add
+		 * some delay to this sequence. */
+ patch_flags = dal_dcs_get_monitor_patch_flags(
+ dal_display_path_get_dcs(display_path));
+
+ if (patch_flags.flags.
+ DELAY_AFTER_DISABLE_BACKLIGHT_DFS_BYPASS) {
+ const struct monitor_patch_info *patch_info;
+ unsigned int delay_after_disable_backlight_dfs_bypass;
+
+ patch_info = dal_dcs_get_monitor_patch_info(
+ dal_display_path_get_dcs(display_path),
+ MONITOR_PATCH_TYPE_DELAY_AFTER_DISABLE_BACKLIGHT_DFS_BYPASS);
+ delay_after_disable_backlight_dfs_bypass =
+ patch_info->param;
+ dal_sleep_in_milliseconds(
+ delay_after_disable_backlight_dfs_bypass);
+ }
+
+ }
+
+ /* 3. Switch DISPCLK and DPREFCLK - always do */
+ dal_display_clock_set_clock(display_clock, min_display_clock);
+
+ /* 4. Re-program DP Video DTO */
+ /* 5. Remove DP idle pattern */
+ /*DISPCLK, DPREFCLK change, embedded DP video timing unchanged */
+ if ((request_bypass_active != current_bypass_active) &&
+ (video_timing_unchanged || current_bypass_active == false) &&
+ embedded_dp_display) {
+ struct pixel_clk_params pixel_clk_params;
+ int32_t j;
+
+ dal_memset(&pixel_clk_params, 0, sizeof(pixel_clk_params));
+ dal_hw_sequencer_get_pixel_clock_parameters(
+ path_mode, &pixel_clk_params);
+ /* Set programPixelClock flag */
+ pixel_clk_params.flags.PROGRAM_PIXEL_CLOCK = true;
+ dal_clock_source_program_pix_clk(
+ dal_display_path_get_clock_source(
+ display_path),
+ &pixel_clk_params, NULL);
+
+ for (j = link_count - 1; j >= 0; --j) {
+ blank_param.link_idx = j;
+ dal_hw_sequencer_unblank_stream(hws, &blank_param);
+ }
+ }
+}
+
+/**
+ * Call display_engine_clock_dce80 to perform the Dclk programming.
+ */
+static void set_display_clock(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ const struct minimum_clocks_calculation_result *min_clocks)
+{
+ struct hw_global_objects objs = { NULL };
+
+ dal_hw_sequencer_get_global_objects(path_set, &objs);
+
+ ASSERT_CRITICAL(dal_hw_path_mode_set_get_paths_number(path_set) != 0);
+
+ /* Program the display engine clock.
+	 * Check whether DFS bypass mode is supported. The DFS bypass feature
+	 * is available only when the BIOS GPU info table reports support. For
+	 * DCE8.0, the feature is not supported in the BIOS table. */
+
+ if (dal_adapter_service_is_dfs_bypass_enabled(hws->as))
+ set_display_clock_dfs_bypass(
+ hws,
+ path_set,
+ objs.dc,
+ min_clocks->min_dclk_khz);
+ else
+ dal_display_clock_set_clock(objs.dc,
+ min_clocks->min_dclk_khz);
+
+ /* Start GTC counter */
+ hws->funcs->start_gtc_counter(hws, path_set);
+}
+
+/**
+ * start_gtc_counter
+ *
+ * @brief
+ * Start GTC counter if it is not already started.
+ *
+ * GTC counter is started after display clock is set during set mode.
+ * The counter is clocked by dprefclk. Dprefclk, in some cases, is only
+ * ready after VBIOS is updated with display clock.
+ */
+void start_gtc_counter(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode_set *set)
+{
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: Not Implemented\n",
+ __func__);
+}
+
+static uint32_t get_dp_dto_source_clock(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ return dal_display_clock_get_dp_ref_clk_frequency(
+ dal_controller_get_display_clock(
+ dal_display_path_get_controller(display_path)));
+}
+
+static void setup_audio_wall_dto(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode_set *path_set,
+ const struct hwss_build_params *build_params)
+{
+ uint32_t path_id;
+ uint32_t selected_path_id = 0;
+ struct display_path_objects obj;
+ uint32_t number_of_paths =
+ dal_hw_path_mode_set_get_paths_number(path_set);
+ struct audio *selected_audio = NULL;
+ bool is_hdmi_active = false;
+ bool need_to_reprogram_dto = false;
+
+ /* Select appropriate path*/
+ for (path_id = 0; path_id < number_of_paths; path_id++) {
+ bool is_hdmi_found = false;
+ struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_set, path_id);
+
+ dal_hw_sequencer_get_objects(
+ path_mode->display_path, &obj);
+
+ if (obj.audio == NULL)
+ continue;
+
+
+ /* Check if we have audio on this path and
+ * the path is set or existing. Priority is
+ * given to HDMI interfaces
+ */
+ is_hdmi_found = dal_is_hdmi_signal(
+ dal_hw_sequencer_get_asic_signal(path_mode));
+
+ if (path_mode->action == HW_PATH_ACTION_SET ||
+ path_mode->action == HW_PATH_ACTION_EXISTING) {
+
+ if (selected_audio == NULL ||
+ (is_hdmi_found && !is_hdmi_active)) {
+ is_hdmi_active = is_hdmi_found;
+ selected_audio = obj.audio;
+ selected_path_id = path_id;
+ need_to_reprogram_dto = true;
+ }
+ }
+ }
+
+ if (selected_audio != NULL && need_to_reprogram_dto) {
+ /* Setup audio clock source*/
+ struct audio_output audio_output;
+ struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_set, selected_path_id);
+
+ enum engine_id engine_id =
+ dal_hw_sequencer_get_engine_id(
+ path_mode->display_path);
+
+ dal_hw_sequencer_build_audio_output(
+ hws,
+ path_mode,
+ engine_id,
+ &build_params->pll_settings_params[selected_path_id],
+ &audio_output);
+
+ dal_audio_setup_audio_wall_dto(
+ selected_audio,
+ dal_hw_sequencer_get_asic_signal(path_mode),
+ &audio_output.crtc_info,
+ &audio_output.pll_info);
+
+ }
+}
+
+static bool setup_line_buffer_pixel_depth(
+ struct hw_sequencer *hws,
+ struct controller *crtc,
+ enum lb_pixel_depth depth,
+ bool blank)
+{
+ enum lb_pixel_depth current_depth;
+ struct line_buffer *lb;
+
+ if (!crtc)
+ return false;
+
+ lb = dal_controller_get_line_buffer(crtc);
+
+ if (!lb)
+ return false;
+
+ if (!dal_line_buffer_get_current_pixel_storage_depth(
+ lb,
+ &current_depth))
+ return false;
+
+ if (current_depth != depth) {
+ if (blank)
+ dal_controller_wait_for_vblank(crtc);
+
+ return dal_line_buffer_set_pixel_storage_depth(lb, depth);
+ }
+
+ return false;
+}
+
+static const struct hw_sequencer_funcs funcs = {
+ .apply_vce_timing_adjustment =
+ dal_hw_sequencer_dce110_apply_vce_timing_adjustment,
+ .get_dp_dto_source_clock = get_dp_dto_source_clock,
+ .set_display_clock = set_display_clock,
+ .set_displaymark = set_display_mark,
+ .set_safe_displaymark = set_safe_displaymark,
+ .setup_audio_wall_dto = setup_audio_wall_dto,
+ .setup_timing_and_blender =
+ setup_timing_and_blender,
+ .setup_line_buffer_pixel_depth =
+ setup_line_buffer_pixel_depth,
+ .start_gtc_counter = start_gtc_counter,
+ .destroy = destroy,
+ .get_required_clocks_state = get_required_clocks_state,
+ .hwss_enable_link = dal_hw_sequencer_dce110_enable_link,
+};
+
+static bool construct(struct hw_sequencer *hws,
+ struct hws_init_data *init_data)
+{
+ if (!dal_hw_sequencer_construct_base(hws, init_data))
+ return false;
+
+ hws->sync_control = dal_hw_sync_control_dce110_create(hws->dal_context,
+ hws->as);
+
+ if (!hws->sync_control) {
+ destruct(hws);
+ return false;
+ }
+
+ hws->funcs = &funcs;
+
+ return true;
+}
+
+struct hw_sequencer *dal_hw_sequencer_dce110_create(
+ struct hws_init_data *init_data)
+{
+ struct hw_sequencer *hws = dal_alloc(sizeof(struct hw_sequencer));
+
+ if (!hws)
+ return NULL;
+
+ if (construct(hws, init_data))
+ return hws;
+
+ dal_free(hws);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.h b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.h
new file mode 100644
index 000000000000..ede4663e8c2a
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sequencer_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2013-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SEQUENCER_DCE110_H__
+#define __DAL_HW_SEQUENCER_DCE110_H__
+
+#include "../hw_sequencer.h"
+
+struct hw_sequencer *dal_hw_sequencer_dce110_create(
+ struct hws_init_data *init_data);
+
+#endif
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.c b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.c
new file mode 100644
index 000000000000..f31169c656ce
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+#include "include/grph_object_defs.h"
+
+#include "include/grph_object_id.h"
+#include "include/display_path_interface.h"
+#include "include/controller_interface.h"
+#include "include/clock_source_interface.h"
+#include "hw_sync_control_dce110.h"
+
+
+struct hw_sync_control_dce110 {
+ struct hw_sync_control control;
+};
+
+#define FROM_HW_SYNC_CONTROL(c)\
+ container_of((c), struct hw_sync_control_dce110, control)
+
+static bool switch_dp_clock_source(
+ struct hw_sync_control *hw_sync_control,
+ struct hw_path_mode_set *path_mode_set)
+{
+ /*TODO: add implementation after dal_pixel_clock_switch_dp_clk_src*/
+ return false;
+}
+
+static enum hwss_result resync_display_paths(
+ struct hw_sync_control *hw_sync_control,
+ struct hw_path_mode_set *path_mode_set,
+ struct hw_resync_flags resync_flags)
+{
+ /* TODO: Add implementation */
+ return HWSS_RESULT_ERROR;
+}
+
+static void destruct(struct hw_sync_control *cntrl)
+{
+
+}
+
+static void destroy(struct hw_sync_control **cntrl)
+{
+ destruct(*cntrl);
+
+ dal_free(*cntrl);
+
+ *cntrl = NULL;
+}
+
+static const struct hw_sync_control_funcs sync_funcs = {
+ .resync_display_paths = resync_display_paths,
+ .switch_dp_clock_source = switch_dp_clock_source,
+ .destroy = destroy,
+};
+
+static bool construct(struct hw_sync_control_dce110 *cntrl,
+ struct dal_context *ctx,
+ struct adapter_service *as)
+{
+ if (!dal_hw_sync_control_construct_base(&cntrl->control))
+ return false;
+
+ /* TODO: Create GSL Mgr/
+ if (!dal_hw_gsl_mgr_construct_dce110(&cntrl->gsl_mgr, ctx, as))
+ return false;
+ */
+
+ cntrl->control.funcs = &sync_funcs;
+ return true;
+
+}
+
+struct hw_sync_control *dal_hw_sync_control_dce110_create(
+ struct dal_context *ctx,
+ struct adapter_service *as)
+{
+ struct hw_sync_control_dce110 *cntrl;
+
+ cntrl = dal_alloc(sizeof(*cntrl));
+
+ if (!cntrl)
+ return NULL;
+
+ if (construct(cntrl, ctx, as))
+ return &cntrl->control;
+
+ ASSERT_CRITICAL(false);
+ dal_free(cntrl);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.h b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.h
new file mode 100644
index 000000000000..98d32a483fde
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/dce110/hw_sync_control_dce110.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SYNC_CONTROL_DCE110_H__
+#define __DAL_HW_SYNC_CONTROL_DCE110_H__
+
+#include "../hw_sync_control.h"
+
+struct hw_sync_control *dal_hw_sync_control_dce110_create(
+ struct dal_context *ctx,
+ struct adapter_service *as);
+#endif /* __DAL_HW_SYNC_CONTROL_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_path_mode_set.c b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_path_mode_set.c
new file mode 100644
index 000000000000..afbae4569409
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_path_mode_set.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/hw_path_mode_set_interface.h"
+#include "include/hw_sequencer_types.h"
+#include "include/display_path_types.h"
+#include "include/vector.h"
+
+struct hw_path_mode_set {
+ struct vector vector;
+};
+
+static bool construct(struct hw_path_mode_set *set)
+{
+ if (!dal_vector_construct(
+ &set->vector,
+ MAX_COFUNCTIONAL_PATHS,
+ sizeof(struct hw_path_mode)))
+ return false;
+
+ return true;
+}
+
+struct hw_path_mode_set *dal_hw_path_mode_set_create(void)
+{
+ struct hw_path_mode_set *set;
+
+ set = dal_alloc(sizeof(struct hw_path_mode_set));
+
+ if (!set)
+ return NULL;
+
+ if (construct(set))
+ return set;
+
+ dal_free(set);
+ BREAK_TO_DEBUGGER();
+ return NULL;
+}
+
+static void destruct(struct hw_path_mode_set *set)
+{
+ dal_vector_destruct(&set->vector);
+}
+
+void dal_hw_path_mode_set_destroy(struct hw_path_mode_set **set)
+{
+ if (!set || !*set)
+ return;
+
+ destruct(*set);
+
+ dal_free(*set);
+
+ *set = NULL;
+}
+
+bool dal_hw_path_mode_set_add(
+ struct hw_path_mode_set *set,
+ struct hw_path_mode *path_mode,
+ uint32_t *index)
+{
+ if (!dal_vector_append(&set->vector, path_mode))
+ return false;
+
+ if (index != NULL)
+ *index = dal_vector_get_count(&set->vector) - 1;
+ return true;
+}
+
+struct hw_path_mode *dal_hw_path_mode_set_get_path_by_index(
+ const struct hw_path_mode_set *set,
+ uint32_t index)
+{
+ return dal_vector_at_index(&set->vector, index);
+}
+
+uint32_t dal_hw_path_mode_set_get_paths_number(
+ const struct hw_path_mode_set *set)
+{
+ return dal_vector_get_count(&set->vector);
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.c b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.c
new file mode 100644
index 000000000000..dcde0146250d
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.c
@@ -0,0 +1,3629 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/adapter_service_interface.h"
+#include "include/asic_capability_interface.h"
+#include "include/audio_interface.h"
+#include "include/bios_parser_interface.h"
+#include "include/bandwidth_manager_interface.h"
+#include "include/connector_interface.h"
+#include "include/dc_clock_generator_interface.h"
+#include "include/dcs_interface.h"
+#include "include/ddc_service_types.h"
+#include "include/encoder_interface.h"
+#include "include/logger_interface.h"
+#include "include/signal_types.h"
+#include "include/hw_sequencer_types.h"
+#include "include/formatter_types.h"
+#include "include/hw_adjustment_set.h"
+#include "include/isr_config_types.h"
+#include "include/line_buffer_interface.h"
+
+#include "hw_sequencer.h"
+
+#if defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+#include "dce110/hw_sequencer_dce110.h"
+#endif
+
+struct mpo_prototype_params {
+ uint32_t window_start_x;
+ uint32_t window_start_y;
+ uint32_t window_width;
+ uint32_t window_height;
+ uint32_t overscan_left;
+ uint32_t overscan_right;
+ uint32_t overscan_top;
+ uint32_t overscan_bottom;
+ uint32_t viewport_x;
+ uint32_t viewport_y;
+ uint32_t viewport_width;
+ uint32_t viewport_height;
+};
+
+static enum signal_type dal_get_signal(const struct hw_path_mode *path_mode)
+{
+ return dal_hw_sequencer_get_timing_adjusted_signal(
+ path_mode,
+ dal_display_path_get_config_signal(
+ path_mode->display_path, SINK_LINK_INDEX));
+}
+
+
+uint32_t dal_hw_sequencer_translate_to_graphics_bpp(
+ enum pixel_format pixel_format)
+{
+ switch (pixel_format) {
+ case PIXEL_FORMAT_INDEX8:
+ return 8;
+ case PIXEL_FORMAT_RGB565:
+ return 16;
+ case PIXEL_FORMAT_ARGB8888:
+ return 32;
+ case PIXEL_FORMAT_ARGB2101010:
+ return 32;
+ case PIXEL_FORMAT_ARGB2101010_XRBIAS:
+ return 32;
+ case PIXEL_FORMAT_FP16:
+ return 64;
+ default:
+ return 32;
+ }
+}
+
+uint32_t dal_hw_sequencer_translate_to_backend_bpp(
+ enum hw_overlay_backend_bpp backend_bpp)
+{
+ switch (backend_bpp) {
+ case HW_OVERLAY_BACKEND_BPP32_FULL_BANDWIDTH:
+ return 32;
+ case HW_OVERLAY_BACKEND_BPP16_FULL_BANDWIDTH:
+ return 16;
+ case HW_OVERLAY_BACKEND_BPP32_HALF_BANDWIDTH:
+ return 16;
+ default:
+ return 0;
+ }
+}
+
+enum dc_deep_color_depth dal_hw_sequencer_translate_to_dec_deep_color_depth(
+ enum hw_color_depth hw_color_depth)
+{
+ switch (hw_color_depth) {
+ case HW_COLOR_DEPTH_101010:
+ return DC_DEEP_COLOR_DEPTH_30;
+ case HW_COLOR_DEPTH_121212:
+ return DC_DEEP_COLOR_DEPTH_36;
+ case HW_COLOR_DEPTH_161616:
+ return DC_DEEP_COLOR_DEPTH_48;
+ default:
+ return DC_DEEP_COLOR_DEPTH_24;
+ }
+}
+/* not used for now
+static uint32_t get_validation_display_clock(
+ struct hw_path_mode_set *set)
+{
+ struct hw_global_objects objs = { NULL };
+ dal_hw_sequencer_get_global_objects(set, &objs);
+ return dal_display_clock_get_validation_clock(objs.dc);
+}
+
+static enum scaler_validation_code validate_display_clock_for_scaling(
+ struct display_path *display_path,
+ struct min_clock_params *min_clock_params,
+ struct scaler_validation_params *scaler_params,
+ struct scaling_tap_info *tap_info)
+{
+ enum scaler_validation_code result = SCALER_VALIDATION_OK;
+ struct controller *controller;
+ struct display_clock *display_clock;
+ ASSERT(display_path != NULL);
+ ASSERT(min_clock_params != NULL);
+
+ controller = dal_display_path_get_controller(display_path);
+ display_clock = dal_controller_get_display_clock(controller);
+
+ do {
+ if (dal_display_clock_validate(
+ display_clock, min_clock_params))
+ break;
+
+ result =
+ dal_controller_get_next_lower_taps_number(
+ controller,
+ scaler_params,
+ tap_info);
+
+ if (result != SCALER_VALIDATION_OK) {
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ min_clock_params->scaling_info.v_taps = tap_info->v_taps;
+ min_clock_params->scaling_info.h_taps = tap_info->h_taps;
+ } while (result == SCALER_VALIDATION_OK);
+
+ return result;
+}
+*/
+
+static bool build_bit_depth_reduction_params(
+ const struct hw_path_mode *path_mode,
+ struct bit_depth_reduction_params *fmt_bit_depth)
+{
+ enum hw_color_depth color_depth =
+ path_mode->mode.timing.flags.COLOR_DEPTH;
+ enum hw_pixel_encoding pixel_encoding =
+ path_mode->mode.timing.flags.PIXEL_ENCODING;
+
+ /* For DPtoLVDS translators VBIOS sets FMT for LCD; VBIOS does not
+ * handle DPtoVGA case */
+ if (SIGNAL_TYPE_DISPLAY_PORT ==
+ dal_hw_sequencer_get_asic_signal(path_mode) &&
+ SIGNAL_TYPE_LVDS == dal_get_signal(path_mode))
+ return false;
+
+ if (path_mode->mode.dithering == HW_DITHERING_OPTION_SKIP_PROGRAMMING)
+ return false;
+
+ /* dithering is disabled (usually due to restrictions) but programming
+ * (to disable it) is still required */
+ if (path_mode->mode.dithering == HW_DITHERING_OPTION_DISABLE) {
+ /* low level usually disables it if it gets all-zeros */
+ /* but we will set disable bit just to indicate that output
+ * structure is changed */
+ fmt_bit_depth->flags.TRUNCATE_ENABLED = 0;
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 0;
+ fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 0;
+ return true;
+ }
+
+ /*TODO:
+ if (pAdjustmentSet != NULL) {
+ HWAdjustmentInterface* pBitDepthReductionAdjustment =
+ pAdjustmentSet->GetAdjustmentById(
+ HWAdjustmentId_BitDepthReduction);
+ if (pBitDepthReductionAdjustment != NULL) {
+ hw_bit_depth = pBitDepthReductionAdjustment->GetBitDepth();
+ }
+ }
+ */
+ /* TODO: In order to apply the dithering from adjustment
+ * the above commented code should be used. Also in order
+ * to avoid translation from "hw_bit_depth_reduction" to
+ * "bit_depth_reduction_params", the adjustment code
+ * should use the struct "bit_depth_reduction_params"
+ * */
+
+
+ /*TODO:if (hw_bit_depth == NULL) */
+ /* apply spatial dithering */
+ switch (color_depth) {
+ case HW_COLOR_DEPTH_666:
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
+ break;
+ case HW_COLOR_DEPTH_888:
+ fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
+ break;
+ case HW_COLOR_DEPTH_101010:
+ /* bypass for 10-bit and 12-bit by default*/
+ return true;
+ case HW_COLOR_DEPTH_121212:
+ /* bypass for 10-bit and 12-bit by default*/
+ return true;
+ default:
+ /* unexpected case, skip programming dither*/
+ return false;
+
+ }
+ fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
+ /* frame random is on by default */
+ fmt_bit_depth->flags.FRAME_RANDOM = 1;
+ /* apply RGB dithering */
+ fmt_bit_depth->flags.RGB_RANDOM =
+ pixel_encoding == HW_PIXEL_ENCODING_RGB;
+
+ /*TODO:else
+ * fmt_bit_depth = hw_bit_depth;
+ * return true;
+ */
+ return true;
+}
+
+static bool setup_pixel_encoding(
+ const struct hw_path_mode *path_mode,
+ struct clamping_and_pixel_encoding_params *clamping)
+{
+ enum signal_type asic_signal =
+ dal_hw_sequencer_get_asic_signal(path_mode);
+
+ if (!clamping)
+ return false;
+
+ switch (asic_signal) {
+ case SIGNAL_TYPE_LVDS:
+ case SIGNAL_TYPE_EDP:
+		/* pixel encoding programming will be done by VBIOS's
+ * crtc_source_select()
+ * no Formatter programming is required
+ */
+ break;
+ default:
+ /* set default clamping for specified pixel encoding */
+ switch (path_mode->mode.timing.flags.PIXEL_ENCODING) {
+ case HW_PIXEL_ENCODING_RGB:
+ clamping->pixel_encoding = CNTL_PIXEL_ENCODING_RGB;
+ return true;
+ case HW_PIXEL_ENCODING_YCBCR422:
+ clamping->pixel_encoding = CNTL_PIXEL_ENCODING_YCBCR422;
+ return true;
+ case HW_PIXEL_ENCODING_YCBCR444:
+ clamping->pixel_encoding = CNTL_PIXEL_ENCODING_YCBCR444;
+ return true;
+ default:
+ break;
+ }
+ break;
+ }
+
+ return false;
+}
+
+static enum color_depth translate_to_color_depth(
+ enum hw_color_depth hw_color_depth)
+{
+ switch (hw_color_depth) {
+ case HW_COLOR_DEPTH_101010:
+ return COLOR_DEPTH_30;
+ case HW_COLOR_DEPTH_121212:
+ return COLOR_DEPTH_36;
+ case HW_COLOR_DEPTH_161616:
+ return COLOR_DEPTH_48;
+ default:
+ return COLOR_DEPTH_24;
+ }
+}
+
+static enum color_space translate_to_color_space(
+ enum hw_color_space hw_color_space)
+{
+ switch (hw_color_space) {
+ case HW_COLOR_SPACE_UNKNOWN:
+ return COLOR_SPACE_UNKNOWN;
+ case HW_COLOR_SPACE_SRGB_FULL_RANGE:
+ return COLOR_SPACE_SRGB_FULL_RANGE;
+ case HW_COLOR_SPACE_SRGB_LIMITED_RANGE:
+ return COLOR_SPACE_SRGB_LIMITED_RANGE;
+ case HW_COLOR_SPACE_YPBPR601:
+ return COLOR_SPACE_YPBPR601;
+ case HW_COLOR_SPACE_YPBPR709:
+ return COLOR_SPACE_YPBPR709;
+ case HW_COLOR_SPACE_YCBCR601:
+ return COLOR_SPACE_YCBCR601;
+ case HW_COLOR_SPACE_YCBCR709:
+ return COLOR_SPACE_YCBCR709;
+ case HW_COLOR_SPACE_NMVPU_SUPERAA:
+ return COLOR_SPACE_N_MVPU_SUPER_AA;
+ default:
+ return COLOR_SPACE_UNKNOWN;
+ }
+}
+
+static enum display_output_bit_depth translate_to_display_output_bit_depth(
+ enum hw_color_depth color_depth)
+{
+ switch (color_depth) {
+ case HW_COLOR_DEPTH_666:
+ return PANEL_6BIT_COLOR;
+ case HW_COLOR_DEPTH_888:
+ return PANEL_8BIT_COLOR;
+ case HW_COLOR_DEPTH_101010:
+ return PANEL_10BIT_COLOR;
+ case HW_COLOR_DEPTH_121212:
+ return PANEL_12BIT_COLOR;
+ case HW_COLOR_DEPTH_161616:
+ return PANEL_16BIT_COLOR;
+ default:
+ return PANEL_8BIT_COLOR;
+ }
+}
+
+void build_encoder_context(
+ struct display_path *dp,
+ struct encoder *encoder,
+ struct encoder_context *context)
+{
+ uint32_t i;
+ struct connector_feature_support cfs;
+ uint32_t links_number = dal_display_path_get_number_of_links(dp);
+
+ ASSERT(dp != NULL);
+ ASSERT(context != NULL);
+
+ dal_connector_get_features(
+ dal_display_path_get_connector(dp),
+ &cfs);
+ context->connector =
+ dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(dp));
+ context->hpd_source = cfs.hpd_line;
+ context->channel = cfs.ddc_line;
+ context->engine = ENGINE_ID_UNKNOWN;
+ context->signal = SIGNAL_TYPE_NONE;
+
+ for (i = 0; i < links_number; i++) {
+ if (dal_display_path_is_link_active(dp, i) &&
+ context->engine == ENGINE_ID_UNKNOWN)
+ context->engine =
+ dal_display_path_get_stream_engine(dp, i);
+
+ if (dal_display_path_get_upstream_encoder(dp, i) != encoder)
+ continue;
+
+ context->signal =
+ dal_display_path_get_config_signal(dp, i);
+ if (dal_display_path_get_stream_engine(dp, i) !=
+ ENGINE_ID_UNKNOWN)
+ context->engine =
+ dal_display_path_get_stream_engine(dp, i);
+
+ if (dal_display_path_get_downstream_encoder(dp, i) !=
+ NULL)
+ context->downstream =
+ dal_encoder_get_graphics_object_id(
+ dal_display_path_get_downstream_encoder(
+ dp, i));
+ else
+ context->downstream =
+ dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(dp));
+ }
+}
+
+static void update_coherent_adjustment(
+ const struct hw_path_mode *path_mode,
+ struct encoder_output *encoder_output)
+{
+ const struct hw_adjustment_value *value;
+
+ if (!path_mode->adjustment_set)
+ return;
+
+ value = path_mode->adjustment_set->coherent;
+
+ if (!value)
+ return;
+
+ /* update coherency mode (flag) in encoder output structure */
+ encoder_output->flags.bits.COHERENT = value->ui_value == 1;
+}
+
+void update_hdmi_info(
+ const struct hw_path_mode *path_mode,
+ struct encoder_output *encoder_output)
+{
+ struct cea_vendor_specific_data_block cea_vendor_block = { 0 };
+
+ if (encoder_output->ctx.signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ if (dal_dcs_get_cea_vendor_specific_data_block(
+ dal_display_path_get_dcs(path_mode->display_path),
+ &cea_vendor_block)) {
+ encoder_output->max_tmds_clk_from_edid_in_mhz =
+ cea_vendor_block.max_tmds_clk_mhz;
+ }
+ }
+}
+
+void translate_info_frame(const struct hw_info_frame *hw_info_frame,
+ struct encoder_info_frame *encoder_info_frame)
+{
+ dal_memset(encoder_info_frame, 0, sizeof(struct encoder_info_frame));
+
+ /* For gamut we recalc checksum */
+ if (hw_info_frame->gamut_packet.valid) {
+ uint8_t chk_sum = 0;
+ uint8_t *ptr;
+ uint8_t i;
+
+ dal_memmove(
+ &encoder_info_frame->gamut,
+ &hw_info_frame->gamut_packet,
+ sizeof(struct hw_info_packet));
+ /*start of the Gamut data. */
+ ptr = &encoder_info_frame->gamut.sb[3];
+
+ for (i = 0; i <= encoder_info_frame->gamut.sb[1]; i++)
+ chk_sum += ptr[i];
+
+ encoder_info_frame->gamut.sb[2] = (uint8_t) (0x100 - chk_sum);
+ }
+
+ if (hw_info_frame->avi_info_packet.valid) {
+ dal_memmove(
+ &encoder_info_frame->avi,
+ &hw_info_frame->avi_info_packet,
+ sizeof(struct hw_info_packet));
+ }
+
+ if (hw_info_frame->vendor_info_packet.valid) {
+ dal_memmove(
+ &encoder_info_frame->vendor,
+ &hw_info_frame->vendor_info_packet,
+ sizeof(struct hw_info_packet));
+ }
+
+ if (hw_info_frame->spd_packet.valid) {
+ dal_memmove(
+ &encoder_info_frame->spd,
+ &hw_info_frame->spd_packet,
+ sizeof(struct hw_info_packet));
+ }
+
+ if (hw_info_frame->vsc_packet.valid) {
+ dal_memmove(
+ &encoder_info_frame->vsc,
+ &hw_info_frame->vsc_packet,
+ sizeof(struct hw_info_packet));
+ }
+}
+
+static void build_encoder_output(
+ const struct hw_path_mode *path_mode,
+ enum build_option option,
+ struct encoder_output *encoder_output)
+{
+ uint32_t link_idx = 0;
+ struct display_path *display_path;
+ struct encoder *upstream_encoder;
+ struct encoder *downstream_encoder;
+ struct display_sink_capability sink_capability = {
+ DISPLAY_DONGLE_NONE };
+ union dcs_monitor_patch_flags patch_flags;
+
+ if (!path_mode || !encoder_output)
+ return;
+
+ display_path = path_mode->display_path;
+ upstream_encoder =
+ dal_display_path_get_upstream_encoder(display_path, link_idx);
+ downstream_encoder =
+ dal_display_path_get_downstream_encoder(display_path, link_idx);
+
+ if (!downstream_encoder)
+ downstream_encoder = upstream_encoder;
+
+	/* TODO: not sure if this logic is correct */
+ if (!upstream_encoder || !downstream_encoder) {
+ BREAK_TO_DEBUGGER();
+ /* Failed to obtain encoder */
+ return;
+ }
+
+ /* get sink capability for alternate scrambler */
+ dal_dcs_get_sink_capability(
+ dal_display_path_get_dcs(path_mode->display_path),
+ &sink_capability);
+
+ /* get dp receiver workarounds */
+ patch_flags = dal_dcs_get_monitor_patch_flags(
+ dal_display_path_get_dcs(path_mode->display_path));
+
+ /* fill encoder output parameters */
+ dal_memset(encoder_output, 0, sizeof(struct encoder_output));
+ encoder_output->crtc_timing = path_mode->mode.timing;
+ encoder_output->clock_source =
+ dal_clock_source_get_id(
+ dal_display_path_get_clock_source(
+ path_mode->display_path));
+ encoder_output->controller =
+ dal_controller_get_id(
+ dal_display_path_get_controller(
+ path_mode->display_path));
+ encoder_output->max_hdmi_pixel_clock =
+ sink_capability.max_hdmi_pixel_clock;
+ encoder_output->max_hdmi_deep_color =
+ sink_capability.max_hdmi_deep_color;
+ encoder_output->flags.bits.KEEP_RECEIVER_POWERED =
+ patch_flags.flags.KEEP_DP_RECEIVER_POWERED;
+ encoder_output->flags.bits.ENABLE_AUDIO =
+ dal_display_path_get_audio(path_mode->display_path, link_idx) !=
+ NULL;
+ encoder_output->flags.bits.COHERENT = false;
+ encoder_output->flags.bits.DELAY_AFTER_PIXEL_FORMAT_CHANGE =
+ patch_flags.flags.DELAY_AFTER_PIXEL_FORMAT_CHANGE;
+ encoder_output->flags.bits.VID_STREAM_DIFFER_TO_SYNC =
+ patch_flags.flags.VID_STREAM_DIFFER_TO_SYNC;
+ encoder_output->flags.bits.TURN_OFF_VCC =
+ path_mode->action_flags.TURN_OFF_VCC;
+
+ if (patch_flags.flags.DELAY_AFTER_DP_RECEIVER_POWER_UP) {
+ encoder_output->delay_after_dp_receiver_power_up =
+ dal_dcs_get_monitor_patch_info(
+ dal_display_path_get_dcs(
+ path_mode->display_path),
+ MONITOR_PATCH_TYPE_DELAY_AFTER_DP_RECEIVER_POWER_UP)->
+ param;
+
+ }
+
+ if (patch_flags.flags.DELAY_AFTER_PIXEL_FORMAT_CHANGE) {
+ encoder_output->delay_after_pixel_format_change =
+ dal_dcs_get_monitor_patch_info(
+ dal_display_path_get_dcs(
+ path_mode->display_path),
+ MONITOR_PATCH_TYPE_DELAY_AFTER_PIXEL_FORMAT_CHANGE)->
+ param;
+ }
+
+ /* Build encoder (upstream or downstream) context. We may need to adjust
+ * signal based on timing */
+ switch (option) {
+ case BUILD_OPTION_SET_MODE:
+ case BUILD_OPTION_ENABLE_UPSTREAM:
+ case BUILD_OPTION_STATIC_VALIDATE_UPSTREAM:
+ build_encoder_context(path_mode->display_path, upstream_encoder,
+ &encoder_output->ctx);
+ encoder_output->ctx.signal =
+ dal_hw_sequencer_get_timing_adjusted_signal(
+ path_mode,
+ encoder_output->ctx.signal);
+ break;
+
+ case BUILD_OPTION_SET_MODE2:
+ case BUILD_OPTION_ENABLE_DOWNSTREAM:
+ case BUILD_OPTION_STATIC_VALIDATE_DOWNSTREAM:
+ build_encoder_context(
+ path_mode->display_path,
+ downstream_encoder,
+ &encoder_output->ctx);
+ encoder_output->ctx.signal =
+ dal_hw_sequencer_get_timing_adjusted_signal(
+ path_mode,
+ encoder_output->ctx.signal);
+ break;
+
+ case BUILD_OPTION_DISABLE:
+ build_encoder_context(path_mode->display_path, upstream_encoder,
+ &encoder_output->ctx);
+ break;
+
+ case BUILD_OPTION_DISABLE2:
+ build_encoder_context(
+ path_mode->display_path,
+ downstream_encoder,
+ &encoder_output->ctx);
+ break;
+
+ default:
+ build_encoder_context(path_mode->display_path, upstream_encoder,
+ &encoder_output->ctx);
+ break;
+ }
+
+ /* coherence mode override */
+ update_coherent_adjustment(path_mode, encoder_output);
+
+ /* get preferred settings */
+ switch (option) {
+ case BUILD_OPTION_SET_MODE:
+ case BUILD_OPTION_ENABLE_UPSTREAM:
+ case BUILD_OPTION_SET_MODE2:
+ case BUILD_OPTION_ENABLE_DOWNSTREAM:
+ case BUILD_OPTION_STATIC_VALIDATE_UPSTREAM:
+ case BUILD_OPTION_STATIC_VALIDATE_DOWNSTREAM:
+ /* HDMI information */
+ update_hdmi_info(path_mode, encoder_output);
+ /* Info Frame */
+ translate_info_frame(&path_mode->info_frame,
+ &encoder_output->info_frame);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static struct audio_info *build_audio_info(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode)
+{
+ struct audio_info *audio_info = NULL;
+ enum signal_type asic_signal =
+ dal_hw_sequencer_get_asic_signal(path_mode);
+ struct dcs *dcs =
+ dal_display_path_get_dcs(
+ path_mode->display_path);
+ const struct dcs_cea_audio_mode_list *audio_modes =
+ dal_dcs_get_cea_audio_modes(dcs, asic_signal);
+ union cea_speaker_allocation_data_block cea_speaker_allocation = {
+ { 0 } };
+ struct cea_vendor_specific_data_block cea_vendor_block = { 0 };
+ bool speakers;
+ uint32_t am_index;
+ uint32_t am_count;
+ uint32_t ai_size;
+ struct dcs_container_id container_id = { { 0 } };
+
+ if (audio_modes == NULL)
+ return NULL;
+
+ am_count = dal_dcs_cea_audio_mode_list_get_count(audio_modes);
+
+ if (am_count == 0)
+ return NULL;
+
+ ai_size = sizeof(struct audio_info) + (am_count - 1) *
+ sizeof(struct audio_mode);
+
+ /* allocate array for audio modes */
+ audio_info = dal_alloc(ai_size);
+ if (!audio_info)
+ return NULL;
+
+ /* set the display path index */
+ audio_info->display_index =
+ dal_display_path_get_display_index(
+ path_mode->display_path);
+
+ /* set the display name */
+ dal_dcs_get_display_name(
+ dcs,
+ audio_info->display_name,
+ AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+
+ /* get speaker allocation */
+ speakers =
+ dal_dcs_get_cea_speaker_allocation_data_block(
+ dcs,
+ asic_signal,
+ &cea_speaker_allocation);
+ if (speakers) {
+ /* speaker allocation information is in the first byte */
+ audio_info->flags.speaker_flags =
+ *(struct audio_speaker_flags *)
+ &cea_speaker_allocation.raw;
+ }
+ if (dal_dcs_get_cea_vendor_specific_data_block(
+ dcs, &cea_vendor_block)) {
+ audio_info->flags.info.SUPPORT_AI =
+ cea_vendor_block.byte6.SUPPORTS_AI;
+
+ if (cea_vendor_block.byte8.LATENCY_FIELDS_PRESENT) {
+ audio_info->video_latency =
+ cea_vendor_block.video_latency;
+ audio_info->audio_latency =
+ cea_vendor_block.audio_latency;
+ }
+ if (cea_vendor_block.byte8.ILATENCY_FIELDS_PRESENT
+ && path_mode->mode.timing.flags.INTERLACED) {
+ audio_info->video_latency =
+ cea_vendor_block.i_video_latency;
+ audio_info->audio_latency =
+ cea_vendor_block.i_audio_latency;
+ }
+ }
+
+ /* Needed for DP1.2 and DP A/V Sync */
+ if (dal_is_dp_external_signal(asic_signal)) {
+ struct display_sink_capability sink_capability = {
+ DISPLAY_DONGLE_NONE };
+ dal_dcs_get_sink_capability(
+ dcs, &sink_capability);
+
+ /* convert to LIPSYNC format ms/2 + 1 (= us/2000 + 1) */
+ audio_info->audio_latency =
+ (sink_capability.audio_latency / 2000)
+ + 1;
+ if (path_mode->mode.timing.flags.INTERLACED) {
+ audio_info->video_latency =
+ (sink_capability.video_latency_interlace
+ / 2000) + 1;
+ } else {
+ audio_info->video_latency =
+ (sink_capability.video_latency_progressive
+ / 2000) + 1;
+ }
+ }
+
+ audio_info->mode_count = am_count;
+ /* translate modes */
+ for (am_index = 0; am_index < am_count; am_index++) {
+ const struct cea_audio_mode *audio_mode =
+ dal_dcs_cea_audio_mode_list_at_index(
+ audio_modes,
+ am_index);
+ audio_info->modes[am_index].format_code =
+ (enum audio_format_code)(audio_mode->format_code);
+ audio_info->modes[am_index].channel_count =
+ audio_mode->channel_count;
+ audio_info->modes[am_index].sample_rates.all =
+ audio_mode->sample_rate;
+ audio_info->modes[am_index].max_bit_rate =
+ audio_mode->max_bit_rate;
+ }
+
+ if (dal_dcs_get_container_id(dcs, &container_id)) {
+ audio_info->manufacture_id =
+ container_id.manufacturer_name;
+ audio_info->product_id = container_id.product_code;
+ audio_info->port_id[0] = container_id.port_id[0];
+ audio_info->port_id[1] = container_id.port_id[1];
+ }
+
+ if (container_id.product_code == 0 &&
+ container_id.manufacturer_name == 0) {
+ struct vendor_product_id_info vendor_product_id = { 0 };
+ struct bdf_info bdf_info =
+ dal_adapter_service_get_adapter_info(hws->as);
+
+ if (dal_dcs_get_vendor_product_id_info(
+ dcs,
+ &vendor_product_id)) {
+ audio_info->manufacture_id =
+ vendor_product_id.manufacturer_id;
+ audio_info->product_id =
+ vendor_product_id.product_id;
+ }
+
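+		/* container ID missing or zero: derive port_id from the
+		 * display index and the adapter PCI bus/device/function */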
+ audio_info->port_id[0] =
+ audio_info->display_index;
+ audio_info->port_id[1] = (bdf_info.BUS_NUMBER << 8
+ | bdf_info.DEVICE_NUMBER << 3
+ | bdf_info.FUNCTION_NUMBER);
+ }
+
+ return audio_info;
+}
+
+static void build_adjustment_scaler_params(
+ struct hw_adjustment_set *adjustment_set,
+ struct sharpness_adjustment *sharpness,
+ struct adjustment_factor *scale_ratio_hp_factor,
+ struct adjustment_factor *scale_ratio_lp_factor)
+{
+ const struct hw_adjustment_deflicker *value;
+
+ if (!adjustment_set)
+ goto default_values;
+
+ value = adjustment_set->deflicker_filter;
+
+ if (value == NULL)
+ goto default_values;
+
+ scale_ratio_hp_factor->adjust = value->hp_factor;
+ scale_ratio_hp_factor->divider = value->hp_divider;
+
+ scale_ratio_lp_factor->adjust = value->lp_factor;
+ scale_ratio_lp_factor->divider = value->lp_divider;
+
+ sharpness->sharpness = value->sharpness;
+ sharpness->enable_sharpening = value->enable_sharpening;
+
+	return;
+
+default_values:
+ scale_ratio_hp_factor->adjust = 71;
+ scale_ratio_hp_factor->divider = 100;
+ scale_ratio_lp_factor->adjust = 99;
+ scale_ratio_lp_factor->divider = 100;
+ sharpness->sharpness = 0;
+ sharpness->enable_sharpening = false;
+}
+
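+/* Fill scaler_data for a single plane from its plane_config and the
+ * pre-computed tap counts; viewport, alpha and unlock programming are
+ * optional and selected by the boolean flags. */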
+void dal_hw_sequencer_build_scaler_parameter_plane(
+ const struct plane_config *plane_config,
+ const struct scaling_tap_info *taps,
+ struct scaler_data *scaler_data,
+ bool program_viewport,
+ bool program_alpha,
+ bool unlock)
+{
+	/* TODO: get this from an adapter service feature flag */
+ scaler_data->flags.bits.SHOW_COLOURED_BORDER = false;
+
+ scaler_data->flags.bits.SHOULD_PROGRAM_ALPHA = 0;
+ if (program_alpha)
+ scaler_data->flags.bits.SHOULD_PROGRAM_ALPHA = 1;
+
+ scaler_data->flags.bits.SHOULD_PROGRAM_VIEWPORT = 0;
+ if (program_viewport)
+ scaler_data->flags.bits.SHOULD_PROGRAM_VIEWPORT = 1;
+
+ scaler_data->flags.bits.SHOULD_UNLOCK = 0;
+ if (unlock)
+ scaler_data->flags.bits.SHOULD_UNLOCK = 1;
+
+ scaler_data->flags.bits.INTERLACED = 0;
+ if (plane_config->attributes.video_scan_format !=
+ PLANE_VID_SCAN_FMT_PROGRESSIVE)
+ scaler_data->flags.bits.INTERLACED = 1;
+
+ scaler_data->dal_pixel_format = plane_config->config.dal_pixel_format;
+
+ scaler_data->taps.h_taps = taps->h_taps;
+ scaler_data->taps.v_taps = taps->v_taps;
+ scaler_data->taps.h_taps_c = taps->h_taps_c;
+ scaler_data->taps.v_taps_c = taps->v_taps_c;
+	/* hard code 2 chroma TAPS for bigbunny playback so that
+	 * auto-calculated coefficients are used.
+	 * TODO: find final solution */
+ if (scaler_data->dal_pixel_format == PIXEL_FORMAT_420BPP12) {
+ scaler_data->taps.h_taps_c = 2;
+ scaler_data->taps.v_taps_c = 2;
+ }
+
+ scaler_data->viewport.x = plane_config->mp_scaling_data.viewport.x;
+ scaler_data->viewport.y = plane_config->mp_scaling_data.viewport.y;
+ scaler_data->viewport.width =
+ plane_config->mp_scaling_data.viewport.width;
+ scaler_data->viewport.height =
+ plane_config->mp_scaling_data.viewport.height;
+
+ if (scaler_data->viewport.width == 0 ||
+ scaler_data->viewport.height == 0) {
+ scaler_data->viewport.height =
+ (scaler_data->hw_crtc_timing->v_addressable + 1) & ~1;
+ scaler_data->viewport.width =
+ scaler_data->hw_crtc_timing->h_addressable;
+ }
+
+ scaler_data->dst_res = plane_config->mp_scaling_data.dst_res;
+
+ scaler_data->overscan.left =
+ plane_config->mp_scaling_data.overscan.left;
+ scaler_data->overscan.right =
+ plane_config->mp_scaling_data.overscan.right;
+ scaler_data->overscan.top =
+ plane_config->mp_scaling_data.overscan.top;
+ scaler_data->overscan.bottom =
+ plane_config->mp_scaling_data.overscan.bottom;
+
+ /*TODO rotation and adjustment */
+ scaler_data->h_sharpness = 0;
+ scaler_data->v_sharpness = 0;
+
+ scaler_data->ratios = &plane_config->mp_scaling_data.ratios;
+}
+
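+/* Fill scaler_data for the display path back end: source/destination
+ * resolutions, overscan, taps and deflicker-based scaling adjustments. */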
+void dal_hw_sequencer_build_scaler_parameter(
+ const struct hw_path_mode *path_mode,
+ const struct scaling_tap_info *taps,
+ bool build_timing_required,
+ struct scaler_data *scaler_data)
+{
+ scaler_data->src_res.width =
+ path_mode->mode.scaling_info.src.width;
+ scaler_data->src_res.height =
+ path_mode->mode.scaling_info.src.height;
+ scaler_data->dst_res.width =
+ path_mode->mode.scaling_info.dst.width;
+ scaler_data->dst_res.height =
+ path_mode->mode.scaling_info.dst.height;
+
+ scaler_data->pixel_type =
+ path_mode->mode.timing.flags.PIXEL_ENCODING ==
+ HW_PIXEL_ENCODING_YCBCR422 ?
+ PIXEL_TYPE_20BPP : PIXEL_TYPE_30BPP;
+ scaler_data->flags.bits.INTERLACED =
+ path_mode->mode.timing.flags.INTERLACED;
+ scaler_data->flags.bits.DOUBLE_SCAN_MODE =
+ path_mode->mode.timing.flags.DOUBLESCAN;
+ scaler_data->flags.bits.PIPE_LOCK_REQ = 1;
+
+ scaler_data->overscan.left = path_mode->mode.overscan.left;
+ scaler_data->overscan.right = path_mode->mode.overscan.right;
+ scaler_data->overscan.top = path_mode->mode.overscan.top;
+ scaler_data->overscan.bottom = path_mode->mode.overscan.bottom;
+
+ scaler_data->taps = *taps;
+
+ if (path_mode->mode.color_space == HW_COLOR_SPACE_SRGB_FULL_RANGE ||
+ path_mode->mode.color_space ==
+ HW_COLOR_SPACE_SRGB_LIMITED_RANGE)
+ scaler_data->flags.bits.RGB_COLOR_SPACE = 1;
+ else
+ scaler_data->flags.bits.RGB_COLOR_SPACE = 0;
+
+ build_adjustment_scaler_params(path_mode->adjustment_set,
+ &scaler_data->sharp_gain,
+ &scaler_data->scale_ratio_hp_factor,
+ &scaler_data->scale_ratio_lp_factor);
+
+ scaler_data->h_sharpness = 0;
+ scaler_data->v_sharpness = 0;
+
+ if (build_timing_required)
+ scaler_data->hw_crtc_timing = &path_mode->mode.timing;
+}
+
+bool dal_hw_sequencer_enable_line_buffer_power_gating(
+ struct line_buffer *lb,
+ enum controller_id id,
+ enum pixel_type pixel_type,
+ uint32_t src_pixel_width,
+ uint32_t dst_pixel_width,
+ struct scaling_tap_info *taps,
+ enum lb_pixel_depth lb_depth,
+ uint32_t src_height,
+ uint32_t dst_height,
+ bool interlaced)
+{
+ struct lb_config_data lb_config_data;
+
+ if (!lb)
+ return false;
+
+ dal_memset(&lb_config_data, 0, sizeof(struct lb_config_data));
+
+ lb_config_data.src_pixel_width = src_pixel_width;
+ lb_config_data.dst_pixel_width = dst_pixel_width;
+ lb_config_data.taps = *taps;
+ /* this parameter is meaningful for DCE8 and up */
+ lb_config_data.depth = lb_depth;
+ /* this parameter is meaningful for DCE8 and up */
+ lb_config_data.src_height = src_height;
+ /* this parameter is meaningful for DCE8 and up */
+ lb_config_data.dst_height = dst_height;
+ /* this parameter is meaningful for DCE8 and up */
+ lb_config_data.interlaced = interlaced;
+
+ return dal_line_buffer_enable_power_gating(lb, id, &lb_config_data);
+}
+
+static enum hwss_result reset_path_mode_back_end(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ uint32_t path_id);
+
+static enum hwss_result set_path_mode_back_end(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t path_id,
+ struct hwss_build_params *build_params);
+
+void dal_hw_sequencer_update_info_frame(
+ const struct hw_path_mode *hw_path_mode)
+{
+ struct encoder *enc;
+ struct encoder_info_frame_param encoder_info_frame_param = {
+ { { 0 } } };
+
+ ASSERT(hw_path_mode != NULL);
+
+ enc = dal_display_path_get_upstream_encoder(
+ hw_path_mode->display_path,
+ ASIC_LINK_INDEX);
+
+ build_encoder_context(
+ hw_path_mode->display_path,
+ enc,
+ &encoder_info_frame_param.ctx);
+
+ translate_info_frame(
+ &hw_path_mode->info_frame,
+ &encoder_info_frame_param.packets);
+
+ dal_encoder_update_info_frame(enc, &encoder_info_frame_param);
+}
+
+static void enable_hpd(
+ const struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ struct encoder_context encoder_context;
+ struct display_path_objects disp_path_obj;
+
+ dal_hw_sequencer_get_objects(display_path, &disp_path_obj);
+ build_encoder_context(
+ display_path,
+ disp_path_obj.upstream_encoder,
+ &encoder_context);
+
+ /* call encoder on GPU to enable master lock of HPD */
+ dal_encoder_enable_hpd(disp_path_obj.upstream_encoder,
+ &encoder_context);
+}
+
+static bool has_travis_or_nutmeg_encoder(
+ struct display_path *display_path)
+{
+ bool has_travis_crt;
+ bool has_travis_lcd;
+ bool has_nutmeg_crt;
+
+ /* For Travis, EnumId_1 is crt and EnumId_2 is lcd */
+ /* For Nutmeg, EnumId_1 is crt */
+
+ if (display_path == NULL)
+ return false;
+
+ has_travis_crt = dal_display_path_contains_object(
+ display_path,
+ dal_graphics_object_id_init(
+ ENCODER_ID_EXTERNAL_TRAVIS,
+ ENUM_ID_1,
+ OBJECT_TYPE_ENCODER));
+
+ has_travis_lcd = dal_display_path_contains_object(
+ display_path,
+ dal_graphics_object_id_init(
+ ENCODER_ID_EXTERNAL_TRAVIS,
+ ENUM_ID_2,
+ OBJECT_TYPE_ENCODER));
+
+ has_nutmeg_crt = dal_display_path_contains_object(
+ display_path,
+ dal_graphics_object_id_init(
+ ENCODER_ID_EXTERNAL_NUTMEG,
+ ENUM_ID_1,
+ OBJECT_TYPE_ENCODER));
+
+ return has_travis_crt || has_travis_lcd || has_nutmeg_crt;
+}
+
+static void update_coherent_override(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *hw_path_mode,
+ struct encoder_output *encoder_output)
+{
+ /*TODO to be implemented*/
+}
+
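+/* Build encoder_output parameters for the upstream (ASIC-side) encoder on
+ * the given link: timing, clock source, DP receiver monitor patches, info
+ * frames and link settings, according to the requested build option. */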
+static void build_upstream_encoder_output(
+ struct hw_sequencer *hws,
+ uint32_t link_idx,
+ const struct hw_path_mode *hw_path_mode,
+ const struct link_settings *link_settings,
+ enum build_option build_option,
+ struct encoder_output *encoder_output)
+{
+ struct dcs *dcs;
+ struct encoder *enc;
+ union dcs_monitor_patch_flags patch_flags;
+
+ if (hw_path_mode == NULL || encoder_output == NULL)
+ return;
+ dcs = dal_display_path_get_dcs(hw_path_mode->display_path);
+ enc = dal_display_path_get_upstream_encoder(
+ hw_path_mode->display_path,
+ link_idx);
+
+ if (enc == NULL || dcs == NULL) {
+ BREAK_TO_DEBUGGER();
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: Failed to obtain encoder or dcs", __func__);
+ return;
+ }
+
+ /* get DP receiver workarounds */
+ patch_flags =
+ dal_dcs_get_monitor_patch_flags(dcs);
+
+ /* fill encoder output parameters */
+ dal_memset(encoder_output, 0, sizeof(struct encoder_output));
+ dal_memmove(
+ &encoder_output->crtc_timing,
+ &hw_path_mode->mode.timing,
+ sizeof(struct hw_crtc_timing));
+
+ encoder_output->clock_source = dal_clock_source_get_id(
+ dal_display_path_get_clock_source(
+ hw_path_mode->display_path));
+
+ encoder_output->flags.bits.KEEP_RECEIVER_POWERED =
+ patch_flags.flags.KEEP_DP_RECEIVER_POWERED;
+ encoder_output->flags.bits.ENABLE_AUDIO =
+ (dal_display_path_get_audio
+ (hw_path_mode->display_path, link_idx) != NULL);
+
+	/* coherent defaults to false */
+	encoder_output->flags.bits.COHERENT = false;
+
+ encoder_output->flags.bits.DELAY_AFTER_PIXEL_FORMAT_CHANGE =
+ patch_flags.flags.DELAY_AFTER_PIXEL_FORMAT_CHANGE;
+
+ encoder_output->flags.bits.TURN_OFF_VCC =
+ hw_path_mode->action_flags.TURN_OFF_VCC;
+
+ if (patch_flags.flags.DELAY_AFTER_DP_RECEIVER_POWER_UP) {
+ const struct monitor_patch_info *info =
+ dal_dcs_get_monitor_patch_info(dcs,
+ MONITOR_PATCH_TYPE_DELAY_AFTER_DP_RECEIVER_POWER_UP);
+ encoder_output->delay_after_dp_receiver_power_up =
+ info->param;
+ }
+
+ if (patch_flags.flags.DELAY_AFTER_PIXEL_FORMAT_CHANGE) {
+ const struct monitor_patch_info *info =
+ dal_dcs_get_monitor_patch_info(dcs,
+ MONITOR_PATCH_TYPE_DELAY_AFTER_PIXEL_FORMAT_CHANGE);
+ encoder_output->delay_after_pixel_format_change =
+ info->param;
+ }
+
+ build_encoder_context(hw_path_mode->display_path, enc,
+ &encoder_output->ctx);
+
+	/* The encoder signal may need adjustment based on the timing */
+
+ switch (build_option) {
+ case BUILD_OPTION_SET_MODE:
+ case BUILD_OPTION_ENABLE_UPSTREAM:
+ case BUILD_OPTION_STATIC_VALIDATE_UPSTREAM:
+ encoder_output->ctx.signal =
+ dal_hw_sequencer_get_timing_adjusted_signal(
+ hw_path_mode, encoder_output->ctx.signal);
+ break;
+
+ case BUILD_OPTION_DISABLE:
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* coherence mode override */
+	update_coherent_override(hws, hw_path_mode, encoder_output);
+
+ /* get preferred settings */
+ switch (build_option) {
+ case BUILD_OPTION_SET_MODE:
+ case BUILD_OPTION_ENABLE_UPSTREAM:
+ case BUILD_OPTION_STATIC_VALIDATE_UPSTREAM:
+ update_hdmi_info(hw_path_mode, encoder_output);
+ translate_info_frame(&hw_path_mode->info_frame,
+ &encoder_output->info_frame);
+
+ /* Query and update link capabilities */
+ dal_memmove(
+ &encoder_output->link_settings,
+ link_settings,
+ sizeof(struct link_settings));
+ break;
+
+ case BUILD_OPTION_DISABLE:
+ break;
+
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ /* set SS parameters */
+ encoder_output->link_settings.link_spread = (
+ dal_display_path_is_ss_supported(
+ hw_path_mode->display_path) ?
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ :
+ LINK_SPREAD_DISABLED);
+}
+
+/**
+ * Validate video memory bandwidth with the default (i.e. highest) display
+ * engine clock value.
+ */
+static bool validate_video_memory_bandwidth(
+ struct controller *controller,
+ uint32_t param_count,
+ struct bandwidth_params *bandwidth_params)
+{
+ uint32_t display_clock_in_khz;
+ struct display_clock *display_clock;
+ struct bandwidth_manager *bandwidth_manager;
+
+ display_clock = dal_controller_get_display_clock(controller);
+
+ /* Get the DCE-specific default (highest) validation clock. */
+ display_clock_in_khz = dal_display_clock_get_validation_clock(
+ display_clock);
+
+ bandwidth_manager = dal_controller_get_bandwidth_manager(controller);
+
+ return dal_bandwidth_manager_validate_video_memory_bandwidth(
+ bandwidth_manager,
+ param_count,
+ bandwidth_params,
+ display_clock_in_khz);
+}
+
+void dal_hw_sequencer_enable_audio_endpoint(
+ struct hw_sequencer *hws,
+ struct link_settings *ls,
+ struct display_path *display_path,
+ bool enable)
+{
+ struct audio *audio;
+
+ if (!display_path)
+ return;
+
+ audio = dal_display_path_get_audio(display_path, ASIC_LINK_INDEX);
+
+ if (!audio)
+ return;
+
+ {
+ enum engine_id e_id =
+ dal_hw_sequencer_get_engine_id(display_path);
+ enum signal_type asic_signal =
+ dal_display_path_get_config_signal(
+ display_path,
+ ASIC_LINK_INDEX);
+
+ if (enable) {
+ dal_audio_enable_output(
+ audio,
+ e_id,
+ asic_signal,
+ ls->link_rate);
+ dal_audio_enable_azalia_audio_jack_presence(
+ audio,
+ e_id);
+		} else {
+			dal_audio_disable_output(
+				audio,
+				e_id,
+				asic_signal);
+		}
+ }
+}
+
+void dal_hw_sequencer_mute_audio_endpoint(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ bool mute)
+{
+ struct audio *audio;
+ struct encoder_context context = { 0 };
+ struct encoder *enc;
+
+ if (display_path == NULL) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ audio = dal_display_path_get_audio(
+ display_path,
+ ASIC_LINK_INDEX);
+
+ if (audio == NULL)
+ return;
+
+ enc = dal_display_path_get_upstream_encoder(
+ display_path,
+ ASIC_LINK_INDEX);
+ ASSERT(enc != NULL);
+
+ build_encoder_context(display_path, enc, &context);
+
+ if (mute)
+ dal_audio_mute(audio,
+ context.engine, context.signal);
+ else
+ dal_audio_unmute(audio,
+ context.engine, context.signal);
+}
+
+enum hwss_result dal_hw_sequencer_reset_audio_device(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ struct display_path_objects disp_path_obj;
+ enum engine_id engine_id =
+ dal_hw_sequencer_get_engine_id(display_path);
+ enum signal_type asic_signal =
+ dal_display_path_get_config_signal(
+ display_path, ASIC_LINK_INDEX);
+ dal_hw_sequencer_get_objects(display_path, &disp_path_obj);
+
+ if (disp_path_obj.audio != NULL) {
+ struct audio_channel_associate_info audio_mapping;
+
+ audio_mapping.u32all = 0;
+
+ dal_audio_enable_channel_splitting_mapping(
+ disp_path_obj.audio,
+ engine_id,
+ asic_signal,
+ &audio_mapping,
+ false);
+
+ if (AUDIO_RESULT_OK == dal_audio_disable_output(
+ disp_path_obj.audio,
+ engine_id,
+ asic_signal))
+ return HWSS_RESULT_OK;
+
+ }
+ return HWSS_RESULT_ERROR;
+}
+
+bool dal_hw_sequencer_has_audio_bandwidth_changed(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *old,
+ const struct hw_path_mode *new)
+{
+ /* TODO implement */
+
+ return true;
+}
+
+void dal_hw_sequencer_enable_azalia_audio_jack_presence(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+	/* audio is enabled only at set-mode time */
+ struct audio *audio = dal_display_path_get_audio(
+ display_path, ASIC_LINK_INDEX);
+
+ if (audio) {
+ enum engine_id engine_id =
+ dal_hw_sequencer_get_engine_id(display_path);
+
+ dal_audio_enable_azalia_audio_jack_presence(audio, engine_id);
+ }
+}
+
+void dal_hw_sequencer_disable_azalia_audio_jack_presence(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+	/* disable azalia audio jack presence notification */
+ struct audio *audio = dal_display_path_get_audio(
+ display_path, ASIC_LINK_INDEX);
+
+ if (audio) {
+ enum engine_id engine_id =
+ dal_hw_sequencer_get_engine_id(display_path);
+
+ dal_audio_disable_azalia_audio_jack_presence(audio, engine_id);
+ }
+}
+
+void dal_hw_sequencer_enable_memory_requests(struct hw_sequencer *hws,
+ struct hw_path_mode *hw_path_mode)
+{
+ struct controller *controller;
+ enum color_space color_space = translate_to_color_space(
+ hw_path_mode->mode.color_space);
+
+ controller = dal_display_path_get_controller(
+ hw_path_mode->display_path);
+
+ if (!dal_display_path_is_source_blanked(hw_path_mode->display_path))
+ /* no need to enable memory requests if source is not blanked */
+ return;
+
+ dal_line_buffer_reset_on_vblank(
+ dal_controller_get_line_buffer(controller),
+ dal_controller_get_id(controller));
+
+ dal_controller_unblank_crtc(controller, color_space);
+
+ dal_display_path_set_source_blanked(
+ hw_path_mode->display_path,
+ DISPLAY_TRI_STATE_FALSE);
+
+ dal_hw_sequencer_psr_enable(hws, hw_path_mode->display_path);
+}
+
+void dal_hw_sequencer_disable_memory_requests(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *hw_path_mode)
+{
+ struct controller *controller;
+ enum color_space color_space = translate_to_color_space(
+ hw_path_mode->mode.color_space);
+ controller = dal_display_path_get_controller(
+ hw_path_mode->display_path);
+
+ /* no need to disable memory requests if source is blanked */
+ if (dal_display_path_is_source_blanked(hw_path_mode->display_path))
+ return;
+
+ dal_controller_blank_crtc(controller, color_space);
+
+ dal_display_path_set_source_blanked(
+ hw_path_mode->display_path,
+ DISPLAY_TRI_STATE_TRUE);
+}
+
+void dal_hw_sequencer_update_info_packets(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode)
+{
+	dal_hw_sequencer_update_info_frame(path_mode);
+}
+
+static enum hwss_result encoder_validate_path_mode(
+ const struct hw_path_mode *path_mode,
+ struct encoder *encoder,
+ enum build_option bo_validate,
+ enum build_option bo_enable)
+{
+ struct encoder_output enc_output;
+ enum build_option option =
+ path_mode->action == HW_PATH_ACTION_STATIC_VALIDATE ?
+ bo_validate : bo_enable;
+ enum encoder_result validation_result;
+
+ build_encoder_output(path_mode, option, &enc_output);
+
+ validation_result =
+ dal_encoder_validate_output(
+ encoder,
+ &enc_output);
+
+ switch (validation_result) {
+ case ENCODER_RESULT_OK:
+ return HWSS_RESULT_OK;
+ case ENCODER_RESULT_NOBANDWIDTH:
+ return HWSS_RESULT_NO_BANDWIDTH;
+ default:
+ return HWSS_RESULT_ERROR;
+ }
+}
+
+/* Validate display path mode against static capabilities of graphics objects
+ * in the display path. */
+enum hwss_result dal_hw_sequencer_validate_display_path_mode(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode)
+{
+ enum hwss_result validation_result;
+ struct display_path_objects disp_path_obj;
+ struct controller *controller;
+
+ dal_hw_sequencer_get_objects(path_mode->display_path, &disp_path_obj);
+
+ controller = dal_display_path_get_controller(path_mode->display_path);
+
+	if (controller) {
+ if (!dal_controller_validate_timing(
+ controller,
+ &path_mode->mode.timing,
+ dal_hw_sequencer_get_asic_signal(path_mode)))
+ return HWSS_RESULT_ERROR;
+ }
+
+ if (disp_path_obj.upstream_encoder) {
+ validation_result = encoder_validate_path_mode(
+ path_mode,
+ disp_path_obj.upstream_encoder,
+ BUILD_OPTION_STATIC_VALIDATE_UPSTREAM,
+ BUILD_OPTION_ENABLE_UPSTREAM);
+
+ if (validation_result != HWSS_RESULT_OK)
+ return validation_result;
+ }
+
+ if (disp_path_obj.downstream_encoder) {
+ validation_result = encoder_validate_path_mode(
+ path_mode,
+ disp_path_obj.downstream_encoder,
+ BUILD_OPTION_STATIC_VALIDATE_DOWNSTREAM,
+ BUILD_OPTION_ENABLE_DOWNSTREAM);
+
+ if (validation_result != HWSS_RESULT_OK)
+ return validation_result;
+ }
+
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_set_gamma_ramp_adjustment(
+ struct hw_sequencer *hws,
+ const struct display_path *display_path,
+	struct hw_adjustment_gamma_ramp *adjustment)
+{
+ struct gamma_ramp *ramp = NULL;
+ struct gamma_parameters *gamma_param = NULL;
+ enum hwss_result result = HWSS_RESULT_OK;
+ struct controller *crtc;
+
+ crtc = dal_display_path_get_controller(display_path);
+
+ if (crtc == NULL)
+ return HWSS_RESULT_ERROR;
+
+	if (adjustment == NULL)
+ return HWSS_RESULT_ERROR;
+
+ ramp = dal_alloc(sizeof(struct gamma_ramp));
+ gamma_param = dal_alloc(sizeof(struct gamma_parameters));
+
+ if (ramp && gamma_param) {
+ dal_hw_sequencer_build_gamma_ramp_adj_params(
+			adjustment,
+ gamma_param,
+ ramp);
+
+		if (!dal_controller_set_gamma_ramp(crtc, ramp, gamma_param))
+			result = HWSS_RESULT_ERROR;
+	} else
+		result = HWSS_RESULT_ERROR;
+
+ dal_free(ramp);
+ dal_free(gamma_param);
+
+	return result;
+}
+
+enum hwss_result dal_hw_sequencer_set_color_control_adjustment(
+ struct hw_sequencer *hws,
+ struct controller *crtc,
+ struct hw_adjustment_color_control *adjustment)
+{
+ struct grph_csc_adjustment adjust;
+
+ dal_memset(&adjust, 0, sizeof(adjust));
+ if (dal_hw_sequencer_build_csc_adjust(
+ hws,
+ adjustment,
+ &adjust) != HWSS_RESULT_OK)
+ return HWSS_RESULT_ERROR;
+
+ dal_controller_set_grph_csc_adjustment(crtc, &adjust);
+
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_set_vertical_sync_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment)
+{
+ /* TODO: add implementation */
+ return HWSS_RESULT_ERROR;
+}
+
+enum hwss_result dal_hw_sequencer_set_horizontal_sync_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment)
+{
+ /* TODO: add implementation */
+ return HWSS_RESULT_ERROR;
+}
+
+enum hwss_result dal_hw_sequencer_set_composite_sync_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment)
+{
+ /* TODO: add implementation */
+ return HWSS_RESULT_ERROR;
+}
+
+enum hwss_result dal_hw_sequencer_enable_sync_output(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ /* TODO: add implementation */
+ return HWSS_RESULT_ERROR;
+}
+
+enum hwss_result dal_hw_sequencer_disable_sync_output(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ /* TODO: add implementation */
+ return HWSS_RESULT_ERROR;
+}
+
+enum hwss_result dal_hw_sequencer_set_backlight_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment)
+{
+ struct display_path_objects obj;
+
+ if (adjustment == NULL)
+ return HWSS_RESULT_ERROR;
+
+ dal_hw_sequencer_get_objects(display_path, &obj);
+
+ if (dal_display_path_get_controller(display_path) == NULL
+ || obj.upstream_encoder == NULL)
+ return HWSS_RESULT_ERROR;
+
+ dal_encoder_set_lcd_backlight_level(obj.upstream_encoder,
+ adjustment->ui_value);
+
+ if (obj.downstream_encoder != NULL)
+ dal_encoder_set_lcd_backlight_level(
+ obj.downstream_encoder,
+ adjustment->ui_value);
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_validate_link(
+ struct hw_sequencer *hws,
+ const struct validate_link_param *param)
+{
+ const struct display_path *display_path = param->display_path;
+ struct encoder *upstream;
+ struct encoder *downstream =
+ dal_display_path_get_downstream_encoder(
+ display_path, param->link_idx);
+
+	if (downstream &&
+		!dal_encoder_is_link_settings_supported(
+			downstream, &param->link_settings))
+		return HWSS_RESULT_OUT_OF_RANGE;
+
+ upstream = dal_display_path_get_upstream_encoder(
+ display_path, param->link_idx);
+ if (!dal_encoder_is_link_settings_supported(
+ upstream, &param->link_settings)) {
+ BREAK_TO_DEBUGGER();
+ return HWSS_RESULT_OUT_OF_RANGE;
+ }
+
+ return HWSS_RESULT_OK;
+}
+
+bool dal_hw_sequencer_is_supported_dp_training_pattern3(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ uint32_t link_idx)
+{
+ struct encoder *encoder;
+ struct encoder_feature_support features;
+
+ if (!display_path) {
+ BREAK_TO_DEBUGGER();
+
+ return false;
+ }
+
+ encoder = dal_display_path_get_upstream_encoder(display_path, link_idx);
+
+ if (!encoder) {
+ BREAK_TO_DEBUGGER();
+
+ return false;
+ }
+
+ features = dal_encoder_get_supported_features(encoder);
+
+ return features.flags.bits.IS_TPS3_CAPABLE;
+}
+
+enum hwss_result dal_hw_sequencer_set_dp_phy_pattern(
+ struct hw_sequencer *hws,
+ const struct set_dp_phy_pattern_param *param)
+{
+ struct encoder_set_dp_phy_pattern_param dp_phy_pattern_param = {0};
+ struct encoder *encoder = dal_display_path_get_upstream_encoder(
+ param->display_path, param->link_idx);
+
+ /* Build encoder context */
+ struct encoder_context context;
+
+ build_encoder_context(param->display_path, encoder, &context);
+
+ /* Set EncoderDpPhyPattern */
+ switch (param->test_pattern) {
+ case DP_TEST_PATTERN_D102:
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ case DP_TEST_PATTERN_PRBS7:
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+ case DP_TEST_PATTERN_TRAINING_PATTERN1:
+ case DP_TEST_PATTERN_TRAINING_PATTERN2:
+ case DP_TEST_PATTERN_TRAINING_PATTERN3:
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return HWSS_RESULT_ERROR;
+ }
+
+ /* Build SetTestPatternParam */
+ dp_phy_pattern_param.ctx = &context;
+ dp_phy_pattern_param.dp_phy_pattern = param->test_pattern;
+ dp_phy_pattern_param.custom_pattern = param->custom_pattern;
+ dp_phy_pattern_param.custom_pattern_size = param->cust_pattern_size;
+ dp_phy_pattern_param.alt_scrambler_reset = param->alt_scrambler_reset;
+
+	/* Call encoder to set the test pattern and return the result */
+ if (ENCODER_RESULT_OK !=
+ dal_encoder_set_dp_phy_pattern(
+ encoder, &dp_phy_pattern_param)) {
+ BREAK_TO_DEBUGGER();
+ return HWSS_RESULT_ERROR;
+ }
+
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_set_lane_settings(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ const struct link_training_settings *link_settings)
+{
+ struct display_path_objects obj;
+ struct encoder *encoder;
+ struct encoder_context context;
+
+ if (!link_settings) {
+ BREAK_TO_DEBUGGER();
+ return HWSS_RESULT_ERROR;
+ }
+
+ dal_hw_sequencer_get_objects(display_path, &obj);
+
+ /* use downstream encoder to handle the command */
+ encoder = obj.downstream_encoder != NULL ?
+ obj.downstream_encoder : obj.upstream_encoder;
+
+ /* Build encoder context */
+ build_encoder_context(display_path, encoder, &context);
+
+ /* call Encoder to set lane settings */
+ dal_encoder_set_lane_settings(encoder, &context, link_settings);
+
+ return HWSS_RESULT_OK;
+}
+
+void dal_hw_sequencer_set_test_pattern(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *link_settings,
+ const uint8_t *custom_pattern,
+ uint8_t cust_pattern_size)
+{
+ struct controller *crtc =
+ dal_display_path_get_controller(path_mode->display_path);
+ /* handle test harness command */
+ switch (test_pattern) {
+ /* these patterns generated by controller */
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP: {
+ /* disable bit depth reduction */
+ struct bit_depth_reduction_params params = {{ 0 } };
+
+ dal_controller_program_formatter_bit_depth_reduction(
+ crtc,
+ &params);
+
+ /* call controller to set test pattern */
+ dal_controller_set_test_pattern(
+ dal_display_path_get_controller(
+ path_mode->display_path),
+ test_pattern,
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+ break;
+ }
+ /* turn off test pattern mode */
+ case DP_TEST_PATTERN_VIDEO_MODE: {
+ /* restore bit depth reduction */
+ struct bit_depth_reduction_params params = {{ 0 } };
+
+ build_bit_depth_reduction_params(path_mode, &params);
+
+ dal_controller_program_formatter_bit_depth_reduction(
+ crtc,
+ &params);
+
+ /* reset test patterns on controller */
+ dal_controller_set_test_pattern(
+ dal_display_path_get_controller(
+ path_mode->display_path),
+ test_pattern,
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+ break;
+ }
+ default:
+ BREAK_TO_DEBUGGER(); /* invalid test pattern */
+ }
+}
+
+enum hwss_result dal_hw_sequencer_enable_link(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in)
+{
+ return hws->funcs->hwss_enable_link(hws, in);
+}
+
+enum hwss_result dal_hw_sequencer_enable_link_base(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in)
+{
+ enum hwss_result ret = HWSS_RESULT_OK;
+ struct display_path *display_path = in->display_path;
+ const struct hw_path_mode *hw_path_mode = in->path_mode;
+ struct encoder *upstream_enc;
+ struct encoder *downstream_enc;
+
+ /* Skip link reprogramming when optimization requested */
+ if (!in->optimized_programming) {
+ /* enable video output */
+ struct encoder_output encoder_output = {
+ { ENGINE_ID_UNKNOWN } };
+ downstream_enc = dal_display_path_get_downstream_encoder(
+ in->display_path, in->link_idx);
+ if (downstream_enc != NULL) {
+ struct encoder_pre_enable_output_param
+ pre_enable_output_param = { { 0 } };
+ build_encoder_context(display_path, downstream_enc,
+ &pre_enable_output_param.ctx);
+ dal_memmove(&pre_enable_output_param.crtc_timing,
+ &in->timing,
+ sizeof(struct hw_crtc_timing));
+ dal_memmove(&pre_enable_output_param.link_settings,
+ &in->link_settings,
+ sizeof(struct link_settings));
+ dal_encoder_pre_enable_output(downstream_enc,
+ &pre_enable_output_param);
+ }
+
+ upstream_enc = dal_display_path_get_upstream_encoder(
+ in->display_path,
+ in->link_idx);
+		/* Build the encoder output settings as for a full set mode;
+		 * this queries dynamic link capabilities, which must happen
+		 * before the output is enabled */
+ build_upstream_encoder_output(
+ hws,
+ in->link_idx,
+ hw_path_mode,
+ &in->link_settings,
+ BUILD_OPTION_SET_MODE,
+ &encoder_output);
+
+ if (dal_encoder_enable_output(upstream_enc, &encoder_output)
+ != ENCODER_RESULT_OK)
+ ret = HWSS_RESULT_ERROR;
+ }
+
+ return ret;
+}
+
+void dal_hw_sequencer_disable_link(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in)
+{
+ struct encoder *enc;
+ struct encoder_output encoder_output = { { ENGINE_ID_UNKNOWN } };
+ const struct hw_path_mode *hw_path_mode = in->path_mode;
+
+ /* Re-enable HPD if it is disabled */
+ enable_hpd(hws, in->display_path);
+
+ if (has_travis_or_nutmeg_encoder(in->display_path)) {
+ /* Travis and Nutmeg require us to wait for one frame */
+ struct controller *controller =
+ dal_display_path_get_controller(
+ in->display_path);
+
+ dal_controller_wait_for_vblank(controller);
+ }
+
+ enc = dal_display_path_get_upstream_encoder(
+ in->display_path,
+ in->link_idx);
+
+ build_upstream_encoder_output(
+ hws,
+ in->link_idx,
+ hw_path_mode,
+ &in->link_settings,
+ BUILD_OPTION_DISABLE,
+ &encoder_output);
+
+ dal_encoder_disable_output(enc, &encoder_output);
+}
+
+void dal_hw_sequencer_enable_stream(
+ struct hw_sequencer *hws,
+ const struct enable_stream_param *in)
+{
+ const struct hw_path_mode *hw_path_mode = in->path_mode;
+ struct display_path *display_path = in->display_path;
+ struct audio *audio;
+
+ if (in->link_idx == ASIC_LINK_INDEX) {
+ /* 1. update AVI info frame (HDMI, DP)
+ * we always need to update info frame
+ */
+ uint32_t active_total_with_borders;
+ uint32_t early_control = 0;
+ struct controller *controller =
+ dal_display_path_get_controller(display_path);
+
+ dal_hw_sequencer_update_info_frame(hw_path_mode);
+		/* 2. enable early control to avoid corruption on DP monitors */
+ active_total_with_borders =
+ in->timing.h_addressable
+ + in->timing.h_overscan_left
+ + in->timing.h_overscan_right;
+
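+		/* e.g. 1928 total active pixels on a 4-lane link:
+		 * 1928 % 4 == 0, so early_control becomes 4 */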
+ if (in->link_settings.lane_count != LANE_COUNT_UNKNOWN) {
+ early_control = active_total_with_borders
+ % in->link_settings.lane_count;
+
+ if (early_control == 0)
+ early_control = in->link_settings.lane_count;
+ }
+ dal_controller_set_early_control(controller, early_control);
+ }
+
+	/* 3. enable audio only at set-mode time */
+ audio = dal_display_path_get_audio(display_path, in->link_idx);
+ if (audio != NULL) {
+ enum signal_type asic_signal =
+ dal_hw_sequencer_get_asic_signal(hw_path_mode);
+ enum engine_id engine_id =
+ dal_hw_sequencer_get_engine_id(display_path);
+ dal_audio_enable_output(audio, engine_id, asic_signal,
+ in->link_settings.link_rate);
+ }
+}
+
+void dal_hw_sequencer_disable_stream(
+ struct hw_sequencer *hws,
+ const struct enable_stream_param *in)
+{
+ struct encoder *enc;
+ struct encoder_context enc_ctx;
+
+ /* update AVI info frame (HDMI, DP), cleanup HW registers */
+ enc = dal_display_path_get_upstream_encoder(
+ in->display_path,
+ in->link_idx);
+
+ build_encoder_context(in->display_path, enc, &enc_ctx);
+ dal_encoder_stop_info_frame(enc, &enc_ctx);
+}
+
+/**
+ * dal_hw_sequencer_blank_stream
+ *
+ * @brief
+ * blanks the output stream associated with the given display path on the
+ * specified link
+ */
+void dal_hw_sequencer_blank_stream(
+ struct hw_sequencer *hws,
+ const struct blank_stream_param *in)
+{
+ struct encoder *enc;
+ struct encoder_context enc_ctx;
+
+ ASSERT(in && in->display_path);
+ enc = dal_display_path_get_upstream_encoder(in->display_path,
+ in->link_idx);
+ ASSERT(enc != NULL);
+ build_encoder_context(in->display_path, enc, &enc_ctx);
+ dal_encoder_blank(enc, &enc_ctx);
+}
+
+void dal_hw_sequencer_unblank_stream(
+ struct hw_sequencer *hws,
+ const struct blank_stream_param *in)
+{
+ struct encoder *enc;
+ struct encoder_unblank_param enc_unbl_param;
+
+ ASSERT(in != NULL && in->display_path != NULL);
+
+ enc = dal_display_path_get_upstream_encoder(
+ in->display_path,
+ in->link_idx);
+
+ ASSERT(enc != NULL);
+
+ build_encoder_context(in->display_path, enc, &enc_unbl_param.ctx);
+ dal_memmove(
+ &enc_unbl_param.crtc_timing,
+ &in->timing,
+ sizeof(struct hw_crtc_timing));
+ dal_memmove(
+ &enc_unbl_param.link_settings,
+ &in->link_settings,
+ sizeof(struct link_settings));
+ dal_encoder_unblank(enc, &enc_unbl_param);
+}
+
+static enum hwss_result hw_sequencer_pre_dce_clock_change(
+ const struct hw_sequencer *hws,
+ const struct minimum_clocks_calculation_result *min_clk_in,
+ enum clocks_state required_clocks_state,
+ struct power_to_dal_info *output)
+{
+ struct dal_to_power_info input;
+
+ if (false == hws->use_pp_lib) {
+ /* Usage of PPLib is disabled */
+ return HWSS_RESULT_NOT_SUPPORTED;
+ }
+
+ dal_memset(&input, 0, sizeof(input));
+
+ input.min_deep_sleep_sclk = min_clk_in->min_deep_sleep_sclk;
+ input.min_mclk = min_clk_in->min_mclk_khz;
+ input.min_sclk = min_clk_in->min_sclk_khz;
+
+ switch (required_clocks_state) {
+ case CLOCKS_STATE_ULTRA_LOW:
+ input.required_clock = PP_CLOCKS_STATE_ULTRA_LOW;
+ break;
+ case CLOCKS_STATE_LOW:
+ input.required_clock = PP_CLOCKS_STATE_LOW;
+ break;
+ case CLOCKS_STATE_NOMINAL:
+ input.required_clock = PP_CLOCKS_STATE_NOMINAL;
+ break;
+ case CLOCKS_STATE_PERFORMANCE:
+ input.required_clock = PP_CLOCKS_STATE_PERFORMANCE;
+ break;
+ default:
+ input.required_clock = PP_CLOCKS_STATE_NOMINAL;
+ break;
+ }
+
+ if (!dal_pp_pre_dce_clock_change(hws->dal_context, &input, output)) {
+ /*dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: dal_pp_pre_dce_clock_change failed!\n",
+ __func__);*/
+ return HWSS_RESULT_ERROR;
+ }
+
+ return HWSS_RESULT_OK;
+}
+
+/* Notify PPLib about Clocks and Clock State values which WE need.
+ * PPLib will return the actual values (it will program in HW) which
+ * we'll use for stutter and display marks. */
+enum hwss_result dal_hw_sequencer_set_clocks_and_clock_state(
+ struct hw_sequencer *hws,
+ struct hw_global_objects *g_obj,
+ const struct minimum_clocks_calculation_result *min_clk_in,
+ enum clocks_state required_clocks_state)
+{
+ struct power_to_dal_info output;
+ struct bandwidth_mgr_clk_info bm_clk_info;
+
+ dal_memset(&output, 0, sizeof(output));
+
+ if (HWSS_RESULT_OK != hw_sequencer_pre_dce_clock_change(
+ hws,
+ min_clk_in,
+ required_clocks_state,
+ &output)) {
+ /* "output" was not updated by PPLib.
+ * DAL will use default values for set mode.
+ *
+ * Do NOT fail this call. */
+ return HWSS_RESULT_OK;
+ }
+
+ /* PPLib accepted the "clock state" that we need, that means we
+ * can store it as minimum state because PPLib guarantees not go below
+ * that state.
+ *
+ * Update the clock state here (prior to setting Pixel clock,
+ * DVO clock, or Display clock) */
+ if (!dal_display_clock_set_min_clocks_state(g_obj->dc,
+ required_clocks_state)) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: failed to set minimum clock state!\n",
+ __func__);
+ }
+
+ dal_memset(&bm_clk_info, 0, sizeof(bm_clk_info));
+
+ bm_clk_info.max_mclk_khz = output.max_mclk;
+ bm_clk_info.min_mclk_khz = output.min_mclk;
+ bm_clk_info.max_sclk_khz = output.max_sclk;
+ bm_clk_info.min_sclk_khz = output.min_sclk;
+
+ /* Now let Bandwidth Manager know about values we got from PPLib. */
+ dal_bandwidth_manager_set_dynamic_clock_info(g_obj->bm, &bm_clk_info);
+
+ return HWSS_RESULT_OK;
+}
+
+/**
+ * Program each HW block in the chain to set the mode.
+ *
+ * First fill minimum clock parameters for the existing paths; on each
+ * set mode call, fill the parameters for the path being programmed so they
+ * can be passed to the next set mode.
+ *
+ * \param path_set - display path set to operate on
+ */
+enum hwss_result dal_hw_sequencer_set_mode(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set)
+{
+ struct dal_context *dal_context = hws->dal_context;
+ uint32_t path_id = 0;
+ struct hwss_build_params *build_params = NULL;
+ union hwss_build_params_mask params_mask;
+ struct hw_global_objects g_obj = { NULL };
+ uint32_t paths_num;
+ enum clocks_state required_clocks_state;
+
+ if (!path_set)
+ return HWSS_RESULT_ERROR;
+
+ paths_num = dal_hw_path_mode_set_get_paths_number(path_set);
+
+ dal_hw_sequencer_get_global_objects(path_set, &g_obj);
+
+ /* Fill information for new acquired set of display paths. */
+ params_mask.all = PARAMS_MASK_ALL;
+ /* At this point we DO NOT have any information on Planes Config.
+ * That means we can NOT request Parameters for SCALING_TAPS and for
+ * LINE_BUFFER */
+ params_mask.bits.SCALING_TAPS = 0;
+ params_mask.bits.LINE_BUFFER = 0;
+
+ build_params = dal_hw_sequencer_prepare_path_parameters(
+ hws,
+ path_set,
+ params_mask,
+ false);
+
+ if (NULL == build_params) {
+ dal_logger_write(dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: prepare_path_parameters failed!\n",
+ __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ /* Set bit in scratch register to notify VBIOS that driver is under
+ * mode change sequence (critical state) and VBIOS will skip certain
+ * operations requested from other clients when this bit set. */
+ dal_bios_parser_set_scratch_critical_state(
+ dal_adapter_service_get_bios_parser(hws->as),
+ true);
+
+ /* Program stutter and other display marks to safe values to avoid
+ * corruption. */
+ hws->funcs->set_safe_displaymark(hws, path_set,
+ build_params->wm_input_params, build_params->params_num);
+
+ /* Note that 'build_params->min_clock_result' contains the minimum
+ * clock for mode set because 'params_mask.all = PARAMS_MASK_ALL'
+ * (which means params_mask.bits.MIN_CLOCKS == true).
+ *
+ * We need the minimum clocks
+ * because minimum clocks == maximum power efficiency */
+
+ /* Raise Required Clock State PRIOR to programming of state-dependent
+ * clocks. */
+ required_clocks_state = hws->funcs->get_required_clocks_state(hws,
+ g_obj.dc, path_set, &build_params->min_clock_result);
+
+ /* Call PPLib and Bandwidth Manager. */
+	if (true == hws->use_pp_lib &&
+		HWSS_RESULT_OK != dal_hw_sequencer_set_clocks_and_clock_state(
+		hws, &g_obj,
+		&build_params->min_clock_result,
+		required_clocks_state)) {
+		/* should never happen, but do not leak on the error path:
+		 * clear the VBIOS critical state bit and release parameters */
+		dal_bios_parser_set_scratch_critical_state(
+			dal_adapter_service_get_bios_parser(hws->as),
+			false);
+		dal_hw_sequencer_free_path_parameters(build_params);
+		return HWSS_RESULT_ERROR;
+	}
+
+ /* reset modes and turn off displays */
+ for (path_id = 0; path_id < paths_num; path_id++) {
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_set, path_id);
+
+ /* If we require full mode sequence, then reset path first. */
+ if (path_mode->action == HW_PATH_ACTION_RESET ||
+ path_mode->action_flags.TIMING_CHANGED) {
+ reset_path_mode_back_end(hws, path_set, path_id);
+ }
+ }
+
+ /* PPLib takes care of Mclk and Sclk.
+ * We program Dclk here. */
+ hws->funcs->set_display_clock(hws, path_set,
+ &build_params->min_clock_result);
+
+ /* set new modes */
+ for (path_id = 0; path_id < paths_num; path_id++) {
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_set, path_id);
+
+ switch (path_mode->action) {
+ case HW_PATH_ACTION_SET:
+ set_path_mode_back_end(
+ hws, path_set, path_id, build_params);
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Setup audio rate clock source */
+ hws->funcs->setup_audio_wall_dto(hws, path_set, build_params);
+
+ /* Up to this point we are using 'safe' marks, which are not
+ * power-efficient.
+ * Program stutter and other display marks to values provided
+ * by PPLib. */
+ hws->funcs->set_displaymark(hws, path_set,
+ build_params->wm_input_params,
+ build_params->params_num);
+
+ if (true == hws->use_pp_lib) {
+ /* Let PPLib know that we are using the clocks it
+ * provided to us. */
+ if (!dal_pp_post_dce_clock_change(dal_context)) {
+ /*dal_logger_write(dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: dal_pp_post_dce_clock_change() failed!\n",
+ __func__);*/
+ }
+ }
+
+ dal_hw_sync_control_inter_path_synchronize(hws->sync_control, path_set);
+
+ /* reset critical state bit in scratch register (end of mode change) */
+ dal_bios_parser_set_scratch_critical_state(
+ dal_adapter_service_get_bios_parser(hws->as),
+ false);
+
+ /* release allocated memory */
+ dal_hw_sequencer_free_path_parameters(build_params);
+
+ return HWSS_RESULT_OK;
+}
+
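+/* Program the formatter (FMT): bit depth reduction (dithering/truncation)
+ * and clamping/pixel encoding for the given path mode. */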
+static void program_fmt(
+ const struct hw_path_mode *path_mode)
+{
+ /* dithering is affected by <CrtcSourceSelect>, hence should be
+ * programmed afterwards */
+ bool program_fmt;
+ struct clamping_and_pixel_encoding_params fmt_clamping;
+ struct bit_depth_reduction_params fmt_bit_depth = {{ 0 } };
+ struct controller *crtc =
+ dal_display_path_get_controller(path_mode->display_path);
+
+ program_fmt = build_bit_depth_reduction_params(
+ path_mode, &fmt_bit_depth);
+
+ if (program_fmt)
+ dal_controller_program_formatter_bit_depth_reduction(
+ crtc,
+ &fmt_bit_depth);
+
+ /* set pixel encoding based on adjustment of pixel format if any! */
+ dal_memset(&fmt_clamping, 0, sizeof(fmt_clamping));
+ program_fmt = setup_pixel_encoding(path_mode, &fmt_clamping);
+
+ if (!program_fmt)
+ return;
+
+ dal_controller_program_formatter_clamping_and_pixel_encoding(
+ crtc,
+ &fmt_clamping);
+}
+
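+/* Apply gamma, color, sync and backlight adjustments for the path; fall
+ * back to HW defaults where no user adjustment is provided. */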
+static void program_adjustments(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ struct hwss_build_params *build_params,
+ enum color_space color_space,
+ uint32_t path_id,
+ struct controller *crtc)
+{
+ /*
+ * TODO should we look at plane_config instead of path_mode?
+ */
+
+ struct hw_adjustment_set *adjustment_set = path_mode->adjustment_set;
+
+ struct hw_adjustment_color_control *color_adjustment = NULL;
+ struct hw_adjustment_gamma_ramp *gamma_adjustment = NULL;
+
+ struct hw_adjustment_value *v_sync_adjustment = NULL;
+ struct hw_adjustment_value *h_sync_adjustment = NULL;
+ struct hw_adjustment_value *composite_sync_adjustment = NULL;
+ struct hw_adjustment_value *backlight_adjustment = NULL;
+ struct hw_adjustment_value *vb_level_adjustment = NULL;
+
+	/* TODO: always using plane 0 for now */
+ uint32_t plane_id = 0;
+
+ if (adjustment_set) {
+ gamma_adjustment = adjustment_set->gamma_ramp;
+ color_adjustment = adjustment_set->color_control;
+
+		/* may be zero; non-zero only for CRT */
+ v_sync_adjustment = adjustment_set->v_sync;
+ h_sync_adjustment = adjustment_set->h_sync;
+ composite_sync_adjustment = adjustment_set->composite_sync;
+ backlight_adjustment = adjustment_set->backlight;
+ vb_level_adjustment = adjustment_set->vb_level;
+ }
+
+ if (gamma_adjustment != NULL) {
+ /* user gamma */
+ dal_hw_sequencer_set_gamma_ramp_adjustment(
+ hws, path_mode->display_path, gamma_adjustment);
+ } else {
+ /* default gamma */
+ dal_controller_set_default_gamma(
+ crtc,
+ path_mode->mode.pixel_format);
+ }
+ if (color_adjustment != NULL) {
+ /* user colors */
+ color_adjustment->lb_color_depth =
+ dal_hw_sequencer_translate_to_lb_color_depth(
+ build_params->
+ line_buffer_params[path_id][plane_id].depth);
+ dal_hw_sequencer_set_color_control_adjustment(
+ hws,
+ crtc,
+ color_adjustment);
+ } else {
+ /* default colors */
+ struct default_adjustment default_adjust;
+
+ dal_memset(&default_adjust, 0, sizeof(default_adjust));
+
+ default_adjust.force_hw_default = false;
+ default_adjust.color_space = color_space;
+ default_adjust.csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
+ default_adjust.surface_pixel_format =
+ path_mode->mode.pixel_format;
+ /* display color depth */
+ default_adjust.color_depth =
+ dal_hw_sequencer_translate_to_csc_color_depth(
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+ /* Lb color depth */
+ default_adjust.lb_color_depth =
+ dal_hw_sequencer_translate_to_lb_color_depth(
+ build_params->
+ line_buffer_params[path_id][plane_id].depth);
+ dal_controller_set_grph_csc_default(
+ crtc,
+ &default_adjust);
+ }
+
+ dal_controller_set_input_csc(crtc, color_space);
+
+ /* the adjustments routines check for NULL hw_adjustment*,
+ * so no problem! */
+ dal_hw_sequencer_set_vertical_sync_adjustment(
+ hws,
+ path_mode->display_path,
+ v_sync_adjustment);
+ dal_hw_sequencer_set_horizontal_sync_adjustment(
+ hws,
+ path_mode->display_path,
+ h_sync_adjustment);
+ dal_hw_sequencer_set_composite_sync_adjustment(
+ hws,
+ path_mode->display_path,
+ composite_sync_adjustment);
+ dal_hw_sequencer_set_backlight_adjustment(
+ hws,
+ path_mode->display_path,
+ backlight_adjustment);
+}
+
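+/* (Re)program the path encoders and, if present, the audio endpoint when
+ * the timing, the pixel encoding or (on HDMI) the gamut has changed. */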
+static void program_encoder_and_audio(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ struct display_path_objects *objs,
+ struct hwss_build_params *build_params,
+ enum engine_id engine_id,
+ uint32_t path_id)
+{
+	/* If the pixel format changed on HDMI, we should update the gamut
+	 * packet information, e.g. when going from A888 to XR_BIAS or FP16.
+	 */
+ bool is_hdmi = false;
+
+ if (path_mode->action_flags.GAMUT_CHANGED) {
+ enum dcs_edid_connector_type conn_type =
+ dal_dcs_get_connector_type(
+ dal_display_path_get_dcs(
+ path_mode->display_path));
+
+ if (conn_type == EDID_CONNECTOR_HDMIA)
+ is_hdmi = true;
+ }
+
+ if (is_hdmi || path_mode->action_flags.TIMING_CHANGED ||
+ path_mode->action_flags.PIXEL_ENCODING_CHANGED) {
+ /* setup Encoder - prepare enable encoder parameters */
+ struct encoder_output encoder_output;
+
+ build_encoder_output(path_mode, BUILD_OPTION_ENABLE_UPSTREAM,
+ &encoder_output);
+
+ /* setup Encoder */
+ dal_encoder_setup(objs->upstream_encoder, &encoder_output);
+ if (objs->downstream_encoder != NULL)
+ dal_encoder_setup(
+ objs->downstream_encoder,
+ &encoder_output);
+
+ /* setup Audio */
+ if (objs->audio != NULL) {
+ struct audio_output audio_output;
+ struct audio_info *audio_info = NULL;
+
+ dal_hw_sequencer_build_audio_output(
+ hws,
+ path_mode,
+ engine_id,
+ &build_params->pll_settings_params[path_id],
+ &audio_output);
+
+ audio_info = build_audio_info(hws, path_mode);
+
+ ASSERT(audio_info != NULL);
+ if (audio_info != NULL) {
+ /* setup audio */
+ dal_audio_setup(objs->audio,
+ &audio_output, audio_info);
+ dal_free(audio_info);
+ }
+
+ }
+ }
+}
+
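+/* Program the pixel clock (PLL) and timing generator when the timing has
+ * changed; otherwise only blank the CRTC for a partial set mode. */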
+static void reprogram_crtc_and_pll(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ struct hwss_build_params *build_params,
+ struct hw_crtc_timing *hw_crtc_timing,
+ struct bandwidth_manager *bm,
+ struct controller *controller,
+ uint32_t path_id)
+{
+ if (path_mode->action_flags.TIMING_CHANGED) {
+ enum controller_id id = dal_controller_get_id(controller);
+
+ /* program PLL */
+ struct pixel_clk_params pixel_clk_params;
+
+ dal_memset(&pixel_clk_params, 0, sizeof(pixel_clk_params));
+ dal_hw_sequencer_get_pixel_clock_parameters(
+ path_mode, &pixel_clk_params);
+
+ pixel_clk_params.flags.PROGRAM_PIXEL_CLOCK = true;
+
+ dal_clock_source_program_pix_clk(
+ dal_display_path_get_clock_source(
+ path_mode->display_path),
+ &pixel_clk_params,
+ &build_params->pll_settings_params[path_id]);
+
+ /* program CRTC with the original timing and setup stereo mixer
+ * while CRTC is off (does nothing on regular path) */
+ hws->funcs->setup_timing_and_blender(
+ hws, controller, path_mode, hw_crtc_timing);
+
+ dal_bandwidth_manager_allocate_dmif_buffer(bm,
+ id, build_params->params_num,
+ build_params->bandwidth_params);
+
+ dal_controller_enable_timing_generator(controller);
+ } else
+ /* Partial setMode sequence - only need to blank CRTC */
+ dal_hw_sequencer_disable_memory_requests(hws, path_mode);
+}
+
+DAL_VECTOR_AT_INDEX(plane_configs, const struct plane_config *)
+
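+/* Program the scaler for one plane: taps, viewport and overscan, line
+ * buffer depth and power gating, then the bandwidth manager pipe limits. */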
+static void program_scaler(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode,
+ const struct hw_crtc_timing *hw_crtc_timing,
+ struct hwss_build_params *build_params,
+ struct controller *controller,
+ struct bandwidth_manager *bm,
+ uint32_t path_id,
+ uint32_t plane_id,
+ enum color_space color_space)
+{
+ struct scaler_data scaler_data;
+ const struct plane_config *plane_config = NULL;
+ struct scaling_tap_info *taps;
+
+ dal_memset(&scaler_data, 0, sizeof(scaler_data));
+ scaler_data.hw_crtc_timing = hw_crtc_timing;
+ taps = &build_params->scaling_taps_params[path_id][plane_id];
+
+ if (path_mode->plane_configs != NULL)
+ plane_config = plane_configs_vector_at_index(
+ path_mode->plane_configs,
+ plane_id);
+
+ if (plane_config == NULL) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: plane_config is NULL!\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ dal_hw_sequencer_build_scaler_parameter_plane(
+ plane_config,
+ taps,
+ &scaler_data,
+ true, /*program viewport*/
+ true, /*program_alpha*/
+ false); /*unlock scaler*/
+
+ hws->funcs->setup_line_buffer_pixel_depth(
+ hws,
+ controller,
+ build_params->line_buffer_params[path_id][plane_id].depth,
+ false);
+
+	/* Adjust width for wide displays: try to enable the line buffer power
+	 * gating feature */
+ dal_hw_sequencer_enable_line_buffer_power_gating(
+ dal_controller_get_line_buffer(controller),
+ dal_controller_get_id(controller),
+ scaler_data.pixel_type,
+ path_mode->mode.scaling_info.src.width,
+ path_mode->mode.scaling_info.dst.width,
+ &build_params->scaling_taps_params[path_id][plane_id],
+ build_params->line_buffer_params[path_id][plane_id].depth,
+ path_mode->mode.scaling_info.src.height,
+ path_mode->mode.scaling_info.dst.height,
+ path_mode->mode.timing.flags.INTERLACED);
+
+ dal_bandwidth_manager_setup_pipe_max_request(bm,
+ dal_controller_get_id(controller),
+ &build_params->bandwidth_params->color_info);
+
+ dal_controller_set_overscan_color_black(controller, color_space);
+
+ dal_controller_set_scaler_wrapper(controller, &scaler_data);
+ dal_controller_update_viewport(
+ controller,
+ &scaler_data.viewport,
+ false);
+}
+
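+/**
+ * set_path_mode_back_end
+ *
+ * @brief
+ * program back-end HW blocks (PLL, CRTC, CRTC source select, FMT, encoder
+ * and audio) for one display path of the path set
+ *
+ */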
+static enum hwss_result set_path_mode_back_end(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t path_id,
+ struct hwss_build_params *build_params)
+{
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(path_set, path_id);
+
+ struct display_path_objects display_path_objs;
+ struct hw_global_objects global_objs = { NULL };
+ enum color_space color_space;
+ struct display_path *display_path = path_mode->display_path;
+ const struct hw_mode_info *mode_info = &path_mode->mode;
+ enum engine_id engine_id = dal_hw_sequencer_get_engine_id(display_path);
+ enum signal_type asic_signal =
+ dal_hw_sequencer_get_asic_signal(path_mode);
+ struct controller *controller =
+ dal_display_path_get_controller(display_path);
+
+ if (engine_id == ENGINE_ID_UNKNOWN)
+ return HWSS_RESULT_ERROR;
+
+ /* Extract global objects */
+ dal_hw_sequencer_get_global_objects(path_set, &global_objs);
+
+ /* Full setMode sequence (path was already reset in resetPathMode) */
+ reprogram_crtc_and_pll(
+ hws,
+ path_mode,
+ build_params,
+ (struct hw_crtc_timing *)&mode_info->timing,
+ global_objs.bm,
+ controller, path_id);
+
+ /* Program dynamic refresh rate - simply write a few CRTC registers
+ * (no BIOS involvement). Must be done after CRTC programmed. Also needs
+ * to be programmed for case where optimization skips full timing
+ * reprogramming. */
+ dal_controller_program_drr(
+ controller, &mode_info->timing.ranged_timing);
+
+ /* program Scaler */
+ color_space = translate_to_color_space(
+ path_mode->mode.color_space);
+
+ dal_hw_sequencer_get_objects(
+ path_mode->display_path, &display_path_objs);
+
+ {
+ /* select CRTC source for encoder */
+ struct bp_crtc_source_select crtc_source_select;
+
+ dal_memset(&crtc_source_select, 0, sizeof(crtc_source_select));
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.controller_id =
+ dal_controller_get_id(controller);
+ crtc_source_select.signal = asic_signal;
+ crtc_source_select.enable_dp_audio =
+ display_path_objs.audio != NULL;
+ crtc_source_select.sink_signal = dal_get_signal(path_mode);
+ crtc_source_select.display_output_bit_depth =
+ translate_to_display_output_bit_depth(
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+
+ /* call VBIOS table to set CRTC source for the HW encoder block
+		 * note: video bios clears all FMT settings here. */
+ dal_bios_parser_crtc_source_select(
+ dal_adapter_service_get_bios_parser(hws->as),
+ &crtc_source_select);
+ }
+
+	/* deep color enable for HDMI, set FMT dynamic expansion */
+ dal_controller_formatter_set_dyn_expansion(
+ controller,
+ color_space,
+ translate_to_color_depth(
+ path_mode->mode.timing.flags.COLOR_DEPTH),
+ asic_signal);
+
+ {
+ /* Setup Engine Stereosync - stereosync select should always
+ * match source select */
+		struct encoder_3d_setup encoder_3d_setup = {
+			ENGINE_ID_UNKNOWN };
+		encoder_3d_setup.engine = engine_id;
+		encoder_3d_setup.source =
+			dal_controller_get_sync_source(controller);
+		encoder_3d_setup.flags.bits.SETUP_SYNC_SOURCE = true;
+		dal_encoder_setup_stereo(
+			display_path_objs.upstream_encoder, &encoder_3d_setup);
+
+		if (display_path_objs.downstream_encoder != NULL)
+			dal_encoder_setup_stereo(
+				display_path_objs.downstream_encoder,
+				&encoder_3d_setup);
+ }
+
+ program_fmt(path_mode);
+
+ program_encoder_and_audio(
+ hws,
+ path_mode,
+ &display_path_objs,
+ build_params,
+ engine_id,
+ path_id);
+
+ return HWSS_RESULT_OK;
+}
+
+/**
+ * reset_path_mode_back_end
+ *
+ * @brief
+ * reset (disable) all HW blocks for the specified path
+ *
+ */
+static enum hwss_result reset_path_mode_back_end(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ uint32_t path_id)
+{
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(set, path_id);
+ struct display_path *display_path = path_mode->display_path;
+ struct controller *controller =
+ dal_display_path_get_controller(display_path);
+ /* Extract global objects */
+ struct hw_global_objects global_objs = { NULL };
+
+ if (dal_hw_sequencer_get_engine_id(display_path) == ENGINE_ID_UNKNOWN)
+ return HWSS_RESULT_ERROR;
+
+ dal_hw_sequencer_get_global_objects(set, &global_objs);
+
+ dal_hw_sequencer_disable_memory_requests(hws, path_mode);
+
+ dal_controller_disable_timing_generator(controller);
+
+ dal_bandwidth_manager_deallocate_dmif_buffer(
+ global_objs.bm,
+ dal_controller_get_id(controller),
+ dal_hw_path_mode_set_get_paths_number(set));
+
+ dal_controller_set_scaler_bypass(controller);
+
+ dal_controller_disable_stereo_mixer(controller);
+
+ return HWSS_RESULT_OK;
+}
+
+void dal_hw_sequencer_destroy(struct hw_sequencer **hws)
+{
+ if (!hws || !*hws) {
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ (*hws)->funcs->destroy(hws);
+
+ *hws = NULL;
+}
+
+/* Derived objects call this function to do common initialization. */
+bool dal_hw_sequencer_construct_base(
+ struct hw_sequencer *hws,
+ struct hws_init_data *init_data)
+{
+ if (!init_data->as || !init_data->dal_context) {
+ /* TODO: call Logger with an error message. */
+ return false;
+ }
+
+ hws->as = init_data->as;
+ hws->dal_context = init_data->dal_context;
+
+ hws->use_pp_lib = dal_adapter_service_is_feature_supported(
+ FEATURE_USE_PPLIB);
+
+ return true;
+}
+
+struct hw_sequencer *dal_hw_sequencer_create(struct hws_init_data *init_data)
+{
+ struct hw_sequencer *hws = NULL;
+
+ switch (dal_adapter_service_get_dce_version(init_data->as)) {
+#if defined(CONFIG_DRM_AMD_DAL_DCE11_0)
+ case DCE_VERSION_11_0:
+ hws = dal_hw_sequencer_dce110_create(init_data);
+ break;
+#endif
+ default:
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ return hws;
+}
+
+enum signal_type dal_hw_sequencer_detect_sink(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ enum signal_type signal =
+ dal_display_path_get_config_signal(
+ display_path, SINK_LINK_INDEX);
+
+ /* The signal in the display path here is the one reported by the
+ * connector. For analog signals we report the same signal as in the
+ * display path.
+ */
+ switch (signal) {
+ case SIGNAL_TYPE_RGB:
+ case SIGNAL_TYPE_YPBPR:
+ case SIGNAL_TYPE_SCART:
+ case SIGNAL_TYPE_COMPOSITE:
+ case SIGNAL_TYPE_SVIDEO:
+ return signal;
+ default:
+ break;
+ }
+
+ /* When calling detect_sink, we should pass in the downstream object.
+ * This is not always the connector, but here it is ok to call
+ * detect_sink with the connector object id since we are doing this for
+ * the SINK_LINK_INDEX.
+ * The downstream object in this case will always be the connector
+ * object.
+ */
+ return dal_encoder_detect_sink(
+ dal_display_path_get_upstream_encoder(
+ display_path, SINK_LINK_INDEX),
+ dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(display_path)));
+}
+
+/**
+ * dal_hw_sequencer_detect_load
+ *
+ * Detect load on the sink and return the detected signal type. Uses the
+ * downstream encoder if present, otherwise the upstream encoder.
+ *
+ */
+enum signal_type dal_hw_sequencer_detect_load(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ struct display_path_objects objs;
+ struct encoder *encoder;
+ struct encoder_context context;
+
+ dal_hw_sequencer_get_objects(display_path, &objs);
+
+ encoder = objs.downstream_encoder != NULL ?
+ objs.downstream_encoder : objs.upstream_encoder;
+
+ build_encoder_context(display_path, encoder, &context);
+
+ return dal_encoder_detect_load(encoder, &context);
+}
+
+bool dal_hw_sequencer_is_sink_present(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+ /* When calling is_sink_present, we should pass in the downstream
+ * object. We are doing detection on the encoder closest to the
+ * connector by specifying the SINK_LINK_INDEX. The downstream object in
+ * this case will always be the connector object.
+ */
+ return dal_encoder_is_sink_present(
+ dal_display_path_get_upstream_encoder(
+ display_path, SINK_LINK_INDEX),
+ dal_connector_get_graphics_object_id(
+ dal_display_path_get_connector(display_path)));
+}
+
+void dal_hw_sequencer_program_drr(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode)
+{
+ struct controller *controller =
+ dal_display_path_get_controller(path_mode->display_path);
+
+ dal_controller_program_drr(
+ controller, &path_mode->mode.timing.ranged_timing);
+}
+
+void dal_hw_sequencer_psr_setup(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ const struct psr_caps *psr_caps)
+{
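+ /* Not implemented: PSR setup is currently a no-op stub. */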
+}
+
+void dal_hw_sequencer_psr_enable(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+}
+
+/**
+ * Validate a set of display path modes, including scaler and bandwidth, in a
+ * multi- or single-display configuration.
+ *
+ * Assumes static validation (by dal_hw_sequencer_validate_display_path_mode())
+ * has passed; it is not repeated here. The idea is that modes which can't
+ * be supported "statically" are already filtered out (by the timing service).
+ */
+enum hwss_result dal_hw_sequencer_validate_display_hwpms(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set)
+{
+ const struct hw_path_mode *path_mode;
+ struct controller *controller = NULL;
+ enum hwss_result result = HWSS_RESULT_OK;
+ struct hwss_build_params *build_params = NULL;
+ union hwss_build_params_mask params_mask;
+
+ if (path_set == NULL) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: invalid input!\n", __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ /* We need a Controller for Bandwidth Manager input.
+ * Get Controller from any path (first one is good enough). */
+ path_mode = dal_hw_path_mode_set_get_path_by_index(path_set, 0);
+ if (path_mode == NULL) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: path mode set is empty!\n", __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ if (path_mode->display_path == NULL) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: no Display Path in path mode!\n",
+ __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ controller = dal_display_path_get_controller(path_mode->display_path);
+ if (controller == NULL) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: no controller on the Path!\n", __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ /*********************************************
+ validate scaler
+ *********************************************/
+ /* For path mode validation we don't need PLL settings; they are not
+ * used here at all. */
+ /* Note: GetPLLDivider() will call the video BIOS to adjust the PLL
+ * rate. */
+ params_mask.all = 0;
+ params_mask.bits.BANDWIDTH = true;
+
+ /* fill information for active set of display paths */
+ build_params =
+ dal_hw_sequencer_prepare_path_parameters(
+ hws,
+ path_set,
+ params_mask,
+ true);
+
+ if (NULL == build_params)
+ return HWSS_RESULT_ERROR;
+
+ /**************************************************
+ bandwidth validation
+ **************************************************/
+ if (!validate_video_memory_bandwidth(
+ controller,
+ build_params->params_num,
+ build_params->bandwidth_params))
+ result = HWSS_RESULT_NO_BANDWIDTH;
+
+ /* release allocated memory */
+ dal_hw_sequencer_free_path_parameters(build_params);
+
+ return result;
+}
+
+void dal_hw_sequencer_psr_disable(
+ struct hw_sequencer *hws,
+ struct display_path *display_path)
+{
+}
+
+
+enum hwss_result dal_hw_sequencer_set_safe_displaymark(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set)
+{
+ struct hwss_build_params *build_params = NULL;
+ union hwss_build_params_mask params_mask;
+
+ if (!path_set)
+ return HWSS_RESULT_ERROR;
+
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_INTERFACE_TRACE,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: Setting safe display mark\n", __func__);
+
+ /* Fill information for the newly acquired set of display paths.
+ * For safe display marks we only need watermark and bandwidth
+ * parameters. */
+ params_mask.all = 0;
+ params_mask.bits.WATERMARK = true;
+ params_mask.bits.BANDWIDTH = true;
+
+ /* TODO: do we really need to calculate anything for SAFE marks?
+ * If not, we don't need to "prepare parameters" either. */
+ build_params =
+ dal_hw_sequencer_prepare_path_parameters(
+ hws,
+ path_set,
+ params_mask,
+ false);
+
+ if (NULL == build_params) {
+ dal_logger_write(hws->dal_context->logger, LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: failed to prepare HWSS parameters\n",
+ __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ /* Program stutter and other display marks to safe values to avoid
+ * corruption. */
+ hws->funcs->set_safe_displaymark(hws, path_set,
+ build_params->wm_input_params,
+ build_params->params_num);
+
+ dal_hw_sequencer_free_path_parameters(build_params);
+
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_set_displaymark(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set)
+{
+ struct hwss_build_params *build_params = NULL;
+ union hwss_build_params_mask params_mask;
+
+ if (!path_set)
+ return HWSS_RESULT_ERROR;
+
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_INTERFACE_TRACE,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: Setting real display mark\n", __func__);
+
+ /* Fill information for the newly acquired set of display paths.
+ * We only need watermark and bandwidth parameters for setting
+ * display marks. */
+ params_mask.all = 0;
+ params_mask.bits.WATERMARK = true;
+ params_mask.bits.BANDWIDTH = true;
+
+ build_params = dal_hw_sequencer_prepare_path_parameters(
+ hws, path_set, params_mask, false);
+
+ if (NULL == build_params) {
+ dal_logger_write(hws->dal_context->logger, LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: failed to prepare path parameters!\n",
+ __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ /* Program actual display marks from values cached in bandwidth manager
+ * gpu dynamic clock info. (see dal_gpu_update_dynamic_clock_info) */
+ hws->funcs->set_displaymark(hws, path_set,
+ build_params->wm_input_params,
+ build_params->params_num);
+
+ dal_hw_sequencer_free_path_parameters(build_params);
+
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_set_bit_depth_reduction_adj(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ union hw_adjustment_bit_depth_reduction *bit_depth)
+{
+ struct dcs *dcs = dal_display_path_get_dcs(disp_path);
+ enum signal_type signal = dal_display_path_get_config_signal(
+ disp_path, SINK_LINK_INDEX);
+ struct bit_depth_reduction_params fmt_bit_depth = {{ 0 } };
+
+ if (!bit_depth)
+ return HWSS_RESULT_ERROR;
+
+ if (disp_path != NULL && dcs != NULL) {
+ if (dal_dcs_get_enabled_packed_pixel_format(dcs) !=
+ DCS_PACKED_PIXEL_FORMAT_NOT_PACKED)
+ return HWSS_RESULT_ERROR;
+ }
+ if ((dal_is_dvi_signal(signal) &&
+ dal_adapter_service_is_feature_supported(
+ FEATURE_TMDS_DISABLE_DITHERING)) ||
+ (dal_is_dp_signal(signal) &&
+ dal_adapter_service_is_feature_supported(
+ FEATURE_DP_DISABLE_DITHERING)) ||
+ (dal_is_hdmi_signal(signal) &&
+ dal_adapter_service_is_feature_supported(
+ FEATURE_HDMI_DISABLE_DITHERING)) ||
+ (dal_is_embedded_signal(signal) &&
+ dal_adapter_service_is_feature_supported(
+ FEATURE_EMBEDDED_DISABLE_DITHERING)) ||
+ (signal == SIGNAL_TYPE_WIRELESS))
+ return HWSS_RESULT_ERROR;
+
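+ /* Copy the requested adjustment flags 1:1 into the FMT bit depth
+ * reduction parameters. */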
+ fmt_bit_depth.flags.TRUNCATE_ENABLED =
+ bit_depth->bits.TRUNCATE_ENABLED;
+ fmt_bit_depth.flags.TRUNCATE_DEPTH =
+ bit_depth->bits.TRUNCATE_DEPTH;
+ fmt_bit_depth.flags.TRUNCATE_MODE =
+ bit_depth->bits.TRUNCATE_MODE;
+
+ fmt_bit_depth.flags.SPATIAL_DITHER_ENABLED =
+ bit_depth->bits.SPATIAL_DITHER_ENABLED;
+ fmt_bit_depth.flags.SPATIAL_DITHER_DEPTH =
+ bit_depth->bits.SPATIAL_DITHER_DEPTH;
+ fmt_bit_depth.flags.SPATIAL_DITHER_MODE =
+ bit_depth->bits.SPATIAL_DITHER_MODE;
+ fmt_bit_depth.flags.RGB_RANDOM =
+ bit_depth->bits.RGB_RANDOM;
+ fmt_bit_depth.flags.FRAME_RANDOM =
+ bit_depth->bits.FRAME_RANDOM;
+ fmt_bit_depth.flags.HIGHPASS_RANDOM =
+ bit_depth->bits.HIGHPASS_RANDOM;
+
+ fmt_bit_depth.flags.FRAME_MODULATION_ENABLED =
+ bit_depth->bits.FRAME_MODULATION_ENABLED;
+ fmt_bit_depth.flags.FRAME_MODULATION_DEPTH =
+ bit_depth->bits.FRAME_MODULATION_DEPTH;
+ fmt_bit_depth.flags.TEMPORAL_LEVEL =
+ bit_depth->bits.TEMPORAL_LEVEL;
+ fmt_bit_depth.flags.FRC25 =
+ bit_depth->bits.FRC_25;
+ fmt_bit_depth.flags.FRC50 =
+ bit_depth->bits.FRC_50;
+ fmt_bit_depth.flags.FRC75 =
+ bit_depth->bits.FRC_75;
+
+ dal_controller_program_formatter_bit_depth_reduction(
+ dal_display_path_get_controller(disp_path),
+ &fmt_bit_depth);
+
+ return HWSS_RESULT_OK;
+}
+
+void dal_hw_sequencer_enable_wireless_idle_detection(
+ struct hw_sequencer *hws,
+ bool enable)
+{
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "dal_hw_sequencer_enable_wireless_idle_detection is empty.");
+}
+
+static void program_alpha_mode(
+ struct controller *crtc,
+ const struct plane_blend_flags *blend_flags,
+ const enum hw_pixel_encoding pixel_encoding)
+{
+ struct alpha_mode_cfg blending_config;
+ bool alpha_enable;
+
+ dal_memset(&blending_config, 0, sizeof(struct alpha_mode_cfg));
+
+ /* GLOBAL_ALPHA_BLEND is not currently used, so PER_PIXEL_ALPHA_BLEND
+ * is the only case we handle. */
+ if (blend_flags->bits.PER_PIXEL_ALPHA_BLEND == 1) {
+ blending_config.flags.bits.MODE_IS_SET = 1;
+ blending_config.mode = ALPHA_MODE_PIXEL;
+ /* TODO we need to understand why MODE_MULTIPLIED bits
+ * are set. These seem to be the root of
+ * color corruption on HDMI.
+ * We'll only set these for RGB for now to avoid color
+ * corruption with YCbCr outputs.
+ */
+ if (pixel_encoding == HW_PIXEL_ENCODING_RGB) {
+ blending_config.flags.bits.MODE_MULTIPLIED_IS_SET = 1;
+ blending_config.flags.bits.MULTIPLIED_MODE = 1;
+ }
+ }
+
+ alpha_enable = dal_controller_program_alpha_blending(
+ crtc,
+ &blending_config);
+
+ dal_line_buffer_enable_alpha(
+ dal_controller_get_line_buffer(crtc),
+ alpha_enable);
+}
+
+static enum hwss_result set_path_mode_front_end(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode,
+ uint32_t hw_path_mode_id,
+ uint32_t plane_id,
+ struct display_path_plane *plane,
+ struct hwss_build_params *build_params)
+{
+ enum color_space color_space;
+ const struct hw_mode_info *mode_info = &path_mode->mode;
+ struct controller *crtc = plane->controller;
+ struct controller *root_controller =
+ dal_display_path_get_controller(path_mode->display_path);
+ struct vector *plane_configs = path_mode->plane_configs;
+ const struct plane_config *pl_cfg = NULL;
+
+ dal_bandwidth_manager_program_pix_dur(
+ dal_controller_get_bandwidth_manager(crtc),
+ dal_controller_get_id(crtc),
+ path_mode->mode.timing.pixel_clock);
+
+ /* EPR #: 412143 - [AOSP][LL] 4K single DP light up failed
+ * This call to enable_advanced_request is added for MPO. However, it
+ * causes this EPR. The real root cause is unknown. For now, we just
+ * comment it out.
+ * TODO: Investigate this issue and find the real root cause. */
+ /* dal_controller_set_advanced_request(crtc,
+ true, &path_mode->mode.timing); */
+
+ dal_controller_program_blanking(crtc, &path_mode->mode.timing);
+
+ dal_controller_set_fe_clock(crtc, true);
+
+ if (crtc != root_controller) {
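+ /* The display clock for a non-root (underlay) pipe is hard-coded
+ * here; the value is presumably in kHz (i.e. 643 MHz). */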
+ uint32_t display_clock = 643000;
+
+ dal_display_clock_set_clock(
+ dal_controller_get_display_clock(crtc), display_clock);
+
+ {
+ struct watermark_input_params params;
+
+ params.controller_id = dal_controller_get_id(crtc);
+
+ /*
+ * TODO setting pixel_format here as a temporary fix
+ * to force max watermarks. Fix properly once video
+ * playback through underlay is working.
+ */
+ params.surface_pixel_format = PIXEL_FORMAT_420BPP12;
+ dal_bandwidth_manager_program_watermark(
+ dal_controller_get_bandwidth_manager(crtc),
+ 1,
+ &params, display_clock);
+ dal_bandwidth_manager_program_display_mark(
+ dal_controller_get_bandwidth_manager(crtc),
+ 1,
+ &params,
+ display_clock);
+ }
+ }
+
+ if (plane_configs)
+ pl_cfg = plane_configs_vector_at_index(
+ plane_configs,
+ plane_id);
+
+
+ color_space = translate_to_color_space(
+ path_mode->mode.color_space);
+
+ program_adjustments(
+ hws,
+ path_mode,
+ build_params,
+ color_space,
+ hw_path_mode_id,
+ crtc);
+
+ /* program Scaler */
+ program_scaler(
+ hws,
+ path_mode,
+ &mode_info->timing,
+ build_params,
+ crtc,
+ dal_controller_get_bandwidth_manager(crtc),
+ hw_path_mode_id,
+ plane_id,
+ color_space);
+
+ dal_controller_set_blender_mode(
+ crtc,
+ plane->blnd_mode);
+
+ if (pl_cfg) {
+ program_alpha_mode(
+ crtc,
+ &pl_cfg->attributes.blend_flags,
+ path_mode->mode.timing.flags.PIXEL_ENCODING);
+
+ dal_controller_program_surface_config(
+ crtc,
+ &pl_cfg->config);
+ }
+
+ return HWSS_RESULT_OK;
+}
+
+static void configure_locking(struct display_path *dp, bool enable)
+{
+ uint8_t i;
+ uint8_t num_planes = dal_display_path_get_number_of_planes(dp);
+ struct display_path_plane *root_plane =
+ dal_display_path_get_plane_at_index(dp, 0);
+
+ /* main controller should be in mode 0 (master pipe) */
+ dal_controller_pipe_control_lock(
+ root_plane->controller,
+ PIPE_LOCK_CONTROL_MODE,
+ false);
+
+ /* The other controllers' mode should be 1 in order to make atomic
+ * locking/unlocking take effect. Please refer to the pseudocode for
+ * locks in BLND.doc. */
+ for (i = 1; i < num_planes; ++i) {
+ struct display_path_plane *plane =
+ dal_display_path_get_plane_at_index(dp, i);
+ struct controller *crtc = plane->controller;
+
+ dal_controller_pipe_control_lock(
+ crtc,
+ PIPE_LOCK_CONTROL_MODE,
+ enable);
+ }
+}
+
+/**
+ * Update the cursor position.
+ *
+ * Need to call dal_hw_sequencer_set_cursor_attributes first to program
+ * cursor surface address.
+ */
+enum hwss_result dal_hw_sequencer_set_cursor_position(
+ struct hw_sequencer *hws,
+ struct display_path *dp,
+ const struct cursor_position *position)
+{
+ struct controller *crtc = dal_display_path_get_controller(dp);
+
+ /* TODO implement functionality for paired controllers when needed */
+
+ if (crtc && dal_controller_set_cursor_position(crtc, position))
+ return HWSS_RESULT_OK;
+
+ return HWSS_RESULT_ERROR;
+}
+
+/**
+ * Update the cursor attributes and set cursor surface address
+ */
+enum hwss_result dal_hw_sequencer_set_cursor_attributes(
+ struct hw_sequencer *hws,
+ struct display_path *dp,
+ const struct cursor_attributes *attributes)
+{
+ struct controller *crtc = dal_display_path_get_controller(dp);
+
+ /* TODO implement functionality for paired controllers when needed */
+
+ if (crtc && dal_controller_set_cursor_attributes(crtc, attributes))
+ return HWSS_RESULT_OK;
+
+ return HWSS_RESULT_ERROR;
+}
+
+
+/**
+ * Program the Front End of the Pipe.
+ *
+ * The Back End was already programmed by dal_hw_sequencer_set_mode().
+ */
+enum hwss_result dal_hw_sequencer_set_plane_config(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t display_index)
+{
+ struct dal_context *dal_context = hws->dal_context;
+ uint32_t paths_num;
+ struct hwss_build_params *build_params;
+ union hwss_build_params_mask mask;
+
+ paths_num = dal_hw_path_mode_set_get_paths_number(path_set);
+
+ mask.all = 0;
+ mask.bits.SCALING_TAPS = true;
+ mask.bits.PLL_SETTINGS = true;
+ mask.bits.MIN_CLOCKS = true;
+ mask.bits.WATERMARK = true;
+ mask.bits.BANDWIDTH = true;
+ mask.bits.LINE_BUFFER = true;
+
+ build_params =
+ dal_hw_sequencer_prepare_path_parameters(
+ hws,
+ path_set,
+ mask,
+ false);
+
+ if (!build_params) {
+ dal_logger_write(dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: prepare_path_parameters failed!\n",
+ __func__);
+ return HWSS_RESULT_ERROR;
+ }
+
+ {
+ uint8_t i;
+ uint8_t hw_path_mode_id;
+ struct controller *root_controller;
+ struct hw_path_mode *path_mode;
+ struct display_path *dp;
+ uint8_t planes_num;
+ struct display_path_plane *plane;
+
+ for (i = 0; i < paths_num; ++i) {
+ path_mode =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_set, i);
+ dp = path_mode->display_path;
+
+ if (dal_display_path_get_display_index(dp) ==
+ display_index) {
+ break;
+ }
+ }
+ hw_path_mode_id = i;
+
+ if (hw_path_mode_id == paths_num) {
+ dal_logger_write(dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: can't find hw_path_mode for display index %d!\n",
+ __func__, display_index);
+ BREAK_TO_DEBUGGER();
+ return HWSS_RESULT_ERROR;
+ }
+
+ configure_locking(dp, true);
+
+ planes_num = dal_display_path_get_number_of_planes(dp);
+ root_controller = dal_display_path_get_controller(dp);
+
+ /* While a non-root controller is programmed we
+ * have to lock the root controller. */
+ dal_controller_pipe_control_lock(
+ root_controller,
+ PIPE_LOCK_CONTROL_GRAPHICS |
+ PIPE_LOCK_CONTROL_SCL |
+ PIPE_LOCK_CONTROL_BLENDER |
+ PIPE_LOCK_CONTROL_SURFACE,
+ true);
+
+ for (i = 0; i < planes_num; ++i) {
+ plane = dal_display_path_get_plane_at_index(dp, i);
+
+ set_path_mode_front_end(
+ hws,
+ path_mode,
+ hw_path_mode_id,
+ i,
+ plane,
+ build_params);
+ }
+
+ dal_controller_pipe_control_lock(
+ root_controller,
+ PIPE_LOCK_CONTROL_GRAPHICS |
+ PIPE_LOCK_CONTROL_SCL |
+ PIPE_LOCK_CONTROL_BLENDER |
+ PIPE_LOCK_CONTROL_SURFACE,
+ false);
+ }
+
+ dal_hw_sequencer_free_path_parameters(build_params);
+
+ return HWSS_RESULT_OK;
+}
+
+bool dal_hw_sequencer_update_plane_address(
+ struct hw_sequencer *hws,
+ struct display_path *dp,
+ uint32_t num_planes,
+ struct plane_addr_flip_info *info)
+{
+ uint32_t i;
+
+ struct controller *crtc = dal_display_path_get_controller(dp);
+
+ if (!crtc)
+ return false;
+
+ dal_controller_pipe_control_lock(
+ crtc,
+ PIPE_LOCK_CONTROL_SURFACE,
+ true);
+
+ for (i = 0; i < num_planes; i++) {
+ const struct plane_addr_flip_info *plane_info = NULL;
+ struct display_path_plane *plane;
+
+ plane_info = &info[i];
+ plane = dal_display_path_get_plane_at_index(dp, i);
+
+ if (!plane ||
+ !dal_controller_program_surface_flip_and_addr(
+ plane->controller,
+ plane_info)) {
+ /* release the surface lock before bailing out */
+ dal_controller_pipe_control_lock(
+ crtc, PIPE_LOCK_CONTROL_SURFACE, false);
+ return false;
+ }
+ }
+
+ dal_controller_pipe_control_lock(
+ crtc,
+ PIPE_LOCK_CONTROL_SURFACE,
+ false);
+
+ return true;
+}
+
+void dal_hw_sequencer_prepare_to_release_planes(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t display_index)
+{
+ struct controller *crtc;
+ uint8_t i;
+ uint8_t paths_num = dal_hw_path_mode_set_get_paths_number(path_set);
+ struct display_path *dp = NULL;
+
+ for (i = 0; i < paths_num; ++i) {
+ struct display_path *cur =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_set, i)->display_path;
+
+ if (dal_display_path_get_display_index(cur) ==
+ display_index) {
+ dp = cur;
+ break;
+ }
+ }
+
+ if (dp == NULL) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_ERROR,
+ LOG_MINOR_COMPONENT_HWSS,
+ "%s: can't find display_path for display index %d!\n",
+ __func__, display_index);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ crtc = dal_display_path_get_controller(dp);
+
+ dal_controller_pipe_control_lock(crtc, PIPE_LOCK_CONTROL_BLENDER, true);
+
+ dal_controller_set_blender_mode(crtc, BLENDER_MODE_CURRENT_PIPE);
+
+ dal_controller_pipe_control_lock(
+ crtc,
+ PIPE_LOCK_CONTROL_BLENDER,
+ false);
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.h b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.h
new file mode 100644
index 000000000000..571315892a07
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SEQUENCER_H__
+#define __DAL_HW_SEQUENCER_H__
+
+#include "include/hw_sequencer_interface.h"
+#include "include/controller_interface.h"
+#include "include/clock_source_interface.h"
+#include "include/display_path_interface.h"
+#include "include/hw_path_mode_set_interface.h"
+#include "include/audio_interface.h"
+
+#include "hw_sequencer_parameters.h"
+#include "hw_sync_control.h"
+
+struct hw_global_objects {
+ struct bandwidth_manager *bm;
+ struct dc_clock_generator *dccg;
+ struct display_clock *dc;
+};
+
+struct hw_vce_adjust_timing_params {
+ struct hw_crtc_timing *hw_crtc_timing;
+ struct overscan_info *hw_overscan;
+ uint32_t refresh_rate;
+ bool extend_vblank;
+ bool full_timing_adjustment;
+ bool vce_multi_instance;
+};
+
+struct display_path_objects {
+ struct encoder *upstream_encoder;
+ struct encoder *downstream_encoder;
+ struct connector *connector;
+ struct audio *audio;
+ enum engine_id engine;
+};
+
+struct hw_sequencer;
+struct hw_path_mode;
+
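+/* Per-DCE-version function hooks; expected to be populated by the
+ * ASIC-specific constructor (e.g. dal_hw_sequencer_dce110_create). */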
+struct hw_sequencer_funcs {
+ void (*set_displaymark)(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ struct watermark_input_params *wm_params,
+ uint32_t param_number);
+ enum clocks_state (*get_required_clocks_state)(
+ struct hw_sequencer *hws,
+ struct display_clock *display_clock,
+ struct hw_path_mode_set *set,
+ const struct minimum_clocks_calculation_result *min_clk_result);
+ void (*set_safe_displaymark)(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ struct watermark_input_params *wm_params,
+ uint32_t params_number);
+ void (*set_display_clock)(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ const struct minimum_clocks_calculation_result *min_clk_result);
+ void (*setup_audio_wall_dto)(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode_set *path_set,
+ const struct hwss_build_params *build_params);
+ void (*setup_timing_and_blender)(
+ struct hw_sequencer *hws,
+ struct controller *current_controller,
+ const struct hw_path_mode *path_mode,
+ struct hw_crtc_timing *crtc_timing);
+ bool (*setup_line_buffer_pixel_depth)(
+ struct hw_sequencer *hws,
+ struct controller *controller,
+ enum lb_pixel_depth depth,
+ bool blank);
+ void (*apply_vce_timing_adjustment)(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *vce_adj_timing_params);
+ uint32_t (*get_dp_dto_source_clock)(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+ void (*start_gtc_counter)(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode_set *set);
+ void (*destroy)(struct hw_sequencer **hws);
+ enum hwss_result (*hwss_enable_link)(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in);
+};
+
+struct hw_sequencer {
+ struct dal_context *dal_context;
+ const struct hw_sequencer_funcs *funcs;
+ struct adapter_service *as;
+ struct hw_sync_control *sync_control;
+ bool use_pp_lib;
+};
+
+bool dal_hw_sequencer_construct_base(
+ struct hw_sequencer *hws,
+ struct hws_init_data *init_data);
+
+bool dal_hw_sequencer_get_global_objects(
+ const struct hw_path_mode_set *path_set,
+ struct hw_global_objects *obj);
+
+enum signal_type dal_hw_sequencer_get_timing_adjusted_signal(
+ const struct hw_path_mode *path_mode,
+ enum signal_type signal);
+
+enum signal_type dal_hw_sequencer_get_asic_signal(
+ const struct hw_path_mode *path_mode);
+
+void dal_hw_sequencer_get_objects(
+ struct display_path *dp,
+ struct display_path_objects *objs);
+
+enum engine_id dal_hw_sequencer_get_engine_id(
+ struct display_path *dp);
+
+void dal_hw_sequencer_build_audio_output(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ enum engine_id engine_id,
+ const struct pll_settings *pll_settings,
+ struct audio_output *audio_output);
+
+void dal_hw_sequencer_extend_vblank(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *params);
+
+void dal_hw_sequencer_extend_hblank(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *params);
+
+void dal_hw_sequencer_wireless_full_timing_adjustment(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *params);
+
+void dal_hw_sequencer_get_pixel_clock_parameters(
+ const struct hw_path_mode *path_mode,
+ struct pixel_clk_params *pixel_clk_params);
+
+uint32_t dal_hw_sequencer_translate_to_graphics_bpp(
+ enum pixel_format pixel_format);
+
+uint32_t dal_hw_sequencer_translate_to_backend_bpp(
+ enum hw_overlay_backend_bpp backend_bpp);
+
+enum dc_deep_color_depth dal_hw_sequencer_translate_to_dec_deep_color_depth(
+ enum hw_color_depth hw_color_depth);
+
+
+enum hwss_result dal_hw_sequencer_enable_link_base(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in);
+
+uint32_t dal_hw_sequencer_translate_to_lb_color_depth(
+ enum lb_pixel_depth lb_color_depth);
+
+enum csc_color_depth dal_hw_sequencer_translate_to_csc_color_depth(
+ enum hw_color_depth color_depth);
+
+/*enum pixel_format dal_hw_sequencer_translate_to_pixel_format(
+ enum hw_pixel_format pixel_format);*/
+
+#endif /* __DAL_HW_SEQUENCER_H__ */
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_adjustments.c b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_adjustments.c
new file mode 100644
index 000000000000..895e83a375ea
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_adjustments.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/hw_sequencer_interface.h"
+#include "include/hw_sequencer_types.h"
+#include "include/hw_adjustment_types.h"
+#include "include/display_path_interface.h"
+#include "include/hw_adjustment_set.h"
+#include "include/adjustment_types.h"
+
+#include "hw_sequencer.h"
+
+struct hw_underscan_parameters {
+ struct scaling_tap_info *taps;
+ struct adjustment_factor scale_ratio_hp_factor;
+ struct adjustment_factor scale_ratio_lp_factor;
+ struct sharpness_adjustment sharp_gain;
+ uint32_t path_id;
+ struct hw_path_mode_set *path_set;
+ struct hw_path_mode *hw_path_mode;
+ struct minimum_clocks_calculation_result mini_clk_result;
+ struct pll_settings *pll_setting_array;
+ struct min_clock_params *min_clk_array;
+ struct watermark_input_params *watermark_input_array;
+ struct lb_params_data *line_buffer_array;
+};
+
+static struct hw_path_mode *get_required_mode_path(
+ struct hw_path_mode_set *set,
+ enum hw_path_action action,
+ uint32_t *path_id)
+{
+ struct hw_path_mode *path_mode;
+ uint32_t i;
+
+ for (i = 0; i < dal_hw_path_mode_set_get_paths_number(set); i++) {
+ path_mode = dal_hw_path_mode_set_get_path_by_index(set, i);
+ if (path_mode->action == action) {
+ if (path_id)
+ *path_id = i;
+ return path_mode;
+ }
+ }
+ return NULL;
+}
+
+static enum hwss_result program_overscan(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ struct hw_underscan_parameters *param,
+ struct hwss_build_params *build_param,
+ struct display_path *display_path,
+ bool update_avi_info_frame,
+ uint32_t param_count)
+{
+ struct scaler_data scaler_data;
+ enum clocks_state required_clocks_state;
+ struct hw_global_objects g_obj = { NULL };
+ struct hw_crtc_timing hw_crtc_timing = {0};
+
+ struct controller *controller = dal_display_path_get_controller(
+ param->hw_path_mode->display_path);
+
+ bool scaler_will_be_enabled;
+ bool scaler_has_been_enabled = dal_controller_is_scaling_enabled(
+ controller);
+ /* For now always use the parameters of plane 0 */
+ uint32_t plane = 0;
+
+ dal_memset(&scaler_data, 0, sizeof(scaler_data));
+ scaler_data.hw_crtc_timing = &hw_crtc_timing;
+ if (param->hw_path_mode)
+ dal_hw_sequencer_build_scaler_parameter(
+ param->hw_path_mode,
+ &param->taps[plane],
+ true,
+ &scaler_data);
+ scaler_data.scale_ratio_hp_factor = param->scale_ratio_hp_factor;
+ scaler_data.scale_ratio_lp_factor = param->scale_ratio_lp_factor;
+ scaler_data.sharp_gain = param->sharp_gain;
+ scaler_will_be_enabled = (scaler_data.taps.h_taps > 1 ||
+ scaler_data.taps.v_taps > 1);
+
+ dal_hw_sequencer_get_global_objects(param->path_set, &g_obj);
+
+ /* Skip the HP/LP factor; it has not been needed since DCE8. */
+ hws->funcs->set_safe_displaymark(hws, set,
+ build_param->wm_input_params, build_param->params_num);
+
+ /* Use minimum clocks for the mode set.
+ * (because minimum clocks == maximum power efficiency) */
+ /* Raise Required Clock State PRIOR to programming of state-dependent
+ * clocks. */
+ required_clocks_state = hws->funcs->get_required_clocks_state(hws,
+ g_obj.dc, set, &build_param->min_clock_result);
+
+ /* Call PPLib and Bandwidth Manager. */
+ if (true == hws->use_pp_lib &&
+ HWSS_RESULT_OK != dal_hw_sequencer_set_clocks_and_clock_state(
+ hws,
+ &g_obj,
+ &build_param->min_clock_result,
+ required_clocks_state)) {
+ /* should never happen */
+ return HWSS_RESULT_ERROR;
+ }
+
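+ /* Note the ordering below: when scaling is being enabled, the display
+ * clock is raised before the scaler is programmed; when it is being
+ * disabled, the scaler is programmed first and the display clock is
+ * updated afterwards. */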
+ if (!scaler_has_been_enabled && scaler_will_be_enabled) {
+ hws->funcs->set_display_clock(
+ hws, set, &build_param->min_clock_result);
+ dal_hw_sequencer_enable_line_buffer_power_gating(
+ dal_controller_get_line_buffer(controller),
+ dal_controller_get_id(controller),
+ scaler_data.pixel_type,
+ param->hw_path_mode->mode.scaling_info.src.width,
+ param->hw_path_mode->mode.scaling_info.dst.width,
+ &param->taps[plane],
+ param->line_buffer_array[plane].depth,
+ param->hw_path_mode->mode.scaling_info.src.height,
+ param->hw_path_mode->mode.scaling_info.dst.height,
+ param->hw_path_mode->mode.timing.flags.INTERLACED);
+
+ hws->funcs->setup_line_buffer_pixel_depth(
+ hws,
+ controller,
+ param->line_buffer_array[plane].depth,
+ true);
+
+ dal_controller_set_scaler_wrapper(controller, &scaler_data);
+ } else if (scaler_has_been_enabled && !scaler_will_be_enabled) {
+ dal_controller_set_scaler_wrapper(controller, &scaler_data);
+ hws->funcs->setup_line_buffer_pixel_depth(
+ hws,
+ controller,
+ param->line_buffer_array[plane].depth,
+ true);
+
+ dal_controller_wait_for_vblank(controller);
+ dal_hw_sequencer_enable_line_buffer_power_gating(
+ dal_controller_get_line_buffer(controller),
+ dal_controller_get_id(controller),
+ scaler_data.pixel_type,
+ param->hw_path_mode->mode.scaling_info.src.width,
+ param->hw_path_mode->mode.scaling_info.dst.width,
+ &param->taps[plane],
+ param->line_buffer_array[plane].depth,
+ param->hw_path_mode->mode.scaling_info.src.height,
+ param->hw_path_mode->mode.scaling_info.dst.height,
+ param->hw_path_mode->mode.timing.flags.INTERLACED);
+
+ hws->funcs->set_display_clock(
+ hws, set, &build_param->min_clock_result);
+
+ } else if (scaler_has_been_enabled && scaler_will_be_enabled) {
+ hws->funcs->set_display_clock(
+ hws, set, &build_param->min_clock_result);
+
+ dal_controller_set_scaler_wrapper(controller, &scaler_data);
+ }
+
+ hws->funcs->set_displaymark(
+ hws,
+ set,
+ build_param->wm_input_params,
+ build_param->params_num);
+
+ if (update_avi_info_frame)
+ dal_hw_sequencer_update_info_frame(param->hw_path_mode);
+ return HWSS_RESULT_OK;
+}
+
+enum hwss_result dal_hw_sequencer_set_overscan_adj(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ struct hw_underscan_adjustment_data *hw_underscan)
+{
+ struct hwss_build_params *build_params = NULL;
+ union hwss_build_params_mask params_mask;
+ struct hw_underscan_parameters underscan_params;
+ const struct hw_underscan_adjustment *value;
+
+ if (set == NULL || hw_underscan->hw_adj_id != HW_ADJUSTMENT_ID_OVERSCAN)
+ return HWSS_RESULT_ERROR;
+
+ value = &hw_underscan->hw_underscan_adj;
+ if (!value)
+ return HWSS_RESULT_ERROR;
+
+ dal_memset(&underscan_params, 0, sizeof(underscan_params));
+
+ underscan_params.hw_path_mode = get_required_mode_path(
+ set,
+ HW_PATH_ACTION_SET_ADJUSTMENT,
+ &underscan_params.path_id);
+ if (!underscan_params.hw_path_mode)
+ return HWSS_RESULT_ERROR;
+
+ params_mask.all = 0;
+ params_mask.bits.SCALING_TAPS = true;
+ params_mask.bits.PLL_SETTINGS = true;
+ params_mask.bits.MIN_CLOCKS = true;
+ params_mask.bits.WATERMARK = true;
+ params_mask.bits.BANDWIDTH = true;
+ params_mask.bits.LINE_BUFFER = true;
+
+ build_params = dal_hw_sequencer_prepare_path_parameters(
+ hws,
+ set,
+ params_mask,
+ false);
+
+ if (NULL == build_params)
+ return HWSS_RESULT_ERROR;
+
+ underscan_params.path_set = set;
+ underscan_params.pll_setting_array =
+ build_params->pll_settings_params;
+ underscan_params.watermark_input_array =
+ build_params->wm_input_params;
+ underscan_params.min_clk_array =
+ build_params->min_clock_params;
+ underscan_params.line_buffer_array =
+ build_params->line_buffer_params[underscan_params.path_id];
+ underscan_params.taps =
+ build_params->scaling_taps_params[underscan_params.path_id];
+ underscan_params.mini_clk_result =
+ build_params->min_clock_result;
+
+ underscan_params.scale_ratio_hp_factor.adjust =
+ value->deflicker.hp_factor;
+ underscan_params.scale_ratio_hp_factor.divider =
+ value->deflicker.hp_divider;
+ underscan_params.scale_ratio_lp_factor.adjust =
+ value->deflicker.lp_factor;
+ underscan_params.scale_ratio_lp_factor.divider =
+ value->deflicker.lp_divider;
+
+ underscan_params.sharp_gain.sharpness =
+ value->deflicker.sharpness;
+ underscan_params.sharp_gain.enable_sharpening =
+ value->deflicker.enable_sharpening;
+
+ if (program_overscan(
+ hws,
+ set,
+ &underscan_params,
+ build_params,
+ underscan_params.hw_path_mode->display_path,
+ true,
+ build_params->params_num) != HWSS_RESULT_OK) {
+ dal_hw_sequencer_free_path_parameters(build_params);
+ return HWSS_RESULT_ERROR;
+ }
+
+ dal_hw_sequencer_free_path_parameters(build_params);
+ return HWSS_RESULT_OK;
+}
+
+bool dal_hw_sequencer_is_support_custom_gamut_adj(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ enum hw_surface_type surface_type)
+{
+ struct controller *controller = NULL;
+
+ if (!disp_path)
+ return false;
+
+ controller =
+ dal_display_path_get_controller(disp_path);
+
+ if (!controller)
+ return false;
+
+ return dal_controller_is_supported_custom_gamut_adjustment(
+ controller,
+ (surface_type == HW_GRAPHIC_SURFACE ?
+ GRAPHIC_SURFACE :
+ OVERLAY_SURFACE));
+}
+
+enum hwss_result dal_hw_sequencer_get_hw_color_adj_range(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ struct hw_color_control_range *hw_color_range)
+{
+ struct controller *controller = NULL;
+
+ if (!disp_path)
+ return HWSS_RESULT_ERROR;
+ if (!hw_color_range)
+ return HWSS_RESULT_ERROR;
+
+ controller = dal_display_path_get_controller(disp_path);
+ if (!controller)
+ return HWSS_RESULT_ERROR;
+
+ dal_controller_get_grph_adjustment_range(
+ controller,
+ GRPH_ADJUSTMENT_HUE,
+ &hw_color_range->hue);
+ dal_controller_get_grph_adjustment_range(
+ controller,
+ GRPH_ADJUSTMENT_SATURATION,
+ &hw_color_range->saturation);
+ dal_controller_get_grph_adjustment_range(
+ controller,
+ GRPH_ADJUSTMENT_BRIGHTNESS,
+ &hw_color_range->brightness);
+ dal_controller_get_grph_adjustment_range(
+ controller,
+ GRPH_ADJUSTMENT_CONTRAST,
+ &hw_color_range->contrast);
+ dal_controller_get_grph_adjustment_range(
+ controller,
+ GRPH_ADJUSTMENT_COLOR_TEMPERATURE,
+ &hw_color_range->temperature);
+ return HWSS_RESULT_OK;
+}
+
+bool dal_hw_sequencer_is_support_custom_gamma_coefficients(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ enum hw_surface_type surface_type)
+{
+ struct controller *controller = NULL;
+
+ if (!disp_path)
+ return false;
+
+ controller = dal_display_path_get_controller(disp_path);
+ if (!controller)
+ return false;
+
+ return dal_controller_is_supported_custom_gamma_coefficients(
+ controller,
+ (surface_type == HW_GRAPHIC_SURFACE ?
+ GRAPHIC_SURFACE :
+ OVERLAY_SURFACE));
+}
+
+static enum ds_color_space translation_to_color_space(
+ enum hw_color_space hw_color_space)
+{
+ enum ds_color_space color_space;
+
+ switch (hw_color_space) {
+ case HW_COLOR_SPACE_SRGB_FULL_RANGE:
+ color_space = DS_COLOR_SPACE_SRGB_FULLRANGE;
+ break;
+ case HW_COLOR_SPACE_SRGB_LIMITED_RANGE:
+ color_space = DS_COLOR_SPACE_SRGB_LIMITEDRANGE;
+ break;
+ case HW_COLOR_SPACE_YPBPR601:
+ color_space = DS_COLOR_SPACE_YPBPR601;
+ break;
+ case HW_COLOR_SPACE_YPBPR709:
+ color_space = DS_COLOR_SPACE_YPBPR709;
+ break;
+ case HW_COLOR_SPACE_YCBCR601:
+ color_space = DS_COLOR_SPACE_YCBCR601;
+ break;
+ case HW_COLOR_SPACE_YCBCR709:
+ color_space = DS_COLOR_SPACE_YCBCR709;
+ break;
+ case HW_COLOR_SPACE_NMVPU_SUPERAA:
+ color_space = DS_COLOR_SPACE_NMVPU_SUPERAA;
+ break;
+ default:
+ color_space = DS_COLOR_SPACE_UNKNOWN;
+ break;
+ }
+ return color_space;
+}
+
+static enum csc_color_depth translation_to_csc_color_depth(
+ enum hw_color_depth hw_color_depth)
+{
+ enum csc_color_depth csc_color_depth;
+
+ switch (hw_color_depth) {
+ case HW_COLOR_DEPTH_666:
+ csc_color_depth = CSC_COLOR_DEPTH_666;
+ break;
+ case HW_COLOR_DEPTH_888:
+ csc_color_depth = CSC_COLOR_DEPTH_888;
+ break;
+ case HW_COLOR_DEPTH_101010:
+ csc_color_depth = CSC_COLOR_DEPTH_101010;
+ break;
+ case HW_COLOR_DEPTH_121212:
+ csc_color_depth = CSC_COLOR_DEPTH_121212;
+ break;
+ case HW_COLOR_DEPTH_141414:
+ csc_color_depth = CSC_COLOR_DEPTH_141414;
+ break;
+ case HW_COLOR_DEPTH_161616:
+ csc_color_depth = CSC_COLOR_DEPTH_161616;
+ break;
+ default:
+ csc_color_depth = CSC_COLOR_DEPTH_888;
+ break;
+ }
+ return csc_color_depth;
+}
+
+enum hwss_result dal_hw_sequencer_build_csc_adjust(
+ struct hw_sequencer *hws,
+ struct hw_adjustment_color_control *adjustment,
+ struct grph_csc_adjustment *adjust)
+{
+ if (!adjustment)
+ return HWSS_RESULT_ERROR;
+
+ if (adjustment->temperature_divider == 0 ||
+ adjustment->adjust_divider == 0)
+ return HWSS_RESULT_ERROR;
+
+ adjust->c_space = translation_to_color_space(
+ adjustment->color_space);
+ adjust->color_depth = translation_to_csc_color_depth(
+ adjustment->color_depth);
+ adjust->surface_pixel_format = adjustment->surface_pixel_format;
+
+ switch (adjustment->option) {
+ case HWS_COLOR_MATRIX_HW_DEFAULT:
+ adjust->color_adjust_option = GRPH_COLOR_MATRIX_HW_DEFAULT;
+ break;
+ case HWS_COLOR_MATRIX_SW:
+ adjust->color_adjust_option = GRPH_COLOR_MATRIX_SW;
+ break;
+ default:
+ adjust->color_adjust_option = GRPH_COLOR_MATRIX_HW_DEFAULT;
+ break;
+ }
+
+ adjust->grph_cont = adjustment->contrast;
+ adjust->grph_sat = adjustment->saturation;
+ adjust->grph_bright = adjustment->brightness;
+ adjust->grph_hue = adjustment->hue;
+ adjust->adjust_divider = adjustment->adjust_divider;
+ adjust->temperature_divider = adjustment->temperature_divider;
+ adjust->csc_adjust_type = GRAPHICS_CSC_ADJUST_TYPE_SW;
+ adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ dal_memmove(adjust->temperature_matrix,
+ adjustment->temperature_matrix,
+ sizeof(adjust->temperature_matrix));
+ return HWSS_RESULT_OK;
+}
+
+void dal_hw_sequencer_build_gamma_ramp_adj_params(
+ const struct hw_adjustment_gamma_ramp *adjustment,
+ struct gamma_parameters *gamma_param,
+ struct gamma_ramp *ramp)
+{
+ ramp->type = GAMMA_RAMP_DEFAULT;
+ ramp->size = adjustment->size;
+
+ switch (adjustment->type) {
+ case HW_GAMMA_RAMP_UNITIALIZED:
+ ramp->type = GAMMA_RAMP_UNINITIALIZED;
+ break;
+ case HW_GAMMA_RAMP_DEFAULT:
+ ramp->type = GAMMA_RAMP_DEFAULT;
+ break;
+ case HW_GAMMA_RAMP_RBG_256x3x16:
+ ramp->type = GAMMA_RAMP_RBG256X3X16;
+ dal_memmove(&ramp->gamma_ramp_rgb256x3x16,
+ &adjustment->gamma_ramp_rgb256x3x16,
+ adjustment->size);
+ break;
+ default:
+ break;
+ }
+ /* translate parameters */
+ gamma_param->surface_pixel_format = adjustment->surface_pixel_format;
+
+ translate_from_hw_to_controller_regamma(
+ &adjustment->regamma,
+ &gamma_param->regamma);
+
+ gamma_param->regamma_adjust_type = GRAPHICS_REGAMMA_ADJUST_SW;
+ gamma_param->degamma_adjust_type = GRAPHICS_REGAMMA_ADJUST_SW;
+
+ gamma_param->selected_gamma_lut = GRAPHICS_GAMMA_LUT_LEGACY;
+
+ /* TODO support non-legacy gamma */
+
+ gamma_param->disable_adjustments = false;
+ gamma_param->flag.bits.config_is_changed =
+ adjustment->flag.bits.config_is_changed;
+ gamma_param->flag.bits.regamma_update =
+ adjustment->flag.bits.regamma_update;
+ gamma_param->flag.bits.gamma_update =
+ adjustment->flag.bits.gamma_update;
+}
+
+void translate_from_hw_to_controller_regamma(
+ const struct hw_regamma_lut *hw_regamma,
+ struct regamma_lut *regamma)
+{
+ unsigned int i;
+
+ regamma->features.bits.GRAPHICS_DEGAMMA_SRGB =
+ hw_regamma->flags.bits.graphics_degamma_srgb;
+ regamma->features.bits.OVERLAY_DEGAMMA_SRGB =
+ hw_regamma->flags.bits.overlay_degamma_srgb;
+ regamma->features.bits.GAMMA_RAMP_ARRAY =
+ hw_regamma->flags.bits.gamma_ramp_array;
+
+ if (hw_regamma->flags.bits.gamma_ramp_array == 1) {
+ regamma->features.bits.APPLY_DEGAMMA =
+ hw_regamma->flags.bits.apply_degamma;
+
+ for (i = 0; i < 256 * 3; i++)
+ regamma->regamma_ramp.gamma[i] =
+ hw_regamma->gamma.gamma[i];
+
+ } else {
+ regamma->features.bits.APPLY_DEGAMMA = 0;
+
+ for (i = 0; i < 3; i++) {
+ regamma->gamma_coeff.a0[i] = hw_regamma->coeff.a0[i];
+ regamma->gamma_coeff.a1[i] = hw_regamma->coeff.a1[i];
+ regamma->gamma_coeff.a2[i] = hw_regamma->coeff.a2[i];
+ regamma->gamma_coeff.a3[i] = hw_regamma->coeff.a3[i];
+ regamma->gamma_coeff.gamma[i] =
+ hw_regamma->coeff.gamma[i];
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_helpers.c b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_helpers.c
new file mode 100644
index 000000000000..baec1a6e2fc7
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_helpers.c
@@ -0,0 +1,594 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/logger_interface.h"
+#include "include/audio_types.h"
+#include "include/bandwidth_manager_interface.h"
+#include "include/encoder_interface.h"
+
+#include "hw_sequencer.h"
+
+static enum audio_stream_color_depth translate_to_stream_color_depth(
+ enum hw_color_depth color_depth)
+{
+ switch (color_depth) {
+ case HW_COLOR_DEPTH_888:
+ return STREAM_COLOR_DEPTH_24;
+ case HW_COLOR_DEPTH_101010:
+ return STREAM_COLOR_DEPTH_30;
+ case HW_COLOR_DEPTH_121212:
+ return STREAM_COLOR_DEPTH_36;
+ case HW_COLOR_DEPTH_161616:
+ return STREAM_COLOR_DEPTH_48;
+ default:
+ return STREAM_COLOR_DEPTH_24;
+ }
+}
+
+static enum audio_dto_source translate_to_dto_source(enum controller_id crtc_id)
+{
+ switch (crtc_id) {
+ case CONTROLLER_ID_D0:
+ return DTO_SOURCE_ID0;
+ case CONTROLLER_ID_D1:
+ return DTO_SOURCE_ID1;
+ case CONTROLLER_ID_D2:
+ return DTO_SOURCE_ID2;
+ case CONTROLLER_ID_D3:
+ return DTO_SOURCE_ID3;
+ case CONTROLLER_ID_D4:
+ return DTO_SOURCE_ID4;
+ case CONTROLLER_ID_D5:
+ return DTO_SOURCE_ID5;
+ default:
+ return DTO_SOURCE_UNKNOWN;
+ }
+}
+
+bool dal_hw_sequencer_get_global_objects(
+ const struct hw_path_mode_set *path_set,
+ struct hw_global_objects *obj)
+{
+ const struct hw_path_mode *path_mode;
+ struct controller *crtc;
+
+ if (!path_set || !obj)
+ return false;
+
+ path_mode = dal_hw_path_mode_set_get_path_by_index(path_set, 0);
+
+ if (!path_mode)
+ return false;
+
+ crtc = dal_display_path_get_controller(path_mode->display_path);
+
+ if (crtc) {
+ obj->bm = dal_controller_get_bandwidth_manager(crtc);
+ obj->dc = dal_controller_get_display_clock(crtc);
+ obj->dccg = dal_controller_get_dc_clock_generator(crtc);
+
+ return true;
+ }
+
+ return false;
+}
+
+enum signal_type dal_hw_sequencer_get_timing_adjusted_signal(
+ const struct hw_path_mode *path_mode,
+ enum signal_type signal)
+{
+ /* Check whether the pixel clock fits within single-link bandwidth.
+ * For DVI also check for High-Color timing. */
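+ /* For example, assuming a single-link TMDS limit of 165000 kHz, a
+ * 1080p timing at 148500 kHz with less than 10 bpc color on a DVI
+ * dual-link path would be downgraded to single link here. */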
+ if (path_mode->mode.timing.pixel_clock <= TMDS_MAX_PIXEL_CLOCK_IN_KHZ) {
+ if (signal == SIGNAL_TYPE_DVI_DUAL_LINK &&
+ path_mode->mode.timing.flags.COLOR_DEPTH <
+ HW_COLOR_DEPTH_101010)
+ signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ }
+
+ return signal;
+}
+
+enum signal_type dal_hw_sequencer_get_asic_signal(
+ const struct hw_path_mode *path_mode)
+{
+ return dal_hw_sequencer_get_timing_adjusted_signal(
+ path_mode,
+ dal_display_path_get_config_signal(
+ path_mode->display_path, ASIC_LINK_INDEX));
+}
+
+
+void dal_hw_sequencer_get_objects(
+ struct display_path *dp,
+ struct display_path_objects *objs)
+{
+ uint32_t i;
+ uint32_t links_number = dal_display_path_get_number_of_links(dp);
+
+ dal_memset(objs, 0, sizeof(struct display_path_objects));
+
+ for (i = 0; i < links_number; ++i) {
+ if (dal_display_path_is_link_active(dp, i)) {
+ objs->upstream_encoder =
+ dal_display_path_get_upstream_encoder(dp, i);
+ objs->downstream_encoder =
+ dal_display_path_get_downstream_encoder(dp, i);
+ objs->audio = dal_display_path_get_audio(dp, i);
+ objs->engine =
+ dal_display_path_get_stream_engine(dp, i);
+ break;
+ }
+ }
+
+ objs->connector = dal_display_path_get_connector(dp);
+}
+
+enum engine_id dal_hw_sequencer_get_engine_id(struct display_path *dp)
+{
+ uint32_t i;
+ uint32_t links_number = dal_display_path_get_number_of_links(dp);
+
+ for (i = 0; i < links_number; ++i) {
+ if (dal_display_path_get_stream_engine(dp, i) !=
+ ENGINE_ID_UNKNOWN) {
+ return dal_display_path_get_stream_engine(dp, i);
+ }
+ }
+ BREAK_TO_DEBUGGER();
+ return ENGINE_ID_UNKNOWN;
+}
+
+void dal_hw_sequencer_build_audio_output(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ enum engine_id engine_id,
+ const struct pll_settings *pll_settings,
+ struct audio_output *audio_output)
+{
+ enum signal_type asic_signal =
+ dal_hw_sequencer_get_asic_signal(path_mode);
+
+ audio_output->engine_id = engine_id;
+ audio_output->signal = asic_signal;
+
+ audio_output->crtc_info.h_total =
+ path_mode->mode.timing.h_total;
+
+ /* Audio packets are sent during the actual CRTC blanking of the
+ * physical signal, so we need to specify the actual active portion */
+ audio_output->crtc_info.h_active =
+ path_mode->mode.timing.h_addressable
+ + path_mode->mode.timing.h_overscan_left
+ + path_mode->mode.timing.h_overscan_right;
+ audio_output->crtc_info.v_active =
+ path_mode->mode.timing.v_addressable
+ + path_mode->mode.timing.v_overscan_top
+ + path_mode->mode.timing.v_overscan_bottom;
+ audio_output->crtc_info.pixel_repetition =
+ path_mode->mode.timing.flags.PIXEL_REPETITION;
+ audio_output->crtc_info.interlaced =
+ path_mode->mode.timing.flags.INTERLACED;
+ audio_output->crtc_info.refresh_rate = path_mode->mode.refresh_rate;
+ audio_output->crtc_info.color_depth = translate_to_stream_color_depth(
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+
+ audio_output->crtc_info.requested_pixel_clock =
+ path_mode->mode.timing.pixel_clock;
+ audio_output->crtc_info.calculated_pixel_clock =
+ path_mode->mode.timing.pixel_clock;
+
+ audio_output->pll_info.dp_dto_source_clock_in_khz =
+ hws->funcs->get_dp_dto_source_clock(
+ hws,
+ path_mode->display_path);
+ audio_output->pll_info.feed_back_divider =
+ pll_settings->feedback_divider;
+ audio_output->pll_info.dto_source =
+ translate_to_dto_source(
+ dal_controller_get_id(
+ dal_display_path_get_controller(
+ path_mode->display_path)));
+ audio_output->pll_info.ss_enabled =
+ dal_display_path_is_ss_supported(
+ path_mode->display_path);
+ audio_output->pll_info.ss_percentage = pll_settings->ss_percentage;
+}
+
+/*
+ * dal_hw_sequencer_extend_vblank
+ *
+ * This function stretches the vblank of the timing and increases the pixel
+ * clock to maintain the same refresh rate. This is required only for the VCE
+ * display path, to give VCE additional blank time to process encode jobs.
+ *
+ * @param params timing parameters to adjust for VCE timing
+ */
+void dal_hw_sequencer_extend_vblank(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *params)
+{
+ uint32_t h_total_60_hz;
+ uint32_t pixel_clk_60_hz;
+ uint32_t h_sync_start_60_hz;
+
+ uint32_t new_v_total;
+ uint32_t new_pixel_clock;
+
+ /* Need to update timing for 1080p, 720p and 480p */
+ if (params->hw_crtc_timing->h_addressable <= 720) {
+ /* Using the 720p pixel clock for 60Hz */
+ h_total_60_hz = 900;
+ h_sync_start_60_hz = 760;
+ pixel_clk_60_hz = 74250;
+ } else if (params->hw_crtc_timing->h_addressable <= 1280) {
+ h_total_60_hz = 1800;
+ h_sync_start_60_hz = 1390;
+ pixel_clk_60_hz = 148500;
+ } else if (params->hw_crtc_timing->h_addressable <= 1920) {
+ h_total_60_hz = 2200;
+ h_sync_start_60_hz = 2008;
+ pixel_clk_60_hz = 148500;
+ } else {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "The horizontal timing is out of range");
+ return;
+ }
+
+ new_v_total = (pixel_clk_60_hz * 1000) / (h_total_60_hz *
+ params->refresh_rate);
+ new_pixel_clock = new_v_total * h_total_60_hz * params->refresh_rate
+ / 1000;
+
+ /*
+ * In most cases the new pixel clock will be the same as the original
+ * one, except for 24Hz timings, which have a rounding error.
+ */
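+ /*
+ * Example with the 1920-wide values above: at refresh_rate = 60,
+ * new_v_total = 148500000 / (2200 * 60) = 1125 and the pixel clock
+ * stays at 148500 kHz; at refresh_rate = 24, new_v_total = 2812 and
+ * the pixel clock becomes 148473 kHz due to the integer rounding.
+ */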
+ params->hw_crtc_timing->pixel_clock = new_pixel_clock;
+
+ params->hw_crtc_timing->h_total = h_total_60_hz;
+ params->hw_crtc_timing->h_sync_start = h_sync_start_60_hz;
+
+ /* Move front porch as far back as possible */
+ params->hw_crtc_timing->v_total = new_v_total;
+ params->hw_crtc_timing->v_sync_start = params->hw_crtc_timing->v_total
+ - params->hw_crtc_timing->v_sync_width - 4;
+
+ /* These params have to be updated to match the verticalTotal */
+ params->hw_crtc_timing->ranged_timing.vertical_total_min =
+ params->hw_crtc_timing->v_total;
+ params->hw_crtc_timing->ranged_timing.vertical_total_max =
+ params->hw_crtc_timing->v_total;
+}
+
+/*
+ * dal_hw_sequencer_extend_hblank
+ *
+ * This function stretches the hblank of the timing by fixing the pixel clock
+ * and vblank to constants based on the 60Hz version of the CEA timing.
+ *
+ * @param params timing parameters to adjust for VCE timing
+ */
+void dal_hw_sequencer_extend_hblank(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *params)
+{
+ uint32_t pixel_clk_60_hz;
+ uint32_t h_sync_start_60_hz;
+ uint32_t v_blank;
+
+ uint32_t new_v_total;
+ uint32_t new_h_total;
+ uint32_t new_pixel_clock;
+
+ /* Need to update timing for 1080p, 720p and 480p */
+ if (params->hw_crtc_timing->h_addressable <= 720) {
+ /* Using the 720p pixel clock for 60Hz */
+ h_sync_start_60_hz = 760;
+ pixel_clk_60_hz = 74250;
+ v_blank = 30;
+ } else if (params->hw_crtc_timing->h_addressable <= 1280) {
+ h_sync_start_60_hz = 1390;
+ pixel_clk_60_hz = 148500;
+ v_blank = 30;
+ } else if (params->hw_crtc_timing->h_addressable <= 1920) {
+ h_sync_start_60_hz = 2008;
+ pixel_clk_60_hz = 148500;
+ v_blank = 45;
+ } else {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+			"The horizontal timing is out of range");
+ return;
+ }
+
+ new_v_total = params->hw_crtc_timing->v_addressable + v_blank;
+ new_h_total = (pixel_clk_60_hz * 1000) /
+ (new_v_total * params->refresh_rate);
+ new_pixel_clock = new_v_total * new_h_total * params->refresh_rate /
+ 1000;
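+	/* Illustrative example (assuming refresh_rate is in Hz): for a 1080p
+	 * path at 30 Hz, new_v_total = 1080 + 45 = 1125, new_h_total =
+	 * 148500 * 1000 / (1125 * 30) = 4400, and new_pixel_clock =
+	 * 1125 * 4400 * 30 / 1000 = 148500 kHz.
+	 */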
+
+ if (new_h_total <= params->hw_crtc_timing->h_addressable) {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "The horizontal timing is out of range");
+ return;
+ }
+
+	/*
+	 * in most cases, the new pixel clock will be the same as the original
+	 * one, except for 24Hz timings, which have a rounding error
+	 */
+	params->hw_crtc_timing->pixel_clock = new_pixel_clock;
+
+	params->hw_crtc_timing->h_total = new_h_total;
+ params->hw_crtc_timing->h_sync_start = h_sync_start_60_hz;
+
+ /* Move front porch as far back as possible */
+ params->hw_crtc_timing->v_total = new_v_total;
+ params->hw_crtc_timing->v_sync_start = params->hw_crtc_timing->v_total
+ - params->hw_crtc_timing->v_sync_width - 4;
+
+ /* These params have to be updated to match the verticalTotal */
+ params->hw_crtc_timing->ranged_timing.vertical_total_min =
+ params->hw_crtc_timing->v_total;
+ params->hw_crtc_timing->ranged_timing.vertical_total_max =
+ params->hw_crtc_timing->v_total;
+}
+
+/*
+ * dal_hw_sequencer_wireless_full_timing_adjustment
+ *
+ * This function modifies several properties of the CRTC timing specification
+ * for the purpose of maintaining VCE performance levels when using wireless
+ * display. This satisfies two requirements:
+ *
+ * 1.) HBlank must be long enough for VCE to have sufficient time to encode
+ * the output of DCE. This is because VCE buffers and encodes one row of
+ * macroblocks at a time, and each row of macroblocks is 16 rows of pixels.
+ *
+ * 2.) VBlank must be long enough for VCE to handle a second real-time
+ * encoding job (most commonly a webcam).
+ * The most common use case is a video conferencing application running
+ * alongside wireless display. VCE can only encode the frames from the webcam
+ * when it is not encoding frames from DCE (i.e. during VBlank).
+ *
+ * For a given input timing, both blank periods are maximized when the
+ * corresponding HTotal and VTotal are maximized.
+ *
+ * However: Refresh Rate = PixelClock / (HTotal x VTotal), which must be kept
+ * constant when doing adjustments.
+ *
+ * Also, PixelClock should be a multiple of 10 KHz to prevent loss of
+ * precision, and all values must be integers.
+ * Because of these numerous requirements, and because no universal rule
+ * exists for what VCE requires, the values used in this function are
+ * pre-calculated and verified by the VCE HW team.
+ *
+ * @param params timing parameters to adjust for VCE timing
+ */
+void dal_hw_sequencer_wireless_full_timing_adjustment(
+ struct hw_sequencer *hws,
+ struct hw_vce_adjust_timing_params *params)
+{
+
+ uint32_t h_total_60_hz;
+ uint32_t pixel_clk_60_hz;
+ uint32_t h_sync_start_60_hz;
+ uint32_t new_v_total;
+ uint32_t new_pixel_clock;
+
+ /* Need to update timing for 1080p, 720p and 480p */
+ if (params->hw_crtc_timing->h_addressable <= 720) {
+ h_total_60_hz = 900;
+ h_sync_start_60_hz = 760;
+ pixel_clk_60_hz = 74250;
+ } else if (params->hw_crtc_timing->h_addressable <= 1280) {
+ h_total_60_hz = 1800;
+ h_sync_start_60_hz = 1390;
+ pixel_clk_60_hz = 148500;
+ } else if (params->hw_crtc_timing->h_addressable <= 1920) {
+ h_total_60_hz = 3000;
+ h_sync_start_60_hz = 2008;
+ pixel_clk_60_hz = 148500;
+ } else {
+ dal_logger_write(hws->dal_context->logger,
+ LOG_MAJOR_WARNING,
+ LOG_MINOR_COMPONENT_HWSS,
+ "The horizontal timing is out of range");
+ return;
+ }
+
+ new_v_total = (pixel_clk_60_hz * 1000) /
+ (h_total_60_hz * params->refresh_rate);
+ new_pixel_clock = new_v_total * h_total_60_hz * params->refresh_rate
+ / 1000;
+
+	/*
+	 * in most cases, the new pixel clock will be the same as the original
+	 * one, except for 24Hz timings, which have a rounding error
+	 */
+ params->hw_crtc_timing->pixel_clock = new_pixel_clock;
+
+ params->hw_crtc_timing->h_total = h_total_60_hz;
+ params->hw_crtc_timing->h_sync_start = h_sync_start_60_hz;
+
+	/* Move front porch as far back as possible for VCE workaround */
+ params->hw_crtc_timing->v_total = new_v_total;
+ params->hw_crtc_timing->v_sync_start = params->hw_crtc_timing->v_total
+ - params->hw_crtc_timing->v_sync_width - 4;
+
+	/* These params have to be updated to match the verticalTotal */
+ params->hw_crtc_timing->ranged_timing.vertical_total_min =
+ params->hw_crtc_timing->v_total;
+ params->hw_crtc_timing->ranged_timing.vertical_total_max =
+ params->hw_crtc_timing->v_total;
+}
+
+static enum deep_color_depth translate_to_deep_color_depth(
+ enum hw_color_depth hw_color_depth)
+{
+ switch (hw_color_depth) {
+ case HW_COLOR_DEPTH_101010:
+ return DEEP_COLOR_DEPTH_30;
+ case HW_COLOR_DEPTH_121212:
+ return DEEP_COLOR_DEPTH_36;
+ case HW_COLOR_DEPTH_161616:
+ return DEEP_COLOR_DEPTH_48;
+ default:
+ return DEEP_COLOR_DEPTH_24;
+ }
+}
+
+static uint32_t build_dvo_config(enum signal_type signal)
+{
+ /* TODO: move this definition to the header file and change all code
+ * that uses it */
+ union dvo_config {
+ struct {
+ uint32_t DATA_RATE:1; /* 0: DDR, 1: SDR */
+ uint32_t RESERVED:1;
+ /* 0: lower 12 bits, 1: upper 12 bits */
+ uint32_t UPPER_LINK:1;
+ uint32_t LINK_WIDTH:1; /* 0: 12 bits, 1: 24 bits */
+ } bits;
+
+ uint32_t all;
+ };
+
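+	/* Illustrative note (assuming the compiler allocates bit-fields from
+	 * the least significant bit, as GCC does on little-endian targets):
+	 * SIGNAL_TYPE_DVO24 encodes as 0x8 (LINK_WIDTH set) and
+	 * SIGNAL_TYPE_MVPU_B as 0x4 (UPPER_LINK set). */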
+ union dvo_config dvo_config = { {0} };
+
+ switch (signal) {
+ case SIGNAL_TYPE_DVO24: /* 24 bits */
+ case SIGNAL_TYPE_MVPU_AB: /* 24 bits */
+ dvo_config.bits.LINK_WIDTH = 1;
+ break;
+ case SIGNAL_TYPE_DVO: /* lower 12 bits */
+ case SIGNAL_TYPE_MVPU_A: /* lower 12 bits */
+ break;
+ case SIGNAL_TYPE_MVPU_B: /* upper 12 bits */
+ dvo_config.bits.UPPER_LINK = 1;
+ break;
+ default:
+ break;
+ }
+
+ return dvo_config.all;
+}
+
+static enum disp_pll_config build_disp_pll_config(enum signal_type signal)
+{
+ switch (signal) {
+ case SIGNAL_TYPE_MVPU_A:
+ return DISP_PLL_CONFIG_DVO_DDR_MODE_LOW_12BIT;
+ case SIGNAL_TYPE_MVPU_B:
+ return DISP_PLL_CONFIG_DVO_DDR_MODE_UPPER_12BIT;
+ case SIGNAL_TYPE_MVPU_AB:
+ return DISP_PLL_CONFIG_DVO_DDR_MODE_24BIT;
+ default:
+ return DISP_PLL_CONFIG_UNKNOWN;
+ }
+}
+
+void dal_hw_sequencer_get_pixel_clock_parameters(
+ const struct hw_path_mode *path_mode,
+ struct pixel_clk_params *pixel_clk_params)
+{
+ struct display_path_objects obj;
+ struct display_path *display_path = path_mode->display_path;
+ enum deep_color_depth deep_color_depth = translate_to_deep_color_depth(
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+ enum signal_type asic_signal =
+ dal_hw_sequencer_get_asic_signal(path_mode);
+ struct controller *crtc =
+ dal_display_path_get_controller(display_path);
+
+ /* extract objects */
+ dal_hw_sequencer_get_objects(display_path, &obj);
+
+ pixel_clk_params->requested_pix_clk =
+ path_mode->mode.timing.pixel_clock;
+ /* Default is enough here since Link is enabled separately and will
+ * reprogram SymClk if required */
+ pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
+ LINK_RATE_REF_FREQ_IN_KHZ;
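+	/* Note (illustrative): assuming LINK_RATE_LOW is the DP 1.62 Gbps rate
+	 * code (0x06) and LINK_RATE_REF_FREQ_IN_KHZ is 27000, this default
+	 * evaluates to 162000 kHz. The actual values come from the link rate
+	 * definitions used by the link service. */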
+ pixel_clk_params->encoder_object_id =
+ dal_encoder_get_graphics_object_id(obj.upstream_encoder);
+ pixel_clk_params->signal_type = asic_signal;
+ pixel_clk_params->controller_id = dal_controller_get_id(crtc);
+ pixel_clk_params->color_depth = deep_color_depth;
+ pixel_clk_params->flags.ENABLE_SS =
+ dal_display_path_is_ss_supported(
+ display_path);
+ pixel_clk_params->flags.DISPLAY_BLANKED =
+ dal_display_path_is_target_blanked(display_path) ||
+ dal_display_path_is_target_powered_off(display_path);
+ pixel_clk_params->dvo_cfg = build_dvo_config(asic_signal);
+ pixel_clk_params->disp_pll_cfg = build_disp_pll_config(asic_signal);
+}
+
+uint32_t dal_hw_sequencer_translate_to_lb_color_depth(
+ enum lb_pixel_depth lb_color_depth)
+{
+ switch (lb_color_depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ return 6;
+ case LB_PIXEL_DEPTH_24BPP:
+ return 8;
+ case LB_PIXEL_DEPTH_30BPP:
+ return 10;
+ case LB_PIXEL_DEPTH_36BPP:
+ return 12;
+ default:
+ return 0;
+ }
+}
+
+enum csc_color_depth dal_hw_sequencer_translate_to_csc_color_depth(
+ enum hw_color_depth color_depth)
+{
+ switch (color_depth) {
+ case HW_COLOR_DEPTH_666:
+ return CSC_COLOR_DEPTH_666;
+ case HW_COLOR_DEPTH_888:
+ return CSC_COLOR_DEPTH_888;
+ case HW_COLOR_DEPTH_101010:
+ return CSC_COLOR_DEPTH_101010;
+ case HW_COLOR_DEPTH_121212:
+ return CSC_COLOR_DEPTH_121212;
+ case HW_COLOR_DEPTH_141414:
+ return CSC_COLOR_DEPTH_141414;
+ case HW_COLOR_DEPTH_161616:
+ return CSC_COLOR_DEPTH_161616;
+ default:
+ return CSC_COLOR_DEPTH_888;
+ }
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.c b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.c
new file mode 100644
index 000000000000..dd098602965e
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.c
@@ -0,0 +1,1198 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "include/logger_interface.h"
+#include "include/bandwidth_manager_interface.h"
+#include "include/vector.h"
+#include "include/plane_types.h"
+
+#include "hw_sequencer.h"
+
+/* This macro allows us to do pointer arithmetic with a void pointer. */
+#define UPDATE_MEMBLOCK_PTR(mem, count) { \
+ uint8_t *mem_block_current; \
+\
+ mem_block_current = mem; \
+ mem_block_current += count; \
+ mem = mem_block_current; \
+}
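+/* For example, UPDATE_MEMBLOCK_PTR(mem_block, sizeof(void *) * paths_num)
+ * advances the void pointer 'mem_block' past the per-path pointer area. */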
+
+#define PARAMS_TRACE(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_HW_TRACE, LOG_MINOR_HW_TRACE_MPO, __VA_ARGS__)
+
+#define PARAMS_WARNING(...) \
+ dal_logger_write(dal_context->logger, \
+ LOG_MAJOR_WARNING, LOG_MINOR_HW_TRACE_MPO, __VA_ARGS__)
+
+/******************************************************************************
+ * Private Functions
+ *****************************************************************************/
+
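+/* Memory layout sketch (illustrative, for a two-path set): the per-plane
+ * parameter area reserved by reserve_planes_memory_block() looks like
+ *
+ *   [ptr path0][ptr path1][plane infos, path 0 ...][plane infos, path 1 ...]
+ *
+ * where each per-path pointer points at the start of that path's plane info
+ * area inside the same allocation.
+ */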
+static void reserve_planes_memory_block(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t paths_num,
+ void *mem_block,
+ void *params_area,
+ uint32_t per_plane_info_size)
+{
+ uint32_t i;
+ uint32_t planes_num;
+ void **planes_block = params_area;
+ struct vector *plane_configs;
+
+ /* move to the area after the per-path pointers */
+ UPDATE_MEMBLOCK_PTR(mem_block, sizeof(void *) * paths_num);
+
+ for (i = 0; i < paths_num; i++) {
+
+ plane_configs = dal_hw_path_mode_set_get_path_by_index(
+ path_set, i)->plane_configs;
+
+ planes_num = 0;
+
+ if (plane_configs)
+ planes_num = dal_vector_get_count(plane_configs);
+
+ if (planes_num == 0) {
+ /* validation case */
+ planes_num = 1;
+ }
+
+ /* Initialize pointer for current path to point to
+ * area which will store per-plane information. */
+ planes_block[i] = mem_block;
+
+ /* And reserve the area. */
+ UPDATE_MEMBLOCK_PTR(mem_block,
+ per_plane_info_size * planes_num);
+ } /* for() */
+}
+
+struct hwss_build_params *allocate_set_mode_params(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t paths_num,
+ uint32_t params_num,
+ union hwss_build_params_mask params_mask)
+{
+ struct dal_context *dal_context = hws->dal_context;
+ struct hwss_build_params *build_params = NULL;
+ uint32_t scaling_tap_params_size = 0;
+ uint32_t pll_settings_params_size = 0;
+ uint32_t min_clock_params_size = 0;
+ uint32_t wm_input_params_size = 0;
+ uint32_t bandwidth_params_size = 0;
+ uint32_t line_buffer_params_size = 0;
+ uint32_t size;
+ void *mem_block;
+
+ if (!paths_num && !params_num) {
+ PARAMS_WARNING("%s: invalid input!\n", __func__);
+ return NULL;
+ }
+
+ if (params_mask.bits.SCALING_TAPS) {
+ scaling_tap_params_size =
+ sizeof(struct scaling_tap_info) * params_num +
+ sizeof(struct scaling_tap_info *) * paths_num;
+ }
+
+ if (params_mask.bits.PLL_SETTINGS) {
+ pll_settings_params_size =
+ sizeof(struct pll_settings) * paths_num;
+ }
+
+ if (params_mask.bits.MIN_CLOCKS) {
+ min_clock_params_size =
+ sizeof(struct min_clock_params) * params_num;
+ }
+
+ if (params_mask.bits.WATERMARK) {
+ wm_input_params_size =
+ sizeof(struct watermark_input_params) * params_num;
+ }
+
+ if (params_mask.bits.BANDWIDTH) {
+ bandwidth_params_size =
+ sizeof(struct bandwidth_params) * params_num;
+ }
+
+ if (params_mask.bits.LINE_BUFFER) {
+ line_buffer_params_size =
+ sizeof(struct lb_params_data) * params_num +
+ sizeof(struct lb_params_data *) * paths_num;
+ }
+
+ /* Get sum of required allocation */
+ size = scaling_tap_params_size + pll_settings_params_size +
+ min_clock_params_size + wm_input_params_size +
+ bandwidth_params_size + line_buffer_params_size;
+
+ /* Allocate entire memory size */
+ /*
+ * TODO: as an optimization, this memory can be preallocated inside
+ * of hws.
+ * But, it can be done only if:
+	 * 1. the allocation size is hardcoded for the maximum Paths/Planes.
+	 * 2. assuming that DAL is thread-safe, all threads can use
+	 * the same params structure.
+ * 3. if the optimization is done, this function simply 'resets' the
+ * preallocated memory and re-arranges the pointers according
+ * to the new number of parameters.
+ */
+	build_params = dal_alloc(sizeof(*build_params) + size);
+
+	if (NULL == build_params)
+		return NULL;
+
+	build_params->mem_block = &build_params[1];
+	build_params->params_num = params_num;
+
+	mem_block = build_params->mem_block;
+
+ /* Distribute the allocated chunk between available consumers */
+ if (scaling_tap_params_size > 0) {
+
+ build_params->scaling_taps_params = mem_block;
+
+ reserve_planes_memory_block(
+ hws,
+ path_set,
+ paths_num,
+ mem_block,
+ build_params->scaling_taps_params,
+ sizeof(struct scaling_tap_info));
+
+ UPDATE_MEMBLOCK_PTR(mem_block, scaling_tap_params_size);
+ }
+
+ if (pll_settings_params_size > 0) {
+ build_params->pll_settings_params = mem_block;
+ UPDATE_MEMBLOCK_PTR(mem_block, pll_settings_params_size);
+ }
+
+ if (min_clock_params_size > 0) {
+ build_params->min_clock_params = mem_block;
+ UPDATE_MEMBLOCK_PTR(mem_block, min_clock_params_size);
+ }
+
+ if (wm_input_params_size > 0) {
+ build_params->wm_input_params = mem_block;
+ UPDATE_MEMBLOCK_PTR(mem_block, wm_input_params_size);
+ }
+
+ if (bandwidth_params_size > 0) {
+ build_params->bandwidth_params = mem_block;
+ UPDATE_MEMBLOCK_PTR(mem_block, bandwidth_params_size);
+ }
+
+ if (line_buffer_params_size > 0) {
+
+ build_params->line_buffer_params = mem_block;
+
+ reserve_planes_memory_block(
+ hws,
+ path_set,
+ paths_num,
+ mem_block,
+ build_params->line_buffer_params,
+ sizeof(struct lb_params_data));
+
+ UPDATE_MEMBLOCK_PTR(mem_block, line_buffer_params_size);
+ }
+
+	/* validate that our distribution is correct. It should be fine for
+	 * both 32-bit and 64-bit OSes */
+ ASSERT(build_params->mem_block + size == mem_block);
+
+ return build_params;
+}
+
+/**
+ * Return the number of parameters for the Path Set.
+ *
+ * Validate that information in Path Set is adequate for the
+ * requested hwss_build_params_mask.
+ * If it is, then calculate the number of parameters for the Path Set.
+ *
+ * \return 0: Validation failed
+ * non-zero number of parameters: Validation passed
+ */
+static uint32_t get_params_num_to_allocate(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ union hwss_build_params_mask params_mask)
+{
+ struct dal_context *dal_context = hws->dal_context;
+ uint32_t i;
+ uint32_t params_num = 0;
+ uint32_t paths_num;
+ struct hw_path_mode *path_mode;
+ struct vector *plane_configs;
+ uint32_t plane_count;
+
+ paths_num = dal_hw_path_mode_set_get_paths_number(path_set);
+
+ for (i = 0; i < paths_num; i++) {
+ path_mode = dal_hw_path_mode_set_get_path_by_index(path_set, i);
+
+ if (path_mode->action == HW_PATH_ACTION_RESET)
+ continue;
+
+ plane_configs = path_mode->plane_configs;
+
+ plane_count = 0;
+
+ if (PARAMS_MASK_REQUIRES_PLAIN_CONFIGS(params_mask)) {
+ /* Add parameters for each PLANE */
+
+ if (NULL == plane_configs) {
+ /* Validation of Path Set failed as we can not
+ * build parameters without Plane Configs. */
+ PARAMS_WARNING(
+ "%s: Path:%d: Plane Configs vector is NULL!\n",
+ __func__,
+ dal_display_path_get_display_index(
+ path_mode->display_path));
+ params_num = 0;
+ ASSERT(false);
+ break;
+ }
+
+ plane_count = dal_vector_get_count(plane_configs);
+ if (!plane_count) {
+ /* There is no use-case which should get us
+ * here. */
+ PARAMS_WARNING(
+ "%s: Path:%d: Plane Configs vector is empty!\n",
+ __func__,
+ dal_display_path_get_display_index(
+ path_mode->display_path));
+ params_num = 0;
+ ASSERT(false);
+ break;
+ }
+
+ /* This is the standard case when we prepare parameters
+ * for all planes */
+ } else {
+ /* no planes - possible for validation case */
+ plane_count = 1;
+ }
+
+ params_num += plane_count;
+ } /* for() */
+
+ return params_num;
+}
+
+static void wireless_parameters_adjust(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode)
+{
+ struct hw_vce_adjust_timing_params vce_params = {
+ &path_mode->mode.timing,
+ &path_mode->mode.overscan,
+ path_mode->mode.refresh_rate,
+ dal_adapter_service_is_feature_supported(
+ FEATURE_MODIFY_TIMINGS_FOR_WIRELESS),
+ dal_adapter_service_is_feature_supported(
+ FEATURE_WIRELESS_FULL_TIMING_ADJUSTMENT)
+ };
+
+ hws->funcs->apply_vce_timing_adjustment(hws, &vce_params);
+}
+
+/** translate timing color depth to graphics bpp */
+static uint32_t translate_to_display_bpp(
+ enum hw_color_depth color_depth)
+{
+ uint32_t bpp;
+
+ switch (color_depth) {
+ case HW_COLOR_DEPTH_666:
+ bpp = 18;
+ break;
+ case HW_COLOR_DEPTH_888:
+ bpp = 24;
+ break;
+ case HW_COLOR_DEPTH_101010:
+ bpp = 30;
+ break;
+ case HW_COLOR_DEPTH_121212:
+ bpp = 36;
+ break;
+ case HW_COLOR_DEPTH_141414:
+ bpp = 42;
+ break;
+ case HW_COLOR_DEPTH_161616:
+ bpp = 48;
+ break;
+ default:
+ bpp = 30;
+ break;
+ }
+ return bpp;
+}
+
+/** translate timing color depth to scaler efficiency
+ * and line buffer capabilities
+ */
+static enum v_scaler_efficiency
+translate_to_scaler_efficiency_and_lb_pixel_depth(
+ struct line_buffer *line_buffer,
+ enum hw_color_depth color_depth,
+ enum lb_pixel_depth *lb_depth)
+{
+ uint32_t display_bpp = 0;
+ *lb_depth = LB_PIXEL_DEPTH_30BPP;
+
+ if (line_buffer == NULL)
+ return V_SCALER_EFFICIENCY_LB30BPP;
+
+ display_bpp = translate_to_display_bpp(color_depth);
+
+ if (!dal_line_buffer_get_pixel_storage_depth(
+ line_buffer, display_bpp, lb_depth))
+ return V_SCALER_EFFICIENCY_LB30BPP;
+
+ switch (*lb_depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ return V_SCALER_EFFICIENCY_LB18BPP;
+ case LB_PIXEL_DEPTH_24BPP:
+ return V_SCALER_EFFICIENCY_LB24BPP;
+ case LB_PIXEL_DEPTH_30BPP:
+ return V_SCALER_EFFICIENCY_LB30BPP;
+ case LB_PIXEL_DEPTH_36BPP:
+ return V_SCALER_EFFICIENCY_LB36BPP;
+ default:
+ return V_SCALER_EFFICIENCY_LB30BPP;
+ }
+}
+
+static enum scaler_validation_code
+validate_display_clock_for_scaling_ex(
+ struct display_path *disp_path,
+ struct min_clock_params *min_clock_params,
+ struct scaling_tap_info *scaler_tap_info,
+ struct lb_params_data *actual_lb_params,
+ enum hw_color_depth timing_color_depth)
+{
+ /* TODO: Implementation. Do NOT use logger to print this message
+ * because this function is called many times */
+ return SCALER_VALIDATION_OK;
+}
+
+/* translate from lb_pixel_depth to v_scaler_efficiency */
+static enum v_scaler_efficiency
+translate_lb_pixel_depth_to_scaler_efficiency(
+ enum lb_pixel_depth lb_depth)
+{
+ enum v_scaler_efficiency scaler_eff = V_SCALER_EFFICIENCY_LB30BPP;
+
+ switch (lb_depth) {
+ case LB_PIXEL_DEPTH_18BPP:
+ scaler_eff = V_SCALER_EFFICIENCY_LB18BPP;
+ break;
+ case LB_PIXEL_DEPTH_24BPP:
+ scaler_eff = V_SCALER_EFFICIENCY_LB24BPP;
+ break;
+ case LB_PIXEL_DEPTH_30BPP:
+ scaler_eff = V_SCALER_EFFICIENCY_LB30BPP;
+ break;
+ case LB_PIXEL_DEPTH_36BPP:
+ scaler_eff = V_SCALER_EFFICIENCY_LB36BPP;
+ break;
+ }
+ return scaler_eff;
+}
+
+static enum scaler_validation_code get_optimal_number_of_taps(
+ struct controller *crtc,
+ struct scaler_validation_params *scaling_params,
+ enum hw_color_depth display_color_depth,
+ struct lb_params_data *lb_params,
+ struct scaling_tap_info *actual_taps_info,
+ bool interlaced,
+ enum hw_pixel_encoding pixel_encoding)
+{
+ enum scaler_validation_code code =
+ SCALER_VALIDATION_INVALID_INPUT_PARAMETERS;
+
+ uint32_t max_lines_number = 0;
+
+ struct line_buffer *lb = dal_controller_get_line_buffer(crtc);
+
+ uint32_t display_bpp = translate_to_display_bpp(display_color_depth);
+ enum lb_pixel_depth depth = lb_params->depth;
+ enum lb_pixel_depth lower_depth = depth;
+
+ bool failure_max_number_of_supported_lines = false;
+ bool failure_next_lower_number_of_taps = false;
+ bool use_predefined_taps = false;
+
+ /* Variable to pass in to calculate number of lines supported by
+ * Line Buffer */
+ uint32_t pixel_width = 0;
+ uint32_t pixel_width_c;
+
+	/* If downscaling, use the destination pixel width to calculate the
+	 * Line Buffer size (how many lines can fit in the line buffer). We
+	 * assume the 4:1 scaling ratio is already checked in static
+	 * validation, so no modes that don't fit that limitation should get
+	 * here.
+	 */
+ if (scaling_params->source_view.width >
+ scaling_params->dest_view.width)
+ pixel_width = scaling_params->dest_view.width;
+ else
+ pixel_width = scaling_params->source_view.width;
+
+ pixel_width_c = pixel_width;
+
+ /* Source width for chroma is half size for 420 and 422 rotated to 90
+ * or 270 */
+	if (scaling_params->pixel_format == PIXEL_FORMAT_420BPP12 ||
+		((scaling_params->rotation == ROTATION_ANGLE_90 ||
+		scaling_params->rotation == ROTATION_ANGLE_270) &&
+		scaling_params->pixel_format == PIXEL_FORMAT_422BPP16)) {
+ if (scaling_params->source_view.width / 2 <
+ scaling_params->dest_view.width) {
+ pixel_width_c = scaling_params->source_view.width / 2;
+ }
+ }
+
+ if (!dal_line_buffer_get_max_num_of_supported_lines(
+ lb,
+ depth,
+ pixel_width,
+ &max_lines_number))
+ return code;
+
+ /* TODO: before usage check taps_requested initialized
+ if (false && scaling_params->taps_requested.h_taps > 0 &&
+ scaling_params->taps_requested.v_taps > 0) {
+ struct lb_config_data lb_config_data = { 0 };
+ lb_config_data.src_height =
+ scaling_params->source_view.height;
+ lb_config_data.src_pixel_width = pixel_width;
+ lb_config_data.src_pixel_width_c = pixel_width_c;
+ lb_config_data.dst_height =
+ scaling_params->dest_view.height;
+ lb_config_data.dst_pixel_width =
+ scaling_params->dest_view.width;
+ lb_config_data.taps.h_taps =
+ scaling_params->taps_requested.h_taps;
+ lb_config_data.taps.v_taps =
+ scaling_params->taps_requested.v_taps;
+ lb_config_data.taps.h_taps_c =
+ scaling_params->taps_requested.h_taps_c;
+ lb_config_data.taps.v_taps_c =
+ scaling_params->taps_requested.v_taps_c;
+ lb_config_data.interlaced = interlaced;
+ lb_config_data.depth = lb_params->depth;
+
+ if (dal_line_buffer_validate_taps_info(
+ lb,
+ &lb_config_data,
+ display_bpp)) {
+ *actual_taps_info = scaling_params->taps_requested;
+ use_predefined_taps = true;
+ }
+ } else */
+
+	/* To get the number of taps, we still want to know the original
+	 * scaling parameters. The horizontal downscaling taps use a value that
+	 * is consistent with the original scaling parameters, i.e. we don't
+	 * need to use the destination horizontal width.
+	 */
+ if (dal_controller_get_optimal_taps_number(
+ crtc,
+ scaling_params,
+ actual_taps_info) != SCALER_VALIDATION_OK) {
+ return code;
+ }
+
+	/* v_taps can go lower and max_lines_number can go higher when the LB
+	 * format is changed.
+	 * Loop until v_taps <= max_lines_number - 1, i.e. the line buffer
+	 * holds at least one more line than the number of vertical taps.
+	 */
+ while (actual_taps_info->v_taps > max_lines_number - 1) {
+ /* adjust lb size */
+ if (dal_line_buffer_get_next_lower_pixel_storage_depth(
+ lb,
+ display_bpp,
+ depth,
+ &lower_depth)) {
+ depth = lower_depth;
+ if (!dal_line_buffer_get_max_num_of_supported_lines(
+ lb,
+ depth,
+ pixel_width,
+ &max_lines_number)) {
+ failure_max_number_of_supported_lines = true;
+ break;
+ }
+
+ continue;
+ }
+
+ if (use_predefined_taps == true) {
+ code = SCALER_VALIDATION_FAILURE_PREDEFINED_TAPS_NUMBER;
+ break;
+ } else if (dal_controller_get_next_lower_taps_number(
+ crtc,
+ NULL,
+ actual_taps_info) != SCALER_VALIDATION_OK) {
+ failure_next_lower_number_of_taps = true;
+ break;
+ }
+ }
+
+ if (use_predefined_taps == true &&
+ code == SCALER_VALIDATION_FAILURE_PREDEFINED_TAPS_NUMBER)
+ return code;
+
+ if (actual_taps_info->v_taps > 1 && max_lines_number <= 2)
+ return SCALER_VALIDATION_SOURCE_VIEW_WIDTH_EXCEEDING_LIMIT;
+
+ if (failure_max_number_of_supported_lines == true
+ || failure_next_lower_number_of_taps == true) {
+		/* we requested scaling, but it could not be supported */
+ return SCALER_VALIDATION_SOURCE_VIEW_WIDTH_EXCEEDING_LIMIT;
+ }
+
+ if (actual_taps_info->v_taps == 1 && max_lines_number < 2) {
+		/* no scaling, but the LB should accommodate at least 2 source
+		 * lines
+		 */
+ return SCALER_VALIDATION_SOURCE_VIEW_WIDTH_EXCEEDING_LIMIT;
+ }
+
+ lb_params->depth = depth;
+
+ return SCALER_VALIDATION_OK;
+}
+
+static enum scaler_validation_code build_path_parameters(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ uint32_t path_index,
+ const struct plane_config *pl_cfg,
+ struct scaling_tap_info *scaler_tap_info,
+ struct pll_settings *pll_settings_params,
+ struct min_clock_params *min_clock_params,
+ struct watermark_input_params *wm_input_params,
+ struct bandwidth_params *bandwidth_params,
+ struct lb_params_data *line_buffer_params,
+ bool use_predefined_hw_state)
+{
+ enum scaler_validation_code scaler_return_code = SCALER_VALIDATION_OK;
+ struct pixel_clk_params pixel_clk_params = { 0 };
+
+ uint32_t graphics_bpp = dal_hw_sequencer_translate_to_graphics_bpp(
+ path_mode->mode.pixel_format);
+
+ uint32_t backend_bpp = dal_hw_sequencer_translate_to_backend_bpp(
+ path_mode->mode.backend_bpp);
+
+ uint32_t dst_pixel_height =
+ path_mode->mode.scaling_info.dst.height;
+
+ struct controller *crtc = dal_display_path_get_controller(
+ path_mode->display_path);
+
+ struct scaling_tap_info tap_info = { 0 };
+ struct pll_settings pll_settings = { 0 };
+ struct min_clock_params min_clock_params_local = { 0 };
+ struct lb_params_data line_buffer_params_local = { 0 };
+
+ struct scaling_tap_info *actual_tap_info =
+ scaler_tap_info != NULL ?
+ scaler_tap_info : &tap_info;
+
+ struct pll_settings *actual_pll_settings =
+ pll_settings_params != NULL ?
+ pll_settings_params : &pll_settings;
+
+ struct min_clock_params *actual_min_clock_params =
+ min_clock_params != NULL ?
+ min_clock_params : &min_clock_params_local;
+
+ struct lb_params_data *actual_line_buffer_params =
+ line_buffer_params != NULL ?
+ line_buffer_params : &line_buffer_params_local;
+
+ bool line_buffer_prefetch_enabled;
+
+ uint32_t pixel_width;
+ uint32_t pixel_width_c;
+
+	/* If downscaling, use the destination pixel width to calculate the
+	 * Line Buffer size (how many lines can fit in the line buffer). We
+	 * assume the 4:1 scaling ratio is already checked in static
+	 * validation, so no modes that don't fit that limitation should get
+	 * here.
+	 */
+ if (path_mode->mode.scaling_info.src.width >
+ path_mode->mode.scaling_info.dst.width)
+ pixel_width = path_mode->mode.scaling_info.dst.width;
+ else
+ pixel_width = path_mode->mode.scaling_info.src.width;
+
+ pixel_width_c = pixel_width;
+
+ if (pl_cfg &&
+ (pl_cfg->config.dal_pixel_format == PIXEL_FORMAT_420BPP12 ||
+ (pl_cfg->config.dal_pixel_format == PIXEL_FORMAT_422BPP16 &&
+ (pl_cfg->config.rotation == PLANE_ROTATION_ANGLE_90 ||
+ pl_cfg->config.rotation ==
+ PLANE_ROTATION_ANGLE_270)))) {
+ if (path_mode->mode.scaling_info.src.width / 2 <
+ path_mode->mode.scaling_info.dst.width)
+			pixel_width_c =
+				path_mode->mode.scaling_info.src.width / 2;
+ }
+
+ if (use_predefined_hw_state)
+ line_buffer_prefetch_enabled = false;
+ else {
+ struct lb_config_data lb_config_data;
+
+ dal_memset(&lb_config_data, 0, sizeof(lb_config_data));
+ lb_config_data.src_height =
+ path_mode->mode.scaling_info.src.height;
+ lb_config_data.src_pixel_width = pixel_width;
+ lb_config_data.src_pixel_width_c = pixel_width_c;
+ lb_config_data.dst_height =
+ path_mode->mode.scaling_info.dst.height;
+ lb_config_data.dst_pixel_width =
+ path_mode->mode.scaling_info.dst.width;
+ lb_config_data.taps.h_taps = actual_tap_info->h_taps;
+ lb_config_data.taps.v_taps = actual_tap_info->v_taps;
+ lb_config_data.taps.h_taps_c = actual_tap_info->h_taps_c;
+ lb_config_data.taps.v_taps_c = actual_tap_info->v_taps_c;
+ lb_config_data.interlaced =
+ path_mode->mode.timing.flags.INTERLACED;
+ lb_config_data.depth = actual_line_buffer_params->depth;
+ line_buffer_prefetch_enabled =
+ dal_line_buffer_is_prefetch_supported(
+ dal_controller_get_line_buffer(crtc),
+ &lb_config_data);
+ }
+
+ /* calc pixel clock parameters and PLL dividers */
+ dal_hw_sequencer_get_pixel_clock_parameters(path_mode,
+ &pixel_clk_params);
+
+ if (pll_settings_params != NULL) {
+ dal_clock_source_get_pix_clk_dividers(
+ dal_display_path_get_clock_source(
+ path_mode->display_path),
+ &pixel_clk_params,
+ actual_pll_settings);
+ }
+
+	/* For the scaler and min clock we need the height adjusted by scan
+	 * order (interlaced or progressive). Bandwidth and watermark do not
+	 * need this (it is compensated inside those components). */
+ if (path_mode->mode.timing.flags.INTERLACED)
+ dst_pixel_height /= 2;
+
+ /* calculate taps and minimum clock */
+ if (scaler_tap_info != NULL ||
+ wm_input_params != NULL ||
+ min_clock_params != NULL ||
+ bandwidth_params != NULL) {
+
+ struct scaler_validation_params scaler_params = { 0 };
+
+		/* get the LB depth equal to or higher than the display bpp */
+ actual_min_clock_params->scaler_efficiency =
+ translate_to_scaler_efficiency_and_lb_pixel_depth(
+ dal_controller_get_line_buffer(crtc),
+ path_mode->mode.timing.flags.COLOR_DEPTH,
+ &actual_line_buffer_params->depth);
+
+ scaler_params.signal_type = path_mode->mode.scaling_info.signal;
+
+ scaler_params.source_view = path_mode->mode.scaling_info.src;
+
+ if (pl_cfg) {
+ scaler_params.dest_view =
+ pl_cfg->mp_scaling_data.dst_res;
+ } else {
+ scaler_params.dest_view =
+ path_mode->mode.scaling_info.dst;
+ scaler_params.dest_view.height = dst_pixel_height;
+ }
+
+
+/* scaler_params.source_view.height =
+ pl_cfg->mp_scaling_data.viewport.height;
+ scaler_params.source_view.width =
+ pl_cfg->mp_scaling_data.viewport.height;
+ scaler_params.dest_view = pl_cfg->mp_scaling_data.dst_res;*/
+
+		/* get taps that fit the LB size; the LB depth could be
+		 * updated. Taps are set if the return code is OK. */
+ scaler_return_code = get_optimal_number_of_taps(
+ crtc,
+ &scaler_params,
+ path_mode->mode.timing.flags.COLOR_DEPTH,
+ actual_line_buffer_params,
+ actual_tap_info,
+ path_mode->mode.timing.flags.INTERLACED == 0,
+ path_mode->mode.timing.flags.PIXEL_ENCODING);
+
+ /* Prepare min clock params */
+
+ actual_min_clock_params->id = path_index;
+ actual_min_clock_params->requested_pixel_clock =
+ pixel_clk_params.requested_pix_clk;
+ actual_min_clock_params->actual_pixel_clock =
+ actual_pll_settings->actual_pix_clk;
+
+ actual_min_clock_params->source_view.width =
+ path_mode->mode.scaling_info.src.width;
+ actual_min_clock_params->source_view.height =
+ path_mode->mode.scaling_info.src.height;
+
+ if (pl_cfg) {
+ actual_min_clock_params->dest_view.height =
+ pl_cfg->mp_scaling_data.dst_res.height;
+ actual_min_clock_params->dest_view.width =
+ pl_cfg->mp_scaling_data.dst_res.width;
+ } else {
+ actual_min_clock_params->dest_view.width =
+ path_mode->mode.scaling_info.dst.width;
+ actual_min_clock_params->dest_view.height =
+ dst_pixel_height;
+ }
+
+ actual_min_clock_params->timing_info.INTERLACED =
+ path_mode->mode.timing.flags.INTERLACED;
+ actual_min_clock_params->timing_info.HCOUNT_BY_TWO = 0;
+ actual_min_clock_params->timing_info.PIXEL_REPETITION =
+ path_mode->mode.timing.flags.PIXEL_REPETITION;
+ actual_min_clock_params->timing_info.PREFETCH = 1;
+
+ actual_min_clock_params->timing_info.h_total =
+ path_mode->mode.timing.h_total;
+ actual_min_clock_params->timing_info.h_addressable =
+ path_mode->mode.timing.h_addressable;
+ actual_min_clock_params->timing_info.h_sync_width =
+ path_mode->mode.timing.h_sync_width;
+
+ if (pl_cfg) {
+ actual_min_clock_params->scaling_info.h_overscan_right =
+ pl_cfg->mp_scaling_data.overscan.right;
+ actual_min_clock_params->scaling_info.h_overscan_left =
+ pl_cfg->mp_scaling_data.overscan.left;
+ } else {
+ actual_min_clock_params->scaling_info.h_overscan_right =
+ path_mode->mode.overscan.right;
+ actual_min_clock_params->scaling_info.h_overscan_left =
+ path_mode->mode.overscan.left;
+ }
+
+ actual_min_clock_params->scaling_info.h_taps =
+ actual_tap_info->h_taps;
+ actual_min_clock_params->scaling_info.v_taps =
+ actual_tap_info->v_taps;
+
+ actual_min_clock_params->color_info.bpp_backend_video =
+ backend_bpp;
+ actual_min_clock_params->color_info.bpp_graphics =
+ graphics_bpp;
+
+ actual_min_clock_params->signal_type =
+ pixel_clk_params.signal_type;
+ actual_min_clock_params->deep_color_depth =
+ dal_hw_sequencer_translate_to_dec_deep_color_depth(
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+ actual_min_clock_params->scaler_efficiency =
+ translate_lb_pixel_depth_to_scaler_efficiency(
+ actual_line_buffer_params->depth);
+ actual_min_clock_params->line_buffer_prefetch_enabled =
+ line_buffer_prefetch_enabled;
+ actual_line_buffer_params->id = path_index;
+
+ /* validate taps if it is set */
+ if (scaler_return_code == SCALER_VALIDATION_OK) {
+ scaler_return_code =
+ validate_display_clock_for_scaling_ex(
+ path_mode->display_path,
+ actual_min_clock_params,
+ actual_tap_info,
+ actual_line_buffer_params,
+ path_mode->mode.timing.flags.COLOR_DEPTH);
+ }
+ }
+
+ if (wm_input_params != NULL) {
+ wm_input_params->color_info.bpp_backend_video = backend_bpp;
+ wm_input_params->color_info.bpp_graphics = graphics_bpp;
+
+ wm_input_params->controller_id = dal_controller_get_id(crtc);
+
+ wm_input_params->src_view.width =
+ path_mode->mode.scaling_info.src.width;
+ wm_input_params->src_view.height =
+ path_mode->mode.scaling_info.src.height;
+
+ wm_input_params->dst_view.width =
+ path_mode->mode.scaling_info.dst.width;
+ wm_input_params->dst_view.height =
+ path_mode->mode.scaling_info.dst.height;
+
+ wm_input_params->timing_info.INTERLACED =
+ path_mode->mode.timing.flags.INTERLACED;
+ wm_input_params->pixel_clk_khz =
+ pixel_clk_params.requested_pix_clk;
+
+ wm_input_params->v_taps = actual_tap_info->v_taps;
+ wm_input_params->h_taps = actual_tap_info->h_taps;
+
+ wm_input_params->fbc_enabled =
+ dal_display_path_get_fbc_info(
+ path_mode->display_path)->fbc_enable;
+ wm_input_params->lpt_enabled =
+ dal_display_path_get_fbc_info(
+ path_mode->display_path)->lpt_enable;
+ wm_input_params->tiling_mode = path_mode->mode.tiling_mode;
+
+ wm_input_params->timing_info.h_addressable =
+ path_mode->mode.timing.h_addressable;
+ wm_input_params->timing_info.h_total =
+ path_mode->mode.timing.h_total;
+ wm_input_params->timing_info.h_overscan_left =
+ path_mode->mode.overscan.left;
+ wm_input_params->timing_info.h_overscan_right =
+ path_mode->mode.overscan.right;
+ }
+
+ if (bandwidth_params != NULL) {
+ bandwidth_params->controller_id =
+ dal_controller_get_id(crtc);
+ bandwidth_params->src_vw.width =
+ path_mode->mode.scaling_info.src.width;
+ bandwidth_params->src_vw.height =
+ path_mode->mode.scaling_info.src.height;
+ bandwidth_params->dst_vw.width =
+ path_mode->mode.scaling_info.dst.width;
+ bandwidth_params->dst_vw.height =
+ path_mode->mode.scaling_info.dst.height;
+
+ bandwidth_params->color_info.bpp_graphics = graphics_bpp;
+ bandwidth_params->color_info.bpp_backend_video = backend_bpp;
+
+ bandwidth_params->timing_info.PREFETCH = 1;
+ bandwidth_params->timing_info.h_total =
+ path_mode->mode.timing.h_total;
+ bandwidth_params->timing_info.INTERLACED =
+ path_mode->mode.timing.flags.INTERLACED;
+ bandwidth_params->timing_info.h_addressable =
+ path_mode->mode.timing.h_addressable;
+ bandwidth_params->timing_info.v_total =
+ path_mode->mode.timing.v_total;
+ bandwidth_params->timing_info.pix_clk_khz =
+ pixel_clk_params.requested_pix_clk;
+
+ bandwidth_params->scaler_taps.h_taps =
+ actual_tap_info->h_taps;
+ bandwidth_params->scaler_taps.v_taps =
+ actual_tap_info->v_taps;
+
+ bandwidth_params->tiling_mode = path_mode->mode.tiling_mode;
+ bandwidth_params->surface_pixel_format =
+ path_mode->mode.pixel_format;
+ bandwidth_params->stereo_format =
+ path_mode->mode.stereo_format;
+ }
+
+ return scaler_return_code;
+}
+
+DAL_VECTOR_AT_INDEX(plane_configs, const struct plane_config *)
+
+/**
+ * Prepare parameters for a single path.
+ *
+ * \param [in] path_index: index of the path in the path_mode_set.
+ *
+ * \return Number of parameters which were prepared. Zero indicates an error.
+ */
+static uint32_t prepare_per_path_parameters(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode,
+ union hwss_build_params_mask params_mask,
+ struct hwss_build_params *build_params,
+ uint32_t path_index,
+ uint32_t param_num_in,
+ bool use_predefined_hw_state)
+{
+ struct dal_context *dal_context = hws->dal_context;
+ enum scaler_validation_code build_result = SCALER_VALIDATION_OK;
+ const struct plane_config *pl_cfg;
+ uint32_t params_prepared = 0;
+ uint32_t planes_num;
+ uint32_t plane_index;
+ uint32_t param_current = param_num_in;
+ struct vector *plane_configs;
+
+ if (path_mode->action == HW_PATH_ACTION_RESET) {
+ PARAMS_WARNING("%s: invalid action: RESET\n", __func__);
+ return params_prepared;
+ }
+
+ /* Apply VCE timing change *before* building path parameters */
+ if (SIGNAL_TYPE_WIRELESS == dal_hw_sequencer_get_asic_signal(path_mode))
+ wireless_parameters_adjust(hws, path_mode);
+
+ planes_num = 0;
+ plane_configs = path_mode->plane_configs;
+
+ if (!PARAMS_MASK_REQUIRES_PLAIN_CONFIGS(params_mask)) {
+ /* Disregard planes information, even if it is available!
+ * (We get here in the 'dal_hw_sequencer_set_safe_displaymark'
+ * case.) */
+ planes_num = 1;
+ } else if (plane_configs) {
+ /* Planes info must be taken into account. */
+ planes_num = dal_vector_get_count(plane_configs);
+ }
+
+ if (!planes_num) {
+ /* validation case */
+ planes_num = 1;
+ }
+
+ for (plane_index = 0; plane_index < planes_num; plane_index++) {
+
+ pl_cfg = NULL;
+ if (plane_configs) {
+ pl_cfg = plane_configs_vector_at_index(plane_configs,
+ plane_index);
+ }
+
+ /* Build parameters for the PATH/PLANE combination */
+ build_result = build_path_parameters(
+ hws,
+ path_mode,
+ path_index,
+ pl_cfg,
+ (params_mask.bits.SCALING_TAPS ?
+ &build_params->
+ scaling_taps_params[path_index][plane_index] :
+ NULL),
+ (params_mask.bits.PLL_SETTINGS ?
+ &build_params->pll_settings_params[path_index] :
+ NULL),
+ (params_mask.bits.MIN_CLOCKS ?
+ &build_params->min_clock_params[param_current] :
+ NULL),
+ (params_mask.bits.WATERMARK ?
+ &build_params->wm_input_params[param_current] :
+ NULL),
+ (params_mask.bits.BANDWIDTH ?
+ &build_params->bandwidth_params[param_current] :
+ NULL),
+ (params_mask.bits.LINE_BUFFER ?
+ &build_params->
+ line_buffer_params[path_index][plane_index] :
+ NULL),
+ use_predefined_hw_state);
+
+ if (build_result != SCALER_VALIDATION_OK) {
+ /* we can't support requested mode */
+ params_prepared = 0;
+ ASSERT(false);
+ return params_prepared;
+ }
+
+ /* OK */
+ param_current++;
+ } /* for() */
+
+ /* we prepared as many parameters as planes */
+ params_prepared = planes_num;
+
+ return params_prepared;
+}
+
+/* Compute the *lowest* clocks which can support the Path Mode Set.
+ * (lowest clocks == maximum power efficiency) */
+static void compute_minimum_clocks_for_path_mode_set(
+ struct display_clock *dc,
+ struct bandwidth_manager *bm,
+ struct min_clock_params *min_clock_params,
+ struct bandwidth_params *bandwidth_params,
+ uint32_t params_num,
+ struct minimum_clocks_calculation_result *result)
+{
+ dal_memset(result, 0, sizeof(*result));
+
+ result->min_dclk_khz = dal_display_clock_calculate_min_clock(dc,
+ params_num, min_clock_params);
+
+ result->min_sclk_khz = dal_bandwidth_manager_get_min_sclk(bm,
+ params_num, bandwidth_params);
+
+ result->min_mclk_khz = dal_bandwidth_manager_get_min_mclk(bm,
+ params_num, bandwidth_params);
+
+ result->min_deep_sleep_sclk =
+ dal_bandwidth_manager_get_min_deep_sleep_sclk(bm,
+ params_num, bandwidth_params,
+ 0 /*TODO: use result->min_dclk_khz here? */);
+}
+
+/******************************************************************************
+ * Public Functions
+ *****************************************************************************/
+
+/* TODO: handle case of 'hwss_build_params_mask.INCLUDE_CHROMA == 1'
+ */
+struct hwss_build_params *dal_hw_sequencer_prepare_path_parameters(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ union hwss_build_params_mask params_mask,
+ bool use_predefined_hw_state)
+{
+ struct dal_context *dal_context = hws->dal_context;
+ struct hw_path_mode *path_mode;
+ struct controller *crtc;
+ struct display_clock *display_clock;
+ struct bandwidth_manager *bm;
+ uint32_t paths_num;
+ uint32_t params_num_to_allocate = 0;
+ uint32_t total_params_num_prepared = 0;
+ uint32_t path_params_num_prepared = 0;
+ struct hwss_build_params *build_params = NULL;
+ uint32_t i;
+
+ ASSERT(path_set != NULL);
+
+ /* We need to get DisplayEngineClock and BandwidthManager from any path
+ * (first one is good enough) */
+ path_mode = dal_hw_path_mode_set_get_path_by_index(path_set, 0);
+ if (NULL == path_mode)
+ return NULL;
+
+ crtc = dal_display_path_get_controller(path_mode->display_path);
+ display_clock = dal_controller_get_display_clock(crtc);
+ bm = dal_controller_get_bandwidth_manager(crtc);
+ paths_num = dal_hw_path_mode_set_get_paths_number(path_set);
+
+ params_num_to_allocate = get_params_num_to_allocate(hws,
+ path_set,
+ params_mask);
+
+	/* The number of parameters could be 0. This is the case when reset
+	 * mode is called on the last path that was enabled and is now going to
+	 * be disabled. This is possible either when no display is connected to
+	 * the board, or when the OS calls an explicit reset mode before a set
+	 * mode.
+	 */
+
+ build_params = allocate_set_mode_params(hws,
+ path_set,
+ paths_num,
+ params_num_to_allocate,
+ params_mask);
+
+ if (NULL == build_params)
+ return NULL;
+
+ total_params_num_prepared = 0;
+
+ for (i = 0; i < paths_num; i++) {
+
+ path_mode = dal_hw_path_mode_set_get_path_by_index(path_set, i);
+
+ if (path_mode->action == HW_PATH_ACTION_RESET)
+ continue;
+
+ path_params_num_prepared = prepare_per_path_parameters(hws,
+ path_mode,
+ params_mask,
+ build_params,
+ i,
+ total_params_num_prepared,
+ use_predefined_hw_state);
+
+ if (!path_params_num_prepared) {
+ dal_hw_sequencer_free_path_parameters(build_params);
+ return NULL;
+ }
+
+ total_params_num_prepared += path_params_num_prepared;
+ }
+
+ if (total_params_num_prepared != params_num_to_allocate) {
+		PARAMS_WARNING(
+			"%s: Path:%d: params prepared [%d] != allocated [%d]\n",
+			__func__,
+			dal_display_path_get_display_index(
+				path_mode->display_path),
+			total_params_num_prepared,
+			params_num_to_allocate);
+ ASSERT(false);
+ }
+
+ build_params->params_num = total_params_num_prepared;
+
+ if (params_mask.bits.MIN_CLOCKS) {
+ compute_minimum_clocks_for_path_mode_set(
+ display_clock,
+ bm,
+ build_params->min_clock_params,
+ build_params->bandwidth_params,
+ total_params_num_prepared,
+ &build_params->min_clock_result);
+ }
+
+ if (params_mask.bits.BANDWIDTH &&
+ build_params->bandwidth_params &&
+ build_params->wm_input_params) {
+ uint32_t value = dal_bandwidth_manager_get_min_deep_sleep_sclk(
+ bm,
+ build_params->params_num,
+ build_params->bandwidth_params,
+ 0);
+ for (i = 0; i < total_params_num_prepared; i++) {
+ build_params->wm_input_params[i].deep_sleep_sclk_khz =
+ value;
+ }
+ }
+
+ return build_params;
+}
+
+void dal_hw_sequencer_free_path_parameters(
+ struct hwss_build_params *build_params)
+{
+ dal_free(build_params);
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.h b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.h
new file mode 100644
index 000000000000..fa1403eb709a
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sequencer_parameters.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SEQUENCER_PARAMETERS_H__
+#define __DAL_HW_SEQUENCER_PARAMETERS_H__
+
+#include "include/bandwidth_manager_types.h"
+#include "include/display_clock_interface.h"
+#include "include/scaler_types.h"
+#include "include/clock_source_types.h"
+#include "include/line_buffer_interface.h"
+
+/* Forward declarations */
+struct vector;
+
+enum build_option {
+ /* for set mode on first encoder that configured for ASIC signal */
+ BUILD_OPTION_SET_MODE,
+ /* for set mode on first encoder that configured for display signal */
+ BUILD_OPTION_SET_MODE2,
+ BUILD_OPTION_ENABLE_UPSTREAM,
+ BUILD_OPTION_ENABLE_DOWNSTREAM,
+ BUILD_OPTION_DISABLE,
+ BUILD_OPTION_DISABLE2,
+ BUILD_OPTION_STATIC_VALIDATE_UPSTREAM,
+ BUILD_OPTION_STATIC_VALIDATE_DOWNSTREAM,
+};
+
+union hwss_build_params_mask {
+ struct {
+ /* 0-3 */
+ uint32_t SCALING_TAPS:1;
+ uint32_t PLL_SETTINGS:1;
+ uint32_t MIN_CLOCKS:1;
+ uint32_t WATERMARK:1;
+ /* 4-7 */
+ uint32_t BANDWIDTH:1;
+ uint32_t LINE_BUFFER:1;
+ uint32_t INCLUDE_CHROMA:1;/* For Video on Underlay */
+ /* 8-31 */
+ uint32_t FUTURE_USE:25;
+ } bits;
+ uint32_t all;
+};
+
+#define PARAMS_MASK_ALL 0xFFFFFFFF
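+
+/* Illustrative usage sketch: a caller that only needs minimum clock and
+ * bandwidth parameters could pass
+ *
+ *	union hwss_build_params_mask mask = { .bits = {
+ *		.MIN_CLOCKS = 1, .BANDWIDTH = 1 } };
+ *
+ * instead of PARAMS_MASK_ALL.
+ */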
+
+/* Checks if any of the bits in the mask require Plane Configs information
+ * in order to build the parameters. */
+#define PARAMS_MASK_REQUIRES_PLAIN_CONFIGS(mask) \
+ (mask.bits.SCALING_TAPS || mask.bits.LINE_BUFFER)
+
+struct hwss_build_params {
+
+ /* filled when hwss_build_params_mask.MIN_CLOCKS==1 */
+ struct minimum_clocks_calculation_result min_clock_result;
+
+	/* arrays, indexed by path_id and plane_id.
+	 * These arrays are structured as follows:
+	 * XXX_params[path_id] is a pointer into a per-path area that holds
+	 * one entry per plane of that path.
+	 *
+	 * For example, XXX_params[0][plane_id] is the entry for 'plane_id'
+	 * of path zero.
+	 */
+ struct scaling_tap_info **scaling_taps_params;
+ struct lb_params_data **line_buffer_params;
+
+ /* array, indexed by path_id */
+ struct pll_settings *pll_settings_params;
+
+ /* array, indexed (compressed) by params number */
+ struct min_clock_params *min_clock_params;
+ struct watermark_input_params *wm_input_params;
+ struct bandwidth_params *bandwidth_params;
+
+ uint32_t params_num;
+ /* pointer to all allocated memory for all members of this struct */
+ void *mem_block;
+};
+
+struct hwss_build_params *dal_hw_sequencer_prepare_path_parameters(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ union hwss_build_params_mask params_mask,
+ bool use_predefined_hw_state);
+
+void dal_hw_sequencer_free_path_parameters(
+ struct hwss_build_params *build_params);
+
+#endif /*__DAL_HW_SEQUENCER_PARAMETERS_H__ */
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.c b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.c
new file mode 100644
index 000000000000..b8627c7ba1c8
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dal_services.h"
+
+#include "hw_sync_control.h"
+
+bool dal_hw_sync_control_construct_base(
+ struct hw_sync_control *hw_sync_control)
+{
+ return true;
+}
+
+enum hwss_result dal_hw_sync_control_inter_path_synchronize(
+ struct hw_sync_control *sync_control,
+ struct hw_path_mode_set *path_mode_set)
+{
+	/* Check if synchronization is needed */
+ uint32_t i = 0;
+ bool sync_needed = false;
+ uint32_t size = dal_hw_path_mode_set_get_paths_number(path_mode_set);
+
+ for (i = 0; i < size; ++i) {
+
+ const struct hw_path_mode *path_mode =
+ dal_hw_path_mode_set_get_path_by_index(
+ path_mode_set, i);
+
+ if (path_mode->sync_info.sync_request ==
+ HW_SYNC_REQUEST_SET_INTERPATH) {
+ sync_needed = true;
+ break;
+ }
+ }
+
+	/* Switch the DP clock to the active PLL if applicable for timing
+	 * synchronization. */
+ sync_control->funcs->switch_dp_clock_source(
+ sync_control, path_mode_set);
+
+	/* Perform CRTC synchronization */
+ if (sync_needed) {
+ struct hw_resync_flags resync_flags = { false, false, false };
+
+ resync_flags.INTERPATH = true;
+ resync_flags.NOTIFY_SYNCED = true;
+ return sync_control->funcs->resync_display_paths(
+ sync_control, path_mode_set, resync_flags);
+ }
+
+ return HWSS_RESULT_OK;
+}
+
+bool dal_hw_sync_control_resync_required(
+ enum hw_sync_request sync_request,
+ struct hw_resync_flags resync_flags)
+{
+ bool required = false;
+
+ switch (sync_request) {
+ case HW_SYNC_REQUEST_SET_INTERPATH:
+ required = resync_flags.INTERPATH;
+ break;
+
+ case HW_SYNC_REQUEST_SET_GL_SYNC_GENLOCK:
+ case HW_SYNC_REQUEST_SET_GL_SYNC_FREE_RUN:
+ case HW_SYNC_REQUEST_SET_GL_SYNC_SHADOW:
+ case HW_SYNC_REQUEST_RESYNC_GLSYNC:
+ required = resync_flags.GL_SYNC;
+ break;
+
+ default:
+ break;
+ }
+
+ return required;
+}
+
+void dal_hw_sync_control_notify_sync_established(
+ struct hw_sync_control *sync_control,
+ struct display_path *display_path,
+ struct hw_resync_flags resync_flags)
+{
+
+}
diff --git a/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.h b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.h
new file mode 100644
index 000000000000..53c6979b2696
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/hw_sequencer/hw_sync_control.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SYNC_CONTROL_H__
+#define __DAL_HW_SYNC_CONTROL_H__
+
+#include "hw_sequencer.h"
+
+struct hw_resync_flags {
+ bool INTERPATH:1;
+ bool GL_SYNC:1;
+ bool NOTIFY_SYNCED:1;
+};
+
+struct hw_sync_control;
+struct hw_path_mode_set;
+
+struct hw_sync_control_funcs {
+ bool (*switch_dp_clock_source)(
+ struct hw_sync_control *hw_sync_control,
+ struct hw_path_mode_set *path_mode_set);
+ enum hwss_result (*resync_display_paths)(
+ struct hw_sync_control *hw_sync_control,
+ struct hw_path_mode_set *path_mode_set,
+ struct hw_resync_flags resync_flags);
+ void (*destroy)(struct hw_sync_control **cntrl);
+};
+
+struct hw_sync_control {
+ const struct hw_sync_control_funcs *funcs;
+ struct adapter_service *as;
+};
+
+bool dal_hw_sync_control_construct_base(
+ struct hw_sync_control *hw_sync_control);
+
+enum hwss_result dal_hw_sync_control_inter_path_synchronize(
+ struct hw_sync_control *sync_control,
+ struct hw_path_mode_set *path_mode_set);
+
+bool dal_hw_sync_control_resync_required(
+ enum hw_sync_request sync_request,
+ struct hw_resync_flags resync_flags);
+
+void dal_hw_sync_control_notify_sync_established(
+ struct hw_sync_control *sync_control,
+ struct display_path *display_path,
+ struct hw_resync_flags resync_flags);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/dal/include/hw_adjustment_set.h b/drivers/gpu/drm/amd/dal/include/hw_adjustment_set.h
new file mode 100644
index 000000000000..10fb8e217f3b
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/include/hw_adjustment_set.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_ADJUSTMENT_SET_H__
+#define __DAL_HW_ADJUSTMENT_SET_H__
+
+#include "include/hw_adjustment_types.h"
+
+struct hw_adjustment_gamma_ramp;
+
+struct hw_adjustment_set {
+ struct hw_adjustment_gamma_ramp *gamma_ramp;
+ struct hw_adjustment_deflicker *deflicker_filter;
+ struct hw_adjustment_value *coherent;
+ struct hw_adjustment_value *h_sync;
+ struct hw_adjustment_value *v_sync;
+ struct hw_adjustment_value *composite_sync;
+ struct hw_adjustment_value *backlight;
+ struct hw_adjustment_value *vb_level;
+ struct hw_adjustment_color_control *color_control;
+ union hw_adjustment_bit_depth_reduction *bit_depth;
+};
+
+/*
+struct hw_adjustment *dal_adjustment_set_get_by_id(
+	struct hw_adjustment_set *adjustment_set,
+	enum hw_adjustment_id id);
+*/
+
+#endif /* __DAL_HW_ADJUSTMENT_SET_H__ */
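Every member of hw_adjustment_set is optional; a NULL pointer simply means the corresponding adjustment was not requested. As an illustration only (this helper does not exist in the patch), a caller could walk the set and hand each populated field to the matching entry point declared in hw_sequencer_interface.h, ignoring return codes for brevity:

static void apply_adjustment_set(
	struct hw_sequencer *hws,
	struct display_path *path,
	const struct hw_adjustment_set *set)
{
	/* Only fields that are present in the set are programmed. */
	if (set->gamma_ramp)
		dal_hw_sequencer_set_gamma_ramp_adjustment(
			hws, path, set->gamma_ramp);
	if (set->backlight)
		dal_hw_sequencer_set_backlight_adjustment(
			hws, path, set->backlight);
	if (set->h_sync)
		dal_hw_sequencer_set_horizontal_sync_adjustment(
			hws, path, set->h_sync);
	if (set->v_sync)
		dal_hw_sequencer_set_vertical_sync_adjustment(
			hws, path, set->v_sync);
	if (set->composite_sync)
		dal_hw_sequencer_set_composite_sync_adjustment(
			hws, path, set->composite_sync);
	if (set->bit_depth)
		dal_hw_sequencer_set_bit_depth_reduction_adj(
			hws, path, set->bit_depth);
}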
diff --git a/drivers/gpu/drm/amd/dal/include/hw_sequencer_interface.h b/drivers/gpu/drm/amd/dal/include/hw_sequencer_interface.h
new file mode 100644
index 000000000000..e84493360919
--- /dev/null
+++ b/drivers/gpu/drm/amd/dal/include/hw_sequencer_interface.h
@@ -0,0 +1,391 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_HW_SEQUENCER_INTERFACE_H__
+#define __DAL_HW_SEQUENCER_INTERFACE_H__
+
+#include "hw_sequencer_types.h"
+#include "hw_adjustment_types.h"
+#include "include/line_buffer_interface.h"
+#include "include/display_clock_interface.h"
+#include "include/scaler_types.h"
+#include "include/grph_csc_types.h"
+#include "controller/grph_gamma_types.h"
+#include "plane_types.h"
+
+#include "adapter_service_interface.h"
+
+enum hwss_result {
+ HWSS_RESULT_OK,
+ HWSS_RESULT_ERROR,
+ HWSS_RESULT_NO_BANDWIDTH,
+ HWSS_RESULT_OUT_OF_RANGE,
+ HWSS_RESULT_NOT_SUPPORTED,
+ HWSS_RESULT_UNKNOWN
+};
+
+struct hws_init_data {
+ struct adapter_service *as;
+ struct dal_context *dal_context;
+};
+
+/* TODO: below are three nearly identical structures.
+ * We should decide what to do with them. */
+struct blank_stream_param {
+ struct display_path *display_path;
+ uint32_t link_idx;
+ struct hw_crtc_timing timing;
+ struct link_settings link_settings;
+};
+
+struct enable_stream_param {
+ struct display_path *display_path;
+ uint32_t link_idx;
+ struct hw_crtc_timing timing;
+ struct link_settings link_settings;
+
+ const struct hw_path_mode *path_mode;
+};
+
+struct enable_link_param {
+ struct display_path *display_path;
+ uint32_t link_idx;
+ struct hw_crtc_timing timing;
+ struct link_settings link_settings;
+
+ bool optimized_programming;
+ const struct hw_path_mode *path_mode;
+};
+
+struct validate_link_param {
+ const struct display_path *display_path;
+ uint32_t link_idx;
+ struct link_settings link_settings;
+};
+
+struct set_dp_phy_pattern_param {
+ struct display_path *display_path;
+ uint32_t link_idx;
+ enum dp_test_pattern test_pattern;
+ const uint8_t *custom_pattern;
+ uint32_t cust_pattern_size;
+ enum dp_alt_scrambler_reset alt_scrambler_reset;
+};
+
+struct hw_global_objects;
+struct hw_sequencer;
+struct hw_adjustment;
+struct hw_path_mode_set;
+struct hw_path_mode;
+struct hwss_build_params;
+struct controller;
+
+void dal_hw_sequencer_mute_audio_endpoint(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ bool mute);
+
+void dal_hw_sequencer_enable_audio_endpoint(
+ struct hw_sequencer *hws,
+ struct link_settings *ls,
+ struct display_path *display_path,
+ bool enable);
+
+enum hwss_result dal_hw_sequencer_reset_audio_device(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+enum hwss_result dal_hw_sequencer_validate_link(
+ struct hw_sequencer *hws,
+ const struct validate_link_param *param);
+
+bool dal_hw_sequencer_is_supported_dp_training_pattern3(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ uint32_t link_idx);
+
+enum hwss_result dal_hw_sequencer_set_dp_phy_pattern(
+ struct hw_sequencer *hws,
+ const struct set_dp_phy_pattern_param *param);
+
+enum hwss_result dal_hw_sequencer_set_lane_settings(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ const struct link_training_settings *link_settings);
+
+void dal_hw_sequencer_set_test_pattern(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode,
+ enum dp_test_pattern test_pattern,
+ const struct link_training_settings *link_settings,
+ const uint8_t *custom_pattern,
+ uint8_t cust_pattern_size);
+
+bool dal_hw_sequencer_has_audio_bandwidth_changed(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *old,
+ const struct hw_path_mode *new);
+
+void dal_hw_sequencer_enable_azalia_audio_jack_presence(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+void dal_hw_sequencer_disable_azalia_audio_jack_presence(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+void dal_hw_sequencer_enable_memory_requests(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode);
+
+void dal_hw_sequencer_update_info_packets(
+ struct hw_sequencer *hws,
+ struct hw_path_mode *path_mode);
+
+/* Static validation for a SINGLE path mode.
+ * Already "active" paths (if any) are NOT taken into account. */
+enum hwss_result dal_hw_sequencer_validate_display_path_mode(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode);
+
+/* Validation for a SET of path modes, including Video Memory Bandwidth
+ * validation. */
+enum hwss_result dal_hw_sequencer_validate_display_hwpms(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set);
+
+struct hw_adjustment_gamma_ramp;
+
+enum hwss_result dal_hw_sequencer_set_gamma_ramp_adjustment(
+ struct hw_sequencer *hws,
+ const struct display_path *display_path,
+	struct hw_adjustment_gamma_ramp *adjustment);
+
+enum hwss_result dal_hw_sequencer_set_color_control_adjustment(
+ struct hw_sequencer *hws,
+ struct controller *crtc,
+ struct hw_adjustment_color_control *adjustment);
+
+enum hwss_result dal_hw_sequencer_set_vertical_sync_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment);
+
+enum hwss_result dal_hw_sequencer_set_horizontal_sync_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment);
+
+enum hwss_result dal_hw_sequencer_set_composite_sync_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment);
+
+enum hwss_result dal_hw_sequencer_enable_sync_output(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+enum hwss_result dal_hw_sequencer_disable_sync_output(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+enum hwss_result dal_hw_sequencer_set_backlight_adjustment(
+ struct hw_sequencer *hws,
+ struct display_path *display_path,
+ struct hw_adjustment_value *adjustment);
+
+void dal_hw_sequencer_disable_memory_requests(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode);
+
+enum hwss_result dal_hw_sequencer_enable_link(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in);
+
+void dal_hw_sequencer_disable_link(
+ struct hw_sequencer *hws,
+ const struct enable_link_param *in);
+
+void dal_hw_sequencer_enable_stream(
+ struct hw_sequencer *hws,
+ const struct enable_stream_param *in);
+
+void dal_hw_sequencer_disable_stream(
+ struct hw_sequencer *hws,
+ const struct enable_stream_param *in);
+
+void dal_hw_sequencer_blank_stream(
+ struct hw_sequencer *hws,
+ const struct blank_stream_param *in);
+
+void dal_hw_sequencer_unblank_stream(
+ struct hw_sequencer *hws,
+ const struct blank_stream_param *in);
+
+enum hwss_result dal_hw_sequencer_set_clocks_and_clock_state(
+ struct hw_sequencer *hws,
+ struct hw_global_objects *g_obj,
+ const struct minimum_clocks_calculation_result *min_clk_in,
+ enum clocks_state required_clocks_state);
+
+enum hwss_result dal_hw_sequencer_set_mode(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set);
+
+enum signal_type dal_hw_sequencer_detect_sink(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+enum signal_type dal_hw_sequencer_detect_load(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+bool dal_hw_sequencer_is_sink_present(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+void dal_hw_sequencer_psr_setup(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode,
+ const struct psr_caps *psr_caps);
+
+void dal_hw_sequencer_psr_enable(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+void dal_hw_sequencer_psr_disable(
+ struct hw_sequencer *hws,
+ struct display_path *display_path);
+
+void dal_hw_sequencer_program_drr(
+ struct hw_sequencer *hws,
+ const struct hw_path_mode *path_mode);
+
+enum hwss_result dal_hw_sequencer_set_safe_displaymark(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set);
+
+enum hwss_result dal_hw_sequencer_set_displaymark(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set);
+
+void dal_hw_sequencer_destroy(struct hw_sequencer **hws);
+
+struct hw_sequencer *dal_hw_sequencer_create(
+ struct hws_init_data *hws_init_data);
+
+enum hwss_result dal_hw_sequencer_set_overscan_adj(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *set,
+ struct hw_underscan_adjustment_data *hw_underscan);
+
+bool dal_hw_sequencer_enable_line_buffer_power_gating(
+ struct line_buffer *lb,
+ enum controller_id id,
+ enum pixel_type pixel_type,
+ uint32_t src_pixel_width,
+ uint32_t dst_pixel_width,
+ struct scaling_tap_info *taps,
+ enum lb_pixel_depth lb_depth,
+ uint32_t src_height,
+ uint32_t dst_height,
+ bool interlaced);
+
+void dal_hw_sequencer_build_scaler_parameter(
+ const struct hw_path_mode *path_mode,
+ const struct scaling_tap_info *taps,
+ bool build_timing_required,
+ struct scaler_data *scaler_data);
+
+void dal_hw_sequencer_update_info_frame(
+ const struct hw_path_mode *hw_path_mode);
+
+enum hwss_result dal_hw_sequencer_set_bit_depth_reduction_adj(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ union hw_adjustment_bit_depth_reduction *bit_depth);
+
+bool dal_hw_sequencer_is_support_custom_gamut_adj(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ enum hw_surface_type surface_type);
+
+enum hwss_result dal_hw_sequencer_get_hw_color_adj_range(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ struct hw_color_control_range *hw_color_range);
+
+bool dal_hw_sequencer_is_support_custom_gamma_coefficients(
+ struct hw_sequencer *hws,
+ struct display_path *disp_path,
+ enum hw_surface_type surface_type);
+
+enum hwss_result dal_hw_sequencer_build_csc_adjust(
+ struct hw_sequencer *hws,
+ struct hw_adjustment_color_control *color_control,
+ struct grph_csc_adjustment *adjust);
+
+void dal_hw_sequencer_build_gamma_ramp_adj_params(
+	const struct hw_adjustment_gamma_ramp *adjustment,
+ struct gamma_parameters *gamma_param,
+ struct gamma_ramp *ramp);
+
+void translate_from_hw_to_controller_regamma(
+ const struct hw_regamma_lut *hw_regamma,
+ struct regamma_lut *regamma);
+
+void dal_hw_sequencer_enable_wireless_idle_detection(
+ struct hw_sequencer *hws,
+ bool enable);
+
+/* Cursor interface */
+enum hwss_result dal_hw_sequencer_set_cursor_position(
+ struct hw_sequencer *hws,
+ struct display_path *dp,
+ const struct cursor_position *position);
+
+enum hwss_result dal_hw_sequencer_set_cursor_attributes(
+ struct hw_sequencer *hws,
+ struct display_path *dp,
+ const struct cursor_attributes *attributes);
+
+/* Underlay/MPO interface */
+enum hwss_result dal_hw_sequencer_set_plane_config(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t display_index);
+
+bool dal_hw_sequencer_update_plane_address(
+ struct hw_sequencer *hws,
+ struct display_path *dp,
+ uint32_t num_planes,
+ struct plane_addr_flip_info *info);
+
+void dal_hw_sequencer_prepare_to_release_planes(
+ struct hw_sequencer *hws,
+ struct hw_path_mode_set *path_set,
+ uint32_t display_index);
+
+#endif /* __DAL_HW_SEQUENCER_INTERFACE_H__ */
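Taken together, the intended call flow for a mode set looks roughly like the sketch below. This is only an illustration of how the entry points compose, not code from the patch: in real use the sequencer is created once at init rather than per call, error handling is reduced to early exits, and the ordering of the displaymark calls around set_mode is one plausible sequence.

static enum hwss_result example_apply_modes(
	struct adapter_service *as,
	struct dal_context *ctx,
	struct hw_path_mode_set *path_set)
{
	struct hws_init_data init_data = {
		.as = as,
		.dal_context = ctx,
	};
	struct hw_sequencer *hws = dal_hw_sequencer_create(&init_data);
	enum hwss_result result;

	if (!hws)
		return HWSS_RESULT_ERROR;

	/* Validate the whole set, including video memory bandwidth;
	 * individual paths can also be checked up front with
	 * dal_hw_sequencer_validate_display_path_mode(). */
	result = dal_hw_sequencer_validate_display_hwpms(hws, path_set);
	if (result != HWSS_RESULT_OK)
		goto out;

	/* Program conservative watermarks, apply the modes, then the
	 * watermarks for the new configuration. */
	dal_hw_sequencer_set_safe_displaymark(hws, path_set);
	result = dal_hw_sequencer_set_mode(hws, path_set);
	dal_hw_sequencer_set_displaymark(hws, path_set);

out:
	dal_hw_sequencer_destroy(&hws);
	return result;
}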